author	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-23 15:55:44 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-23 15:55:44 -0700
commit	acd53127c4adbd34570b221e7ea1f7fc94aea923 (patch)
tree	5e24adc30e91db14bc47ef4287319f38eb1b2108 /drivers
parent	Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma (diff)
parent	snic: driver for Cisco SCSI HBA (diff)
download	linux-dev-acd53127c4adbd34570b221e7ea1f7fc94aea923.tar.xz
	linux-dev-acd53127c4adbd34570b221e7ea1f7fc94aea923.zip
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is the usual grab bag of driver updates (lpfc, hpsa,
  megaraid_sas, cxgbi, be2iscsi) plus an assortment of minor updates.
  There is also one new driver: the Cisco snic.

  The advansys driver has been rewritten to get rid of the warning
  about converting it to the DMA API, the tape statistics patch got in
  and, finally, there's a reshuffle of SCSI header files to separate
  more cleanly initiator from target mode (and better share the common
  definitions)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (156 commits)
  snic: driver for Cisco SCSI HBA
  qla2xxx: Fix indentation
  qla2xxx: Comment out unreachable code
  fusion: remove dead MTRR code
  advansys: fix compilation errors and warnings when CONFIG_PCI is not set
  mptsas: fix depth param in scsi_track_queue_full
  megaraid: fix irq setup process regression
  lpfc: Update version to 10.7.0.0 for upstream patch set.
  lpfc: Fix to drop PLOGIs from fabric node till LOGO processing completes
  lpfc: Fix scsi task management error message.
  lpfc: Fix cq_id masking problem.
  lpfc: Fix scsi prep dma buf error.
  lpfc: Add support for using block multi-queue
  lpfc: Devices are not discovered during takeaway/giveback testing
  lpfc: Fix vport deletion failure.
  lpfc: Check for active portpeerbeacon.
  lpfc: Update driver version for upstream patch set 10.6.0.1.
  lpfc: Change buffer pool empty message to miscellaneous category
  lpfc: Fix incorrect log message reported for empty FCF record.
  lpfc: Fix rport leak.
  ...
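The header reshuffle called out above is visible in this merge as the new drivers/scsi/scsi_common.c (built into scsi_mod unconditionally, per the Makefile hunk below) and as target drivers such as ib_srpt including <scsi/scsi_proto.h>. A minimal sketch of what the split looks like from a target driver's side; cdb_is_rw() is a hypothetical helper, only the include and the opcode names come from this merge:

	#include <scsi/scsi_proto.h>	/* wire-level opcodes, sense keys, SAM status */

	static bool cdb_is_rw(const unsigned char *cdb)
	{
		/* READ_10/WRITE_10 come from scsi_proto.h, not the
		 * initiator-oriented scsi/scsi.h */
		return cdb[0] == READ_10 || cdb[0] == WRITE_10;
	}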
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/cciss.c | 27
-rw-r--r--  drivers/block/cciss_scsi.c | 1
-rw-r--r--  drivers/firewire/sbp2.c | 1
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 1
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h | 2
-rw-r--r--  drivers/message/fusion/mptbase.c | 24
-rw-r--r--  drivers/message/fusion/mptbase.h | 1
-rw-r--r--  drivers/message/fusion/mptsas.c | 4
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 1
-rw-r--r--  drivers/scsi/Kconfig | 20
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/NCR53c406a.c | 1
-rw-r--r--  drivers/scsi/a100u2w.c | 1
-rw-r--r--  drivers/scsi/aacraid/src.c | 2
-rw-r--r--  drivers/scsi/advansys.c | 1474
-rw-r--r--  drivers/scsi/aha152x.c | 1
-rw-r--r--  drivers/scsi/aha1542.c | 1
-rw-r--r--  drivers/scsi/aha1740.c | 1
-rw-r--r--  drivers/scsi/aha1740.h | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 1
-rw-r--r--  drivers/scsi/arm/arxescsi.c | 1
-rw-r--r--  drivers/scsi/arm/cumana_2.c | 1
-rw-r--r--  drivers/scsi/arm/eesox.c | 1
-rw-r--r--  drivers/scsi/atp870u.c | 1
-rw-r--r--  drivers/scsi/atp870u.h | 1
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 4
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 12
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 76
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 6
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 69
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 3
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 5
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 1
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 20
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.h | 2
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 52
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.h | 4
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 20
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 6
-rw-r--r--  drivers/scsi/dpt_i2o.c | 1
-rw-r--r--  drivers/scsi/fdomain.c | 1
-rw-r--r--  drivers/scsi/hpsa.c | 2780
-rw-r--r--  drivers/scsi/hpsa.h | 19
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 34
-rw-r--r--  drivers/scsi/imm.c | 1
-rw-r--r--  drivers/scsi/initio.c | 1
-rw-r--r--  drivers/scsi/ipr.h | 2
-rw-r--r--  drivers/scsi/ips.c | 9
-rw-r--r--  drivers/scsi/isci/init.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 733
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 181
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 201
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 236
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 26
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 152
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 65
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 82
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 9
-rw-r--r--  drivers/scsi/mac53c94.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 342
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 739
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 17
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 554
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 281
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 1
-rw-r--r--  drivers/scsi/nsp32.c | 1
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 1
-rw-r--r--  drivers/scsi/pcmcia/qlogic_stub.c | 1
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 1
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 1
-rw-r--r--  drivers/scsi/ppa.c | 1
-rw-r--r--  drivers/scsi/ps3rom.c | 1
-rw-r--r--  drivers/scsi/qla1280.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 16
-rw-r--r--  drivers/scsi/qlogicfas.c | 1
-rw-r--r--  drivers/scsi/qlogicpti.c | 1
-rw-r--r--  drivers/scsi/scsi.c | 46
-rw-r--r--  drivers/scsi/scsi_common.c | 178
-rw-r--r--  drivers/scsi/scsi_error.c | 64
-rw-r--r--  drivers/scsi/scsi_scan.c | 65
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 2
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/snic/Makefile | 17
-rw-r--r--  drivers/scsi/snic/cq_desc.h | 77
-rw-r--r--  drivers/scsi/snic/cq_enet_desc.h | 38
-rw-r--r--  drivers/scsi/snic/snic.h | 414
-rw-r--r--  drivers/scsi/snic/snic_attrs.c | 77
-rw-r--r--  drivers/scsi/snic/snic_ctl.c | 279
-rw-r--r--  drivers/scsi/snic/snic_debugfs.c | 560
-rw-r--r--  drivers/scsi/snic/snic_disc.c | 551
-rw-r--r--  drivers/scsi/snic/snic_disc.h | 124
-rw-r--r--  drivers/scsi/snic/snic_fwint.h | 525
-rw-r--r--  drivers/scsi/snic/snic_io.c | 518
-rw-r--r--  drivers/scsi/snic/snic_io.h | 118
-rw-r--r--  drivers/scsi/snic/snic_isr.c | 204
-rw-r--r--  drivers/scsi/snic/snic_main.c | 1044
-rw-r--r--  drivers/scsi/snic/snic_res.c | 295
-rw-r--r--  drivers/scsi/snic/snic_res.h | 97
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 2632
-rw-r--r--  drivers/scsi/snic/snic_stats.h | 123
-rw-r--r--  drivers/scsi/snic/snic_trc.c | 181
-rw-r--r--  drivers/scsi/snic/snic_trc.h | 121
-rw-r--r--  drivers/scsi/snic/vnic_cq.c | 86
-rw-r--r--  drivers/scsi/snic/vnic_cq.h | 110
-rw-r--r--  drivers/scsi/snic/vnic_cq_fw.h | 62
-rw-r--r--  drivers/scsi/snic/vnic_dev.c | 748
-rw-r--r--  drivers/scsi/snic/vnic_dev.h | 110
-rw-r--r--  drivers/scsi/snic/vnic_devcmd.h | 270
-rw-r--r--  drivers/scsi/snic/vnic_intr.c | 59
-rw-r--r--  drivers/scsi/snic/vnic_intr.h | 105
-rw-r--r--  drivers/scsi/snic/vnic_resource.h | 68
-rw-r--r--  drivers/scsi/snic/vnic_snic.h | 54
-rw-r--r--  drivers/scsi/snic/vnic_stats.h | 68
-rw-r--r--  drivers/scsi/snic/vnic_wq.c | 237
-rw-r--r--  drivers/scsi/snic/vnic_wq.h | 170
-rw-r--r--  drivers/scsi/snic/wq_enet_desc.h | 96
-rw-r--r--  drivers/scsi/st.c | 272
-rw-r--r--  drivers/scsi/st.h | 22
-rw-r--r--  drivers/scsi/sym53c416.c | 1
-rw-r--r--  drivers/scsi/ufs/Kconfig | 2
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 39
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 108
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 53
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 8
-rw-r--r--  drivers/scsi/virtio_scsi.c | 11
-rw-r--r--  drivers/scsi/wd719x.c | 1
-rw-r--r--  drivers/scsi/wd719x.h | 2
-rw-r--r--  drivers/staging/rts5208/rtsx.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_device.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.c | 2
-rw-r--r--  drivers/target/sbp/sbp_target.c | 2
-rw-r--r--  drivers/target/target_core_alua.c | 3
-rw-r--r--  drivers/target/target_core_device.c | 4
-rw-r--r--  drivers/target/target_core_fabric_lib.c | 4
-rw-r--r--  drivers/target/target_core_file.c | 3
-rw-r--r--  drivers/target/target_core_iblock.c | 3
-rw-r--r--  drivers/target/target_core_pr.c | 3
-rw-r--r--  drivers/target/target_core_pscsi.c | 2
-rw-r--r--  drivers/target/target_core_pscsi.h | 6
-rw-r--r--  drivers/target/target_core_rd.c | 3
-rw-r--r--  drivers/target/target_core_sbc.c | 2
-rw-r--r--  drivers/target/target_core_spc.c | 3
-rw-r--r--  drivers/target/target_core_stat.c | 3
-rw-r--r--  drivers/target/target_core_tmr.c | 2
-rw-r--r--  drivers/target/target_core_tpg.c | 3
-rw-r--r--  drivers/target/target_core_transport.c | 4
-rw-r--r--  drivers/target/target_core_ua.c | 3
-rw-r--r--  drivers/target/target_core_user.c | 5
-rw-r--r--  drivers/target/target_core_xcopy.c | 3
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 4
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 4
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 4
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 4
-rw-r--r--  drivers/usb/gadget/legacy/tcm_usb_gadget.c | 1
-rw-r--r--  drivers/usb/gadget/legacy/tcm_usb_gadget.h | 1
-rw-r--r--  drivers/usb/image/microtek.c | 1
-rw-r--r--  drivers/usb/storage/scsiglue.c | 1
-rw-r--r--  drivers/usb/storage/uas.c | 1
-rw-r--r--  drivers/vhost/scsi.c | 3
-rw-r--r--  drivers/xen/xen-scsiback.c | 5
174 files changed, 16252 insertions, 3386 deletions
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index ff20f192b0f6..0422c47261c3 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -139,8 +139,6 @@ static struct board_type products[] = {
{0x3214103C, "Smart Array E200i", &SA5_access},
{0x3215103C, "Smart Array E200i", &SA5_access},
{0x3237103C, "Smart Array E500", &SA5_access},
- {0x3223103C, "Smart Array P800", &SA5_access},
- {0x3234103C, "Smart Array P400", &SA5_access},
{0x323D103C, "Smart Array P700m", &SA5_access},
};
@@ -574,8 +572,6 @@ static void cciss_procinit(ctlr_info_t *h)
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
- 0x324a103C, /* Smart Array P712m */
- 0x324b103C, /* SmartArray P711m */
0x3223103C, /* Smart Array P800 */
0x3234103C, /* Smart Array P400 */
0x3235103C, /* Smart Array P400i */
@@ -586,12 +582,32 @@ static u32 unresettable_controller[] = {
0x3215103C, /* Smart Array E200i */
0x3237103C, /* Smart Array E500 */
0x323D103C, /* Smart Array P700m */
+ 0x40800E11, /* Smart Array 5i */
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
+ 0x40700E11, /* Smart Array 5300 */
+ 0x40820E11, /* Smart Array 532 */
+ 0x40830E11, /* Smart Array 5312 */
+ 0x409A0E11, /* Smart Array 641 */
+ 0x409B0E11, /* Smart Array 642 */
+ 0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
+ 0x40800E11, /* Smart Array 5i */
+ 0x40700E11, /* Smart Array 5300 */
+ 0x40820E11, /* Smart Array 532 */
+ 0x40830E11, /* Smart Array 5312 */
+ 0x409A0E11, /* Smart Array 641 */
+ 0x409B0E11, /* Smart Array 642 */
+ 0x40910E11, /* Smart Array 6i */
+ /* Exclude 640x boards. These are two pci devices in one slot
+ * which share a battery backed cache module. One controls the
+ * cache, the other accesses the cache through the one that controls
+ * it. If we reset the one controlling the cache, the other will
+ * likely not be happy. Just forbid resetting this conjoined mess.
+ */
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
};
@@ -4667,8 +4683,7 @@ static int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
*/
cciss_lookup_board_id(pdev, &board_id);
if (!ctlr_is_resettable(board_id)) {
- dev_warn(&pdev->dev, "Cannot reset Smart Array 640x "
- "due to shared cache module.");
+ dev_warn(&pdev->dev, "Controller not resettable\n");
return -ENODEV;
}
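For context, ctlr_is_resettable(), used in the hunk above, reduces to screening the PCI board id against the tables patched earlier in this file. A hedged sketch of that lookup; id_in_table() is an illustrative helper, not the exact cciss code:

	/* Hypothetical helper: is this board id in one of the tables above? */
	static bool id_in_table(const u32 *tbl, size_t len, u32 board_id)
	{
		size_t i;

		for (i = 0; i < len; i++)
			if (tbl[i] == board_id)
				return true;
		return false;
	}

	/* e.g. a hard reset would be allowed only when the id appears in
	 * neither unresettable_controller[] nor soft_unresettable_controller[] */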
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index ecd845cd28d8..1537302e56e3 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -84,7 +84,6 @@ static struct scsi_host_template cciss_driver_template = {
.show_info = cciss_scsi_show_info,
.queuecommand = cciss_scsi_queue_command,
.this_id = 7,
- .cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
/* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */
.eh_device_reset_handler= cciss_eh_device_reset_handler,
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index c22606fe3d44..6bac03999fd4 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1611,7 +1611,6 @@ static struct scsi_host_template scsi_driver_template = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
- .cmd_per_lun = 1,
.can_queue = 1,
.sdev_attrs = sbp2_scsi_sysfs_attrs,
};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0b2857b1b112..4556cd11288e 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -41,6 +41,7 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/configfs_macros.h>
#include <target/target_core_base.h>
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 3dae156905de..d85c0c205625 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -245,7 +245,7 @@ struct srpt_send_ioctx {
u8 n_rdma;
u8 n_rbuf;
bool queue_status_only;
- u8 sense_data[SCSI_SENSE_BUFFERSIZE];
+ u8 sense_data[TRANSPORT_SENSE_BUFFER];
};
/**
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 187f83629f7e..5dcc0313c38a 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -59,10 +59,6 @@
#include <linux/delay.h>
#include <linux/interrupt.h> /* needed for in_interrupt() proto */
#include <linux/dma-mapping.h>
-#include <asm/io.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
#include <linux/kthread.h>
#include <scsi/scsi_host.h>
@@ -2820,13 +2816,6 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc)
pci_disable_device(ioc->pcidev);
pci_release_selected_regions(ioc->pcidev, ioc->bars);
-#if defined(CONFIG_MTRR) && 0
- if (ioc->mtrr_reg > 0) {
- mtrr_del(ioc->mtrr_reg, 0, 0);
- dprintk(ioc, printk(MYIOC_s_INFO_FMT "MTRR region de-registered\n", ioc->name));
- }
-#endif
-
/* Zap the adapter lookup ptr! */
list_del(&ioc->list);
@@ -4512,19 +4501,6 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
-#if defined(CONFIG_MTRR) && 0
- /*
- * Enable Write Combining MTRR for IOC's memory region.
- * (at least as much as we can; "size and base must be
- * multiples of 4 kiB"
- */
- ioc->mtrr_reg = mtrr_add(ioc->req_frames_dma,
- sz,
- MTRR_TYPE_WRCOMB, 1);
- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MTRR region registered (base:size=%08x:%x)\n",
- ioc->name, ioc->req_frames_dma, sz));
-#endif
-
for (i = 0; i < ioc->req_depth; i++) {
alloc_dma += ioc->req_sz;
mem += ioc->req_sz;
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 8f14090b8b71..813d46311f6a 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -671,7 +671,6 @@ typedef struct _MPT_ADAPTER
u8 *HostPageBuffer; /* SAS - host page buffer support */
u32 HostPageBuffer_sz;
dma_addr_t HostPageBuffer_dma;
- int mtrr_reg;
struct pci_dev *pcidev; /* struct pci_dev pointer */
int bars; /* bitmask of BAR's that must be configured */
int msi_enable;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 5bdaae15a742..005a88b9f440 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -4090,7 +4090,7 @@ mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
continue;
}
depth = scsi_track_queue_full(sdev,
- current_depth - 1);
+ sdev->queue_depth - 1);
if (depth > 0)
sdev_printk(KERN_INFO, sdev,
"Queue depth reduced to (%d)\n",
@@ -4100,7 +4100,7 @@ mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
"Tagged Command Queueing is being "
"disabled\n");
else if (depth == 0)
- sdev_printk(KERN_INFO, sdev,
+ sdev_printk(KERN_DEBUG, sdev,
"Queue depth not changed yet\n");
}
}
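The mptsas fix above passes the device's live queue depth rather than the firmware event's current_depth, which is not a block-layer depth. A hedged sketch of the caller contract, mirroring the hunk: scsi_track_queue_full() takes the depth to ramp down to and returns the new depth (> 0), a negative value when it disables tagged queueing, or 0 when it has made no change yet:

	depth = scsi_track_queue_full(sdev, sdev->queue_depth - 1);
	if (depth > 0)
		sdev_printk(KERN_INFO, sdev, "Queue depth reduced to (%d)\n", depth);
	else if (depth < 0)
		sdev_printk(KERN_INFO, sdev, "Tagged Command Queueing is being disabled\n");
	else
		sdev_printk(KERN_DEBUG, sdev, "Queue depth not changed yet\n");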
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 75f4bfc2b98a..b3c6ff49103b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -297,7 +297,6 @@ static struct scsi_host_template zfcp_scsi_host_template = {
* ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
/* GCD, adjusted later */
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
- .cmd_per_lun = 1,
.use_clustering = 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index b94c217a09ae..456e1567841c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -503,7 +503,7 @@ config SCSI_DPT_I2O
config SCSI_ADVANSYS
tristate "AdvanSys SCSI support"
- depends on SCSI && VIRT_TO_BUS && !ARM
+ depends on SCSI
depends on ISA || EISA || PCI
help
This is a driver for all SCSI host adapters manufactured by
@@ -634,6 +634,23 @@ config FCOE_FNIC
<file:Documentation/scsi/scsi.txt>.
The module will be called fnic.
+config SCSI_SNIC
+ tristate "Cisco SNIC Driver"
+ depends on PCI && SCSI
+ help
+ This is support for the Cisco PCI-Express SCSI HBA.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/scsi.txt>.
+ The module will be called snic.
+
+config SCSI_SNIC_DEBUG_FS
+ bool "Cisco SNIC Driver Debugfs Support"
+ depends on SCSI_SNIC && DEBUG_FS
+ help
+ This enables to list debugging information from SNIC Driver
+ available via debugfs file system
+
config SCSI_DMX3191D
tristate "DMX3191D SCSI support"
depends on PCI && SCSI
@@ -1743,7 +1760,6 @@ config SCSI_BFA_FC
config SCSI_VIRTIO
tristate "virtio-scsi support"
depends on VIRTIO
- select BLK_DEV_INTEGRITY
help
This is the virtual HBA driver for virtio. If the kernel will
be used in a virtual machine, say Y or M.
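With the SCSI_SNIC entries added in the Kconfig hunk above, the new driver is enabled like any other LLD; a hypothetical .config fragment:

	# enable the new Cisco snic driver as a module
	CONFIG_SCSI_SNIC=m
	# optional debugfs statistics/trace support (needs CONFIG_DEBUG_FS=y)
	CONFIG_SCSI_SNIC_DEBUG_FS=y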
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index dee160a4f163..91209e3d27e3 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
+obj-$(CONFIG_SCSI_SNIC) += snic/
obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
@@ -161,6 +162,7 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
scsi_mod-y += scsi.o hosts.o scsi_ioctl.o \
scsicam.o scsi_error.o scsi_lib.o
+scsi_mod-y += scsi_common.o
scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o
scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o
scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 42c7161474f7..6e110c630d2c 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -1064,7 +1064,6 @@ static struct scsi_host_template driver_template =
.can_queue = 1 /* can_queue */,
.this_id = 7 /* SCSI ID of the chip */,
.sg_tablesize = 32 /*SG_ALL*/ /*SG_NONE*/,
- .cmd_per_lun = 1 /* commands per lun */,
.unchecked_isa_dma = 1 /* unchecked_isa_dma */,
.use_clustering = ENABLE_CLUSTERING,
};
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 7e33a61c1ba4..cac6b37d7b1b 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1078,7 +1078,6 @@ static struct scsi_host_template inia100_template = {
.can_queue = 1,
.this_id = 1,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
};
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 4596e9dd757c..e63cf9f22f36 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -46,7 +46,7 @@
static int aac_src_get_sync_status(struct aac_dev *dev);
-irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
struct aac_msix_ctx *ctx;
struct aac_dev *dev;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index ae95e347f37d..4305178e4e01 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -1,12 +1,10 @@
-#define DRV_NAME "advansys"
-#define ASC_VERSION "3.4" /* AdvanSys Driver Version */
-
/*
* advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
*
* Copyright (c) 1995-2000 Advanced System Products, Inc.
* Copyright (c) 2000-2001 ConnectCom Solutions, Inc.
* Copyright (c) 2007 Matthew Wilcox <matthew@wil.cx>
+ * Copyright (c) 2014 Hannes Reinecke <hare@suse.de>
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -39,6 +37,7 @@
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#include <linux/dmapool.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -49,26 +48,15 @@
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
+#define DRV_NAME "advansys"
+#define ASC_VERSION "3.5" /* AdvanSys Driver Version */
+
/* FIXME:
*
- * 1. Although all of the necessary command mapping places have the
- * appropriate dma_map.. APIs, the driver still processes its internal
- * queue using bus_to_virt() and virt_to_bus() which are illegal under
- * the API. The entire queue processing structure will need to be
- * altered to fix this.
- * 2. Need to add memory mapping workaround. Test the memory mapping.
- * If it doesn't work revert to I/O port access. Can a test be done
- * safely?
- * 3. Handle an interrupt not working. Keep an interrupt counter in
- * the interrupt handler. In the timeout function if the interrupt
- * has not occurred then print a message and run in polled mode.
- * 4. Need to add support for target mode commands, cf. CAM XPT.
- * 5. check DMA mapping functions for failure
- * 6. Use scsi_transport_spi
- * 7. advansys_info is not safe against multiple simultaneous callers
- * 8. Add module_param to override ISA/VLB ioport array
+ * 1. Use scsi_transport_spi
+ * 2. advansys_info is not safe against multiple simultaneous callers
+ * 3. Add module_param to override ISA/VLB ioport array
*/
-#warning this driver is still not properly converted to the DMA API
/* Enable driver /proc statistics. */
#define ADVANSYS_STATS
@@ -76,31 +64,8 @@
/* Enable driver tracing. */
#undef ADVANSYS_DEBUG
-/*
- * Portable Data Types
- *
- * Any instance where a 32-bit long or pointer type is assumed
- * for precision or HW defined structures, the following define
- * types must be used. In Linux the char, short, and int types
- * are all consistent at 8, 16, and 32 bits respectively. Pointers
- * and long types are 64 bits on Alpha and UltraSPARC.
- */
-#define ASC_PADDR __u32 /* Physical/Bus address data type. */
-#define ASC_VADDR __u32 /* Virtual address data type. */
-#define ASC_DCNT __u32 /* Unsigned Data count type. */
-#define ASC_SDCNT __s32 /* Signed Data count type. */
-
typedef unsigned char uchar;
-#ifndef TRUE
-#define TRUE (1)
-#endif
-#ifndef FALSE
-#define FALSE (0)
-#endif
-
-#define ERR (-1)
-#define UW_ERR (uint)(0xFFFF)
#define isodd_word(val) ((((uint)val) & (uint)0x0001) != 0)
#define PCI_VENDOR_ID_ASP 0x10cd
@@ -111,15 +76,6 @@ typedef unsigned char uchar;
#define PCI_DEVICE_ID_38C0800_REV1 0x2500
#define PCI_DEVICE_ID_38C1600_REV1 0x2700
-/*
- * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists.
- * The SRB structure will have to be changed and the ASC_SRB2SCSIQ()
- * macro re-defined to be able to obtain a ASC_SCSI_Q pointer from the
- * SRB structure.
- */
-#define CC_VERY_LONG_SG_LIST 0
-#define ASC_SRB2SCSIQ(srb_ptr) (srb_ptr)
-
#define PortAddr unsigned int /* port address size */
#define inp(port) inb(port)
#define outp(port, byte) outb((byte), (port))
@@ -307,15 +263,15 @@ typedef struct asc_scsiq_1 {
uchar sg_queue_cnt;
uchar target_id;
uchar target_lun;
- ASC_PADDR data_addr;
- ASC_DCNT data_cnt;
- ASC_PADDR sense_addr;
+ __le32 data_addr;
+ __le32 data_cnt;
+ __le32 sense_addr;
uchar sense_len;
uchar extra_bytes;
} ASC_SCSIQ_1;
typedef struct asc_scsiq_2 {
- ASC_VADDR srb_ptr;
+ u32 srb_tag;
uchar target_ix;
uchar flag;
uchar cdb_len;
@@ -338,8 +294,8 @@ typedef struct asc_scsiq_4 {
uchar y_res;
ushort x_req_count;
ushort x_reconnect_rtn;
- ASC_PADDR x_saved_data_addr;
- ASC_DCNT x_saved_data_cnt;
+ __le32 x_saved_data_addr;
+ __le32 x_saved_data_cnt;
} ASC_SCSIQ_4;
typedef struct asc_q_done_info {
@@ -351,12 +307,12 @@ typedef struct asc_q_done_info {
uchar sense_len;
uchar extra_bytes;
uchar res;
- ASC_DCNT remain_bytes;
+ u32 remain_bytes;
} ASC_QDONE_INFO;
typedef struct asc_sg_list {
- ASC_PADDR addr;
- ASC_DCNT bytes;
+ __le32 addr;
+ __le32 bytes;
} ASC_SG_LIST;
typedef struct asc_sg_head {
@@ -376,17 +332,6 @@ typedef struct asc_scsi_q {
ushort next_sg_index;
} ASC_SCSI_Q;
-typedef struct asc_scsi_req_q {
- ASC_SCSIQ_1 r1;
- ASC_SCSIQ_2 r2;
- uchar *cdbptr;
- ASC_SG_HEAD *sg_head;
- uchar *sense_ptr;
- ASC_SCSIQ_3 r3;
- uchar cdb[ASC_MAX_CDB_LEN];
- uchar sense[ASC_MIN_SENSE_LEN];
-} ASC_SCSI_REQ_Q;
-
typedef struct asc_scsi_bios_req_q {
ASC_SCSIQ_1 r1;
ASC_SCSIQ_2 r2;
@@ -570,7 +515,7 @@ typedef struct asc_dvc_var {
dma_addr_t overrun_dma;
uchar scsi_reset_wait;
uchar chip_no;
- char is_in_int;
+ bool is_in_int;
uchar max_total_qng;
uchar cur_total_qng;
uchar in_critical_cnt;
@@ -586,15 +531,13 @@ typedef struct asc_dvc_var {
char redo_scam;
ushort res2;
uchar dos_int13_table[ASC_MAX_TID + 1];
- ASC_DCNT max_dma_count;
+ unsigned int max_dma_count;
ASC_SCSI_BIT_ID_TYPE no_scam;
ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer;
uchar min_sdtr_index;
uchar max_sdtr_index;
struct asc_board *drv_ptr;
- int ptr_map_count;
- void **ptr_map;
- ASC_DCNT uc_break;
+ unsigned int uc_break;
} ASC_DVC_VAR;
typedef struct asc_dvc_inq_info {
@@ -602,8 +545,8 @@ typedef struct asc_dvc_inq_info {
} ASC_DVC_INQ_INFO;
typedef struct asc_cap_info {
- ASC_DCNT lba;
- ASC_DCNT blk_size;
+ u32 lba;
+ u32 blk_size;
} ASC_CAP_INFO;
typedef struct asc_cap_info_array {
@@ -929,31 +872,6 @@ typedef struct asc_mc_saved {
#define AscReadChipDvcID(port) (uchar)inp((port)+IOP_REG_ID)
#define AscWriteChipDvcID(port, data) outp((port)+IOP_REG_ID, data)
-/*
- * Portable Data Types
- *
- * Any instance where a 32-bit long or pointer type is assumed
- * for precision or HW defined structures, the following define
- * types must be used. In Linux the char, short, and int types
- * are all consistent at 8, 16, and 32 bits respectively. Pointers
- * and long types are 64 bits on Alpha and UltraSPARC.
- */
-#define ADV_PADDR __u32 /* Physical address data type. */
-#define ADV_VADDR __u32 /* Virtual address data type. */
-#define ADV_DCNT __u32 /* Unsigned Data count type. */
-#define ADV_SDCNT __s32 /* Signed Data count type. */
-
-/*
- * These macros are used to convert a virtual address to a
- * 32-bit value. This currently can be used on Linux Alpha
- * which uses 64-bit virtual address but a 32-bit bus address.
- * This is likely to break in the future, but doing this now
- * will give us time to change the HW and FW to handle 64-bit
- * addresses.
- */
-#define ADV_VADDR_TO_U32 virt_to_bus
-#define ADV_U32_TO_VADDR bus_to_virt
-
#define AdvPortAddr void __iomem * /* Virtual memory address size */
/*
@@ -965,8 +883,6 @@ typedef struct asc_mc_saved {
#define ADV_MEM_WRITEW(addr, word) writew(word, addr)
#define ADV_MEM_WRITEDW(addr, dword) writel(dword, addr)
-#define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 15)
-
/*
* Define total number of simultaneous maximum element scatter-gather
* request blocks per wide adapter. ASC_DEF_MAX_HOST_QNG (253) is the
@@ -1747,44 +1663,37 @@ typedef struct adveep_38C1600_config {
* little-endian.
*/
typedef struct adv_carr_t {
- ADV_VADDR carr_va; /* Carrier Virtual Address */
- ADV_PADDR carr_pa; /* Carrier Physical Address */
- ADV_VADDR areq_vpa; /* ASC_SCSI_REQ_Q Virtual or Physical Address */
+ __le32 carr_va; /* Carrier Virtual Address */
+ __le32 carr_pa; /* Carrier Physical Address */
+ __le32 areq_vpa; /* ADV_SCSI_REQ_Q Virtual or Physical Address */
/*
* next_vpa [31:4] Carrier Virtual or Physical Next Pointer
*
* next_vpa [3:1] Reserved Bits
* next_vpa [0] Done Flag set in Response Queue.
*/
- ADV_VADDR next_vpa;
+ __le32 next_vpa;
} ADV_CARR_T;
/*
* Mask used to eliminate low 4 bits of carrier 'next_vpa' field.
*/
-#define ASC_NEXT_VPA_MASK 0xFFFFFFF0
-
-#define ASC_RQ_DONE 0x00000001
-#define ASC_RQ_GOOD 0x00000002
-#define ASC_CQ_STOPPER 0x00000000
+#define ADV_NEXT_VPA_MASK 0xFFFFFFF0
-#define ASC_GET_CARRP(carrp) ((carrp) & ASC_NEXT_VPA_MASK)
+#define ADV_RQ_DONE 0x00000001
+#define ADV_RQ_GOOD 0x00000002
+#define ADV_CQ_STOPPER 0x00000000
-#define ADV_CARRIER_NUM_PAGE_CROSSING \
- (((ADV_CARRIER_COUNT * sizeof(ADV_CARR_T)) + (PAGE_SIZE - 1))/PAGE_SIZE)
-
-#define ADV_CARRIER_BUFSIZE \
- ((ADV_CARRIER_COUNT + ADV_CARRIER_NUM_PAGE_CROSSING) * sizeof(ADV_CARR_T))
+#define ADV_GET_CARRP(carrp) ((carrp) & ADV_NEXT_VPA_MASK)
/*
- * ASC_SCSI_REQ_Q 'a_flag' definitions
- *
- * The Adv Library should limit use to the lower nibble (4 bits) of
- * a_flag. Drivers are free to use the upper nibble (4 bits) of a_flag.
+ * Each carrier is 64 bytes, and we need three additional
+ * carrier for icq, irq, and the termination carrier.
*/
-#define ADV_POLL_REQUEST 0x01 /* poll for request completion */
-#define ADV_SCSIQ_DONE 0x02 /* request done */
-#define ADV_DONT_RETRY 0x08 /* don't do retry */
+#define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 3)
+
+#define ADV_CARRIER_BUFSIZE \
+ (ADV_CARRIER_COUNT * sizeof(ADV_CARR_T))
#define ADV_CHIP_ASC3550 0x01 /* Ultra-Wide IC */
#define ADV_CHIP_ASC38C0800 0x02 /* Ultra2-Wide/LVD IC */
@@ -1816,15 +1725,15 @@ typedef struct adv_dvc_cfg {
struct adv_dvc_var;
struct adv_scsi_req_q;
-typedef struct asc_sg_block {
+typedef struct adv_sg_block {
uchar reserved1;
uchar reserved2;
uchar reserved3;
uchar sg_cnt; /* Valid entries in block. */
- ADV_PADDR sg_ptr; /* Pointer to next sg block. */
+ __le32 sg_ptr; /* Pointer to next sg block. */
struct {
- ADV_PADDR sg_addr; /* SG element address. */
- ADV_DCNT sg_count; /* SG element count. */
+ __le32 sg_addr; /* SG element address. */
+ __le32 sg_count; /* SG element count. */
} sg_list[NO_OF_SG_PER_BLOCK];
} ADV_SG_BLOCK;
@@ -1844,10 +1753,10 @@ typedef struct adv_scsi_req_q {
uchar target_cmd;
uchar target_id; /* Device target identifier. */
uchar target_lun; /* Device target logical unit number. */
- ADV_PADDR data_addr; /* Data buffer physical address. */
- ADV_DCNT data_cnt; /* Data count. Ucode sets to residual. */
- ADV_PADDR sense_addr;
- ADV_PADDR carr_pa;
+ __le32 data_addr; /* Data buffer physical address. */
+ __le32 data_cnt; /* Data count. Ucode sets to residual. */
+ __le32 sense_addr;
+ __le32 carr_pa;
uchar mflag;
uchar sense_len;
uchar cdb_len; /* SCSI CDB length. Must <= 16 bytes. */
@@ -1857,29 +1766,26 @@ typedef struct adv_scsi_req_q {
uchar host_status; /* Ucode host status. */
uchar sg_working_ix;
uchar cdb[12]; /* SCSI CDB bytes 0-11. */
- ADV_PADDR sg_real_addr; /* SG list physical address. */
- ADV_PADDR scsiq_rptr;
+ __le32 sg_real_addr; /* SG list physical address. */
+ __le32 scsiq_rptr;
uchar cdb16[4]; /* SCSI CDB bytes 12-15. */
- ADV_VADDR scsiq_ptr;
- ADV_VADDR carr_va;
+ __le32 scsiq_ptr;
+ __le32 carr_va;
/*
* End of microcode structure - 60 bytes. The rest of the structure
* is used by the Adv Library and ignored by the microcode.
*/
- ADV_VADDR srb_ptr;
+ u32 srb_tag;
ADV_SG_BLOCK *sg_list_ptr; /* SG list virtual address. */
- char *vdata_addr; /* Data buffer virtual address. */
- uchar a_flag;
- uchar pad[2]; /* Pad out to a word boundary. */
} ADV_SCSI_REQ_Q;
/*
* The following two structures are used to process Wide Board requests.
*
* The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library
- * and microcode with the ADV_SCSI_REQ_Q field 'srb_ptr' pointing to the
- * adv_req_t. The adv_req_t structure 'cmndp' field in turn points to the
- * Mid-Level SCSI request structure.
+ * and microcode with the ADV_SCSI_REQ_Q field 'srb_tag' set to the
+ * SCSI request tag. The adv_req_t structure 'cmndp' field in turn points
+ * to the Mid-Level SCSI request structure.
*
* Zero or more ADV_SG_BLOCK are used with each ADV_SCSI_REQ_Q. Each
* ADV_SG_BLOCK structure holds 15 scatter-gather elements. Under Linux
@@ -1890,17 +1796,17 @@ typedef struct adv_scsi_req_q {
*/
typedef struct adv_sgblk {
ADV_SG_BLOCK sg_block; /* Sgblock structure. */
- uchar align[32]; /* Sgblock structure padding. */
+ dma_addr_t sg_addr; /* Physical address */
struct adv_sgblk *next_sgblkp; /* Next scatter-gather structure. */
} adv_sgblk_t;
typedef struct adv_req {
ADV_SCSI_REQ_Q scsi_req_q; /* Adv Library request structure. */
- uchar align[32]; /* Request structure padding. */
+ uchar align[24]; /* Request structure padding. */
struct scsi_cmnd *cmndp; /* Mid-Level SCSI command pointer. */
+ dma_addr_t req_addr;
adv_sgblk_t *sgblkp; /* Adv Library scatter-gather pointer. */
- struct adv_req *next_reqp; /* Next Request Structure. */
-} adv_req_t;
+} adv_req_t __aligned(32);
/*
* Adapter operation variable structure.
@@ -1937,12 +1843,12 @@ typedef struct adv_dvc_var {
uchar chip_scsi_id; /* chip SCSI target ID */
uchar chip_type;
uchar bist_err_code;
- ADV_CARR_T *carrier_buf;
+ ADV_CARR_T *carrier;
ADV_CARR_T *carr_freelist; /* Carrier free list. */
+ dma_addr_t carrier_addr;
ADV_CARR_T *icq_sp; /* Initiator command queue stopper pointer. */
ADV_CARR_T *irq_sp; /* Initiator response queue stopper pointer. */
ushort carr_pending_cnt; /* Count of pending carriers. */
- struct adv_req *orig_reqp; /* adv_req_t memory block. */
/*
* Note: The following fields will not be used after initialization. The
* driver may discard the buffer after initialization is done.
@@ -2068,8 +1974,8 @@ do { \
AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV)
/*
- * Abort an SRB in the chip's RISC Memory. The 'srb_ptr' argument must
- * match the ASC_SCSI_REQ_Q 'srb_ptr' field.
+ * Abort an SRB in the chip's RISC Memory. The 'srb_tag' argument must
+ * match the ADV_SCSI_REQ_Q 'srb_tag' field.
*
* If the request has not yet been sent to the device it will simply be
* aborted from RISC memory. If the request is disconnected it will be
@@ -2079,9 +1985,9 @@ do { \
* ADV_TRUE(1) - Queue was successfully aborted.
* ADV_FALSE(0) - Queue was not found on the active queue list.
*/
-#define AdvAbortQueue(asc_dvc, scsiq) \
- AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \
- (ADV_DCNT) (scsiq))
+#define AdvAbortQueue(asc_dvc, srb_tag) \
+ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \
+ (ADV_DCNT) (srb_tag))
/*
* Send a Bus Device Reset Message to the specified target ID.
@@ -2095,8 +2001,8 @@ do { \
* are not purged.
*/
#define AdvResetDevice(asc_dvc, target_id) \
- AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \
- (ADV_DCNT) (target_id))
+ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \
+ (ADV_DCNT) (target_id))
/*
* SCSI Wide Type definition.
@@ -2115,7 +2021,7 @@ do { \
#define ADV_TID_TO_TIDMASK(tid) (0x01 << ((tid) & ADV_MAX_TID))
/*
- * ASC_SCSI_REQ_Q 'done_status' and 'host_status' return values.
+ * ADV_SCSI_REQ_Q 'done_status' and 'host_status' return values.
*/
#define QD_NO_STATUS 0x00 /* Request not completed yet. */
@@ -2153,8 +2059,6 @@ do { \
#define QHSTA_M_SGBACKUP_ERROR 0x47 /* Scatter-Gather backup error */
/* Return the address that is aligned at the next doubleword >= to 'addr'. */
-#define ADV_8BALIGN(addr) (((ulong) (addr) + 0x7) & ~0x7)
-#define ADV_16BALIGN(addr) (((ulong) (addr) + 0xF) & ~0xF)
#define ADV_32BALIGN(addr) (((ulong) (addr) + 0x1F) & ~0x1F)
/*
@@ -2315,24 +2219,24 @@ do { \
/* Per board statistics structure */
struct asc_stats {
/* Driver Entrypoint Statistics */
- ADV_DCNT queuecommand; /* # calls to advansys_queuecommand() */
- ADV_DCNT reset; /* # calls to advansys_eh_bus_reset() */
- ADV_DCNT biosparam; /* # calls to advansys_biosparam() */
- ADV_DCNT interrupt; /* # advansys_interrupt() calls */
- ADV_DCNT callback; /* # calls to asc/adv_isr_callback() */
- ADV_DCNT done; /* # calls to request's scsi_done function */
- ADV_DCNT build_error; /* # asc/adv_build_req() ASC_ERROR returns. */
- ADV_DCNT adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */
- ADV_DCNT adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */
+ unsigned int queuecommand; /* # calls to advansys_queuecommand() */
+ unsigned int reset; /* # calls to advansys_eh_bus_reset() */
+ unsigned int biosparam; /* # calls to advansys_biosparam() */
+ unsigned int interrupt; /* # advansys_interrupt() calls */
+ unsigned int callback; /* # calls to asc/adv_isr_callback() */
+ unsigned int done; /* # calls to request's scsi_done function */
+ unsigned int build_error; /* # asc/adv_build_req() ASC_ERROR returns. */
+ unsigned int adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */
+ unsigned int adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */
/* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */
- ADV_DCNT exe_noerror; /* # ASC_NOERROR returns. */
- ADV_DCNT exe_busy; /* # ASC_BUSY returns. */
- ADV_DCNT exe_error; /* # ASC_ERROR returns. */
- ADV_DCNT exe_unknown; /* # unknown returns. */
+ unsigned int exe_noerror; /* # ASC_NOERROR returns. */
+ unsigned int exe_busy; /* # ASC_BUSY returns. */
+ unsigned int exe_error; /* # ASC_ERROR returns. */
+ unsigned int exe_unknown; /* # unknown returns. */
/* Data Transfer Statistics */
- ADV_DCNT xfer_cnt; /* # I/O requests received */
- ADV_DCNT xfer_elem; /* # scatter-gather elements */
- ADV_DCNT xfer_sect; /* # 512-byte blocks */
+ unsigned int xfer_cnt; /* # I/O requests received */
+ unsigned int xfer_elem; /* # scatter-gather elements */
+ unsigned int xfer_sect; /* # 512-byte blocks */
};
#endif /* ADVANSYS_STATS */
@@ -2345,6 +2249,7 @@ struct asc_stats {
*/
struct asc_board {
struct device *dev;
+ struct Scsi_Host *shost;
uint flags; /* Board flags */
unsigned int irq;
union {
@@ -2366,7 +2271,6 @@ struct asc_board {
ADVEEP_38C0800_CONFIG adv_38C0800_eep; /* 38C0800 EEPROM config. */
ADVEEP_38C1600_CONFIG adv_38C1600_eep; /* 38C1600 EEPROM config. */
} eep_config;
- ulong last_reset; /* Saved last reset time */
/* /proc/scsi/advansys/[0...] */
#ifdef ADVANSYS_STATS
struct asc_stats asc_stats; /* Board statistics */
@@ -2381,7 +2285,9 @@ struct asc_board {
void __iomem *ioremap_addr; /* I/O Memory remap address. */
ushort ioport; /* I/O Port address. */
adv_req_t *adv_reqp; /* Request structures. */
- adv_sgblk_t *adv_sgblkp; /* Scatter-gather structures. */
+ dma_addr_t adv_reqp_addr;
+ size_t adv_reqp_size;
+ struct dma_pool *adv_sgblk_pool; /* Scatter-gather structures. */
ushort bios_signature; /* BIOS Signature. */
ushort bios_version; /* BIOS Version. */
ushort bios_codeseg; /* BIOS Code Segment. */
@@ -2470,12 +2376,11 @@ static void asc_prt_adv_dvc_var(ADV_DVC_VAR *h)
printk(" start_motor 0x%x, scsi_reset_wait 0x%x\n",
(unsigned)h->start_motor, (unsigned)h->scsi_reset_wait);
- printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%lxn\n",
+ printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%p\n",
(unsigned)h->max_host_qng, (unsigned)h->max_dvc_qng,
- (ulong)h->carr_freelist);
+ h->carr_freelist);
- printk(" icq_sp 0x%lx, irq_sp 0x%lx\n",
- (ulong)h->icq_sp, (ulong)h->irq_sp);
+ printk(" icq_sp 0x%p, irq_sp 0x%p\n", h->icq_sp, h->irq_sp);
printk(" no_scam 0x%x, tagqng_able 0x%x\n",
(unsigned)h->no_scam, (unsigned)h->tagqng_able);
@@ -2600,8 +2505,8 @@ static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q)
printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q);
printk
- (" target_ix 0x%x, target_lun %u, srb_ptr 0x%lx, tag_code 0x%x,\n",
- q->q2.target_ix, q->q1.target_lun, (ulong)q->q2.srb_ptr,
+ (" target_ix 0x%x, target_lun %u, srb_tag 0x%x, tag_code 0x%x,\n",
+ q->q2.target_ix, q->q1.target_lun, q->q2.srb_tag,
q->q2.tag_code);
printk
@@ -2634,8 +2539,8 @@ static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q)
static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q)
{
printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q);
- printk(" srb_ptr 0x%lx, target_ix %u, cdb_len %u, tag_code %u,\n",
- (ulong)q->d2.srb_ptr, q->d2.target_ix, q->d2.cdb_len,
+ printk(" srb_tag 0x%x, target_ix %u, cdb_len %u, tag_code %u,\n",
+ q->d2.srb_tag, q->d2.target_ix, q->d2.cdb_len,
q->d2.tag_code);
printk
(" done_stat 0x%x, host_stat 0x%x, scsi_stat 0x%x, scsi_msg 0x%x\n",
@@ -2651,17 +2556,17 @@ static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b)
{
int i;
- printk(" ASC_SG_BLOCK at addr 0x%lx (sgblockno %d)\n",
+ printk(" ADV_SG_BLOCK at addr 0x%lx (sgblockno %d)\n",
(ulong)b, sgblockno);
- printk(" sg_cnt %u, sg_ptr 0x%lx\n",
- b->sg_cnt, (ulong)le32_to_cpu(b->sg_ptr));
+ printk(" sg_cnt %u, sg_ptr 0x%x\n",
+ b->sg_cnt, (u32)le32_to_cpu(b->sg_ptr));
BUG_ON(b->sg_cnt > NO_OF_SG_PER_BLOCK);
if (b->sg_ptr != 0)
BUG_ON(b->sg_cnt != NO_OF_SG_PER_BLOCK);
for (i = 0; i < b->sg_cnt; i++) {
- printk(" [%u]: sg_addr 0x%lx, sg_count 0x%lx\n",
- i, (ulong)b->sg_list[i].sg_addr,
- (ulong)b->sg_list[i].sg_count);
+ printk(" [%u]: sg_addr 0x%x, sg_count 0x%x\n",
+ i, (u32)le32_to_cpu(b->sg_list[i].sg_addr),
+ (u32)le32_to_cpu(b->sg_list[i].sg_count));
}
}
@@ -2673,15 +2578,16 @@ static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b)
static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q)
{
int sg_blk_cnt;
- struct asc_sg_block *sg_ptr;
+ struct adv_sg_block *sg_ptr;
+ adv_sgblk_t *sgblkp;
printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q);
- printk(" target_id %u, target_lun %u, srb_ptr 0x%lx, a_flag 0x%x\n",
- q->target_id, q->target_lun, (ulong)q->srb_ptr, q->a_flag);
+ printk(" target_id %u, target_lun %u, srb_tag 0x%x\n",
+ q->target_id, q->target_lun, q->srb_tag);
- printk(" cntl 0x%x, data_addr 0x%lx, vdata_addr 0x%lx\n",
- q->cntl, (ulong)le32_to_cpu(q->data_addr), (ulong)q->vdata_addr);
+ printk(" cntl 0x%x, data_addr 0x%lx\n",
+ q->cntl, (ulong)le32_to_cpu(q->data_addr));
printk(" data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n",
(ulong)le32_to_cpu(q->data_cnt),
@@ -2700,21 +2606,15 @@ static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q)
/* Display the request's ADV_SG_BLOCK structures. */
if (q->sg_list_ptr != NULL) {
+ sgblkp = container_of(q->sg_list_ptr, adv_sgblk_t, sg_block);
sg_blk_cnt = 0;
- while (1) {
- /*
- * 'sg_ptr' is a physical address. Convert it to a virtual
- * address by indexing 'sg_blk_cnt' into the virtual address
- * array 'sg_list_ptr'.
- *
- * XXX - Assumes all SG physical blocks are virtually contiguous.
- */
- sg_ptr =
- &(((ADV_SG_BLOCK *)(q->sg_list_ptr))[sg_blk_cnt]);
+ while (sgblkp) {
+ sg_ptr = &sgblkp->sg_block;
asc_prt_adv_sgblock(sg_blk_cnt, sg_ptr);
if (sg_ptr->sg_ptr == 0) {
break;
}
+ sgblkp = sgblkp->next_sgblkp;
sg_blk_cnt++;
}
}
@@ -2722,59 +2622,6 @@ static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q)
#endif /* ADVANSYS_DEBUG */
/*
- * The advansys chip/microcode contains a 32-bit identifier for each command
- * known as the 'srb'. I don't know what it stands for. The driver used
- * to encode the scsi_cmnd pointer by calling virt_to_bus and retrieve it
- * with bus_to_virt. Now the driver keeps a per-host map of integers to
- * pointers. It auto-expands when full, unless it can't allocate memory.
- * Note that an srb of 0 is treated specially by the chip/firmware, hence
- * the return of i+1 in this routine, and the corresponding subtraction in
- * the inverse routine.
- */
-#define BAD_SRB 0
-static u32 advansys_ptr_to_srb(struct asc_dvc_var *asc_dvc, void *ptr)
-{
- int i;
- void **new_ptr;
-
- for (i = 0; i < asc_dvc->ptr_map_count; i++) {
- if (!asc_dvc->ptr_map[i])
- goto out;
- }
-
- if (asc_dvc->ptr_map_count == 0)
- asc_dvc->ptr_map_count = 1;
- else
- asc_dvc->ptr_map_count *= 2;
-
- new_ptr = krealloc(asc_dvc->ptr_map,
- asc_dvc->ptr_map_count * sizeof(void *), GFP_ATOMIC);
- if (!new_ptr)
- return BAD_SRB;
- asc_dvc->ptr_map = new_ptr;
- out:
- ASC_DBG(3, "Putting ptr %p into array offset %d\n", ptr, i);
- asc_dvc->ptr_map[i] = ptr;
- return i + 1;
-}
-
-static void * advansys_srb_to_ptr(struct asc_dvc_var *asc_dvc, u32 srb)
-{
- void *ptr;
-
- srb--;
- if (srb >= asc_dvc->ptr_map_count) {
- printk("advansys: bad SRB %u, max %u\n", srb,
- asc_dvc->ptr_map_count);
- return NULL;
- }
- ptr = asc_dvc->ptr_map[srb];
- asc_dvc->ptr_map[srb] = NULL;
- ASC_DBG(3, "Returning ptr %p from array offset %d\n", ptr, srb);
- return ptr;
-}
-
-/*
* advansys_info()
*
* Return suitable for printing on the console with the argument
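The map removed above is replaced in this rewrite by block-layer tags: ADV_SCSI_REQ_Q gains a 'srb_tag' field, and completion recovers the command with scsi_host_find_tag(), as the adv_isr_callback() hunk later in this diff shows. A minimal sketch of the two directions (illustrative, not the full queuecommand path):

	/* submission: store the request's tag instead of a pointer */
	scsiqp->srb_tag = cpu_to_le32(scp->request->tag);

	/* completion: the tag indexes straight back to the scsi_cmnd */
	scp = scsi_host_find_tag(boardp->shost, le32_to_cpu(scsiqp->srb_tag));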
@@ -3350,7 +3197,7 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
seq_printf(m,
" flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
- boardp->flags, boardp->last_reset, jiffies,
+ boardp->flags, shost->last_reset, jiffies,
boardp->asc_n_io_port);
seq_printf(m, " io_port 0x%lx\n", shost->io_port);
@@ -3844,7 +3691,7 @@ static int AscStartChip(PortAddr iop_base)
return (1);
}
-static int AscStopChip(PortAddr iop_base)
+static bool AscStopChip(PortAddr iop_base)
{
uchar cc_val;
@@ -3855,22 +3702,22 @@ static int AscStopChip(PortAddr iop_base)
AscSetChipIH(iop_base, INS_HALT);
AscSetChipIH(iop_base, INS_RFLAG_WTM);
if ((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) {
- return (0);
+ return false;
}
- return (1);
+ return true;
}
-static int AscIsChipHalted(PortAddr iop_base)
+static bool AscIsChipHalted(PortAddr iop_base)
{
if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
if ((AscGetChipControl(iop_base) & CC_HALT) != 0) {
- return (1);
+ return true;
}
}
- return (0);
+ return false;
}
-static int AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc)
+static bool AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc)
{
PortAddr iop_base;
int i = 10;
@@ -3953,20 +3800,6 @@ static ushort AscReadLramWord(PortAddr iop_base, ushort addr)
return (word_data);
}
-#if CC_VERY_LONG_SG_LIST
-static ASC_DCNT AscReadLramDWord(PortAddr iop_base, ushort addr)
-{
- ushort val_low, val_high;
- ASC_DCNT dword_data;
-
- AscSetChipLramAddr(iop_base, addr);
- val_low = AscGetChipLramData(iop_base);
- val_high = AscGetChipLramData(iop_base);
- dword_data = ((ASC_DCNT) val_high << 16) | (ASC_DCNT) val_low;
- return (dword_data);
-}
-#endif /* CC_VERY_LONG_SG_LIST */
-
static void
AscMemWordSetLram(PortAddr iop_base, ushort s_addr, ushort set_wval, int words)
{
@@ -4068,27 +3901,24 @@ AscMemWordCopyPtrFromLram(PortAddr iop_base,
}
}
-static ASC_DCNT AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words)
+static u32 AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words)
{
- ASC_DCNT sum;
+ u32 sum = 0;
int i;
- sum = 0L;
for (i = 0; i < words; i++, s_addr += 2) {
sum += AscReadLramWord(iop_base, s_addr);
}
return (sum);
}
-static ushort AscInitLram(ASC_DVC_VAR *asc_dvc)
+static void AscInitLram(ASC_DVC_VAR *asc_dvc)
{
uchar i;
ushort s_addr;
PortAddr iop_base;
- ushort warn_code;
iop_base = asc_dvc->iop_base;
- warn_code = 0;
AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0,
(ushort)(((int)(asc_dvc->max_total_qng + 2 + 1) *
64) >> 1));
@@ -4127,14 +3957,13 @@ static ushort AscInitLram(ASC_DVC_VAR *asc_dvc)
AscWriteLramByte(iop_base,
(ushort)(s_addr + (ushort)ASC_SCSIQ_B_QNO), i);
}
- return warn_code;
}
-static ASC_DCNT
+static u32
AscLoadMicroCode(PortAddr iop_base, ushort s_addr,
const uchar *mcode_buf, ushort mcode_size)
{
- ASC_DCNT chksum;
+ u32 chksum;
ushort mcode_word_size;
ushort mcode_chksum;
@@ -4186,13 +4015,13 @@ static void AscInitQLinkVar(ASC_DVC_VAR *asc_dvc)
}
}
-static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
+static int AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
{
int i;
- ushort warn_code;
+ int warn_code;
PortAddr iop_base;
- ASC_PADDR phy_addr;
- ASC_DCNT phy_size;
+ __le32 phy_addr;
+ __le32 phy_size;
struct asc_board *board = asc_dvc_to_board(asc_dvc);
iop_base = asc_dvc->iop_base;
@@ -4231,12 +4060,12 @@ static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
- warn_code = UW_ERR;
+ warn_code = -EINVAL;
goto err_mcode_start;
}
if (AscStartChip(iop_base) != 1) {
asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
- warn_code = UW_ERR;
+ warn_code = -EIO;
goto err_mcode_start;
}
@@ -4250,13 +4079,13 @@ err_dma_map:
return warn_code;
}
-static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
+static int AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
{
const struct firmware *fw;
const char fwname[] = "advansys/mcode.bin";
int err;
unsigned long chksum;
- ushort warn_code;
+ int warn_code;
PortAddr iop_base;
iop_base = asc_dvc->iop_base;
@@ -4268,15 +4097,13 @@ static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
}
asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC;
if (asc_dvc->err_code != 0)
- return UW_ERR;
+ return ASC_ERROR;
if (!AscFindSignature(asc_dvc->iop_base)) {
asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
return warn_code;
}
AscDisableInterrupt(iop_base);
- warn_code |= AscInitLram(asc_dvc);
- if (asc_dvc->err_code != 0)
- return UW_ERR;
+ AscInitLram(asc_dvc);
err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
if (err) {
@@ -4336,7 +4163,7 @@ static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf,
int size, int memsize, int chksum)
{
int i, j, end, len = 0;
- ADV_DCNT sum;
+ u32 sum;
AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);
@@ -4382,38 +4209,72 @@ static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf,
return 0;
}
-static void AdvBuildCarrierFreelist(struct adv_dvc_var *asc_dvc)
+static void AdvBuildCarrierFreelist(struct adv_dvc_var *adv_dvc)
{
- ADV_CARR_T *carrp;
- ADV_SDCNT buf_size;
- ADV_PADDR carr_paddr;
+ off_t carr_offset = 0, next_offset;
+ dma_addr_t carr_paddr;
+ int carr_num = ADV_CARRIER_BUFSIZE / sizeof(ADV_CARR_T), i;
- carrp = (ADV_CARR_T *) ADV_16BALIGN(asc_dvc->carrier_buf);
- asc_dvc->carr_freelist = NULL;
- if (carrp == asc_dvc->carrier_buf) {
- buf_size = ADV_CARRIER_BUFSIZE;
- } else {
- buf_size = ADV_CARRIER_BUFSIZE - sizeof(ADV_CARR_T);
+ for (i = 0; i < carr_num; i++) {
+ carr_offset = i * sizeof(ADV_CARR_T);
+ /* Get physical address of the carrier 'carrp'. */
+ carr_paddr = adv_dvc->carrier_addr + carr_offset;
+
+ adv_dvc->carrier[i].carr_pa = cpu_to_le32(carr_paddr);
+ adv_dvc->carrier[i].carr_va = cpu_to_le32(carr_offset);
+ adv_dvc->carrier[i].areq_vpa = 0;
+ next_offset = carr_offset + sizeof(ADV_CARR_T);
+ if (i == carr_num)
+ next_offset = ~0;
+ adv_dvc->carrier[i].next_vpa = cpu_to_le32(next_offset);
}
+ /*
+ * We cannot have a carrier with 'carr_va' of '0', as
+ * a reference to this carrier would be interpreted as
+ * list termination.
+ * So start at carrier 1 with the freelist.
+ */
+ adv_dvc->carr_freelist = &adv_dvc->carrier[1];
+}
- do {
- /* Get physical address of the carrier 'carrp'. */
- carr_paddr = cpu_to_le32(virt_to_bus(carrp));
+static ADV_CARR_T *adv_get_carrier(struct adv_dvc_var *adv_dvc, u32 offset)
+{
+ int index;
- buf_size -= sizeof(ADV_CARR_T);
+ BUG_ON(offset > ADV_CARRIER_BUFSIZE);
- carrp->carr_pa = carr_paddr;
- carrp->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(carrp));
+ index = offset / sizeof(ADV_CARR_T);
+ return &adv_dvc->carrier[index];
+}
- /*
- * Insert the carrier at the beginning of the freelist.
- */
- carrp->next_vpa =
- cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist));
- asc_dvc->carr_freelist = carrp;
+static ADV_CARR_T *adv_get_next_carrier(struct adv_dvc_var *adv_dvc)
+{
+ ADV_CARR_T *carrp = adv_dvc->carr_freelist;
+ u32 next_vpa = le32_to_cpu(carrp->next_vpa);
+
+ if (next_vpa == 0 || next_vpa == ~0) {
+ ASC_DBG(1, "invalid vpa offset 0x%x\n", next_vpa);
+ return NULL;
+ }
+
+ adv_dvc->carr_freelist = adv_get_carrier(adv_dvc, next_vpa);
+ /*
+ * insert stopper carrier to terminate list
+ */
+ carrp->next_vpa = cpu_to_le32(ADV_CQ_STOPPER);
+
+ return carrp;
+}
+
+/*
+ * 'offset' is the index in the request pointer array
+ */
+static adv_req_t * adv_get_reqp(struct adv_dvc_var *adv_dvc, u32 offset)
+{
+ struct asc_board *boardp = adv_dvc->drv_ptr;
- carrp++;
- } while (buf_size > 0);
+ BUG_ON(offset > adv_dvc->max_host_qng);
+ return &boardp->adv_reqp[offset];
}
/*
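The rebuilt freelist above only works because the carriers now live in one physically contiguous, coherently mapped buffer whose bus address is kept in 'carrier_addr'. A hedged sketch of the probe-time setup this implies (the actual allocation sits elsewhere in this patch):

	/* one coherent buffer replaces the old virt_to_bus() arithmetic */
	adv_dvc->carrier = dma_alloc_coherent(boardp->dev, ADV_CARRIER_BUFSIZE,
					      &adv_dvc->carrier_addr, GFP_KERNEL);
	if (!adv_dvc->carrier)
		return -ENOMEM;
	AdvBuildCarrierFreelist(adv_dvc);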
@@ -4432,10 +4293,9 @@ static void AdvBuildCarrierFreelist(struct adv_dvc_var *asc_dvc)
*/
static int
AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc,
- ushort idle_cmd, ADV_DCNT idle_cmd_parameter)
+ ushort idle_cmd, u32 idle_cmd_parameter)
{
- int result;
- ADV_DCNT i, j;
+ int result, i, j;
AdvPortAddr iop_base;
iop_base = asc_dvc->iop_base;
@@ -4902,17 +4762,11 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
* Set-up the Host->RISC Initiator Command Queue (ICQ).
*/
- if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc);
+ if (!asc_dvc->icq_sp) {
asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
return ADV_ERROR;
}
- asc_dvc->carr_freelist = (ADV_CARR_T *)
- ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));
-
- /*
- * The first command issued will be placed in the stopper carrier.
- */
- asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
/*
* Set RISC ICQ physical address start value.
@@ -4922,21 +4776,11 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
/*
* Set-up the RISC->Host Initiator Response Queue (IRQ).
*/
- if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc);
+ if (!asc_dvc->irq_sp) {
asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
return ADV_ERROR;
}
- asc_dvc->carr_freelist = (ADV_CARR_T *)
- ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));
-
- /*
- * The first command completed by the RISC will be placed in
- * the stopper.
- *
- * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
- * completed the RISC will set the ASC_RQ_STOPPER bit.
- */
- asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
/*
* Set RISC IRQ physical address start value.
@@ -5399,17 +5243,12 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
* Set-up the Host->RISC Initiator Command Queue (ICQ).
*/
- if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc);
+ if (!asc_dvc->icq_sp) {
+ ASC_DBG(0, "Failed to get ICQ carrier\n");
asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
return ADV_ERROR;
}
- asc_dvc->carr_freelist = (ADV_CARR_T *)
- ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));
-
- /*
- * The first command issued will be placed in the stopper carrier.
- */
- asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
/*
* Set RISC ICQ physical address start value.
@@ -5420,21 +5259,12 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
/*
* Set-up the RISC->Host Initiator Response Queue (IRQ).
*/
- if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc);
+ if (!asc_dvc->irq_sp) {
+ ASC_DBG(0, "Failed to get IRQ carrier\n");
asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
return ADV_ERROR;
}
- asc_dvc->carr_freelist = (ADV_CARR_T *)
- ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));
-
- /*
- * The first command completed by the RISC will be placed in
- * the stopper.
- *
- * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
- * completed the RISC will set the ASC_RQ_STOPPER bit.
- */
- asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
/*
* Set RISC IRQ physical address start value.
@@ -5909,17 +5739,11 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
/*
* Set-up the Host->RISC Initiator Command Queue (ICQ).
*/
- if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc);
+ if (!asc_dvc->icq_sp) {
asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
return ADV_ERROR;
}
- asc_dvc->carr_freelist = (ADV_CARR_T *)
- ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));
-
- /*
- * The first command issued will be placed in the stopper carrier.
- */
- asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
/*
* Set RISC ICQ physical address start value. Initialize the
@@ -5933,21 +5757,11 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
/*
* Set-up the RISC->Host Initiator Response Queue (IRQ).
*/
- if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc);
+ if (!asc_dvc->irq_sp) {
asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
return ADV_ERROR;
}
- asc_dvc->carr_freelist = (ADV_CARR_T *)
- ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));
-
- /*
- * The first command completed by the RISC will be placed in
- * the stopper.
- *
- * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
- * completed the RISC will set the ASC_RQ_STOPPER bit.
- */
- asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
/*
* Set RISC IRQ physical address start value.
@@ -6134,15 +5948,16 @@ static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
*/
static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
{
- struct asc_board *boardp;
+ struct asc_board *boardp = adv_dvc_varp->drv_ptr;
+ u32 srb_tag;
adv_req_t *reqp;
adv_sgblk_t *sgblkp;
struct scsi_cmnd *scp;
- struct Scsi_Host *shost;
- ADV_DCNT resid_cnt;
+ u32 resid_cnt;
+ dma_addr_t sense_addr;
- ASC_DBG(1, "adv_dvc_varp 0x%lx, scsiqp 0x%lx\n",
- (ulong)adv_dvc_varp, (ulong)scsiqp);
+ ASC_DBG(1, "adv_dvc_varp 0x%p, scsiqp 0x%p\n",
+ adv_dvc_varp, scsiqp);
ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
/*
@@ -6150,22 +5965,9 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
* completed. The adv_req_t structure actually contains the
* completed ADV_SCSI_REQ_Q structure.
*/
- reqp = (adv_req_t *)ADV_U32_TO_VADDR(scsiqp->srb_ptr);
- ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp);
- if (reqp == NULL) {
- ASC_PRINT("adv_isr_callback: reqp is NULL\n");
- return;
- }
+ srb_tag = le32_to_cpu(scsiqp->srb_tag);
+ scp = scsi_host_find_tag(boardp->shost, srb_tag);
- /*
- * Get the struct scsi_cmnd structure and Scsi_Host structure for the
- * command that has been completed.
- *
- * Note: The adv_req_t request structure and adv_sgblk_t structure,
- * if any, are dropped, because a board structure pointer can not be
- * determined.
- */
- scp = reqp->cmndp;
ASC_DBG(1, "scp 0x%p\n", scp);
if (scp == NULL) {
ASC_PRINT
@@ -6174,12 +5976,25 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
}
ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
- shost = scp->device->host;
- ASC_STATS(shost, callback);
- ASC_DBG(1, "shost 0x%p\n", shost);
+ reqp = (adv_req_t *)scp->host_scribble;
+ ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp);
+ if (reqp == NULL) {
+ ASC_PRINT("adv_isr_callback: reqp is NULL\n");
+ return;
+ }
+ /*
+ * Remove backreferences to avoid duplicate
+ * command completions.
+ */
+ scp->host_scribble = NULL;
+ reqp->cmndp = NULL;
+
+ ASC_STATS(boardp->shost, callback);
+ ASC_DBG(1, "shost 0x%p\n", boardp->shost);
- boardp = shost_priv(shost);
- BUG_ON(adv_dvc_varp != &boardp->dvc_var.adv_dvc_var);
+ sense_addr = le32_to_cpu(scsiqp->sense_addr);
+ dma_unmap_single(boardp->dev, sense_addr,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
/*
* 'done_status' contains the command's ending status.
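The wide completion path no longer round-trips a kernel virtual address through the hardware. Pulling the submission side (adv_build_req, later in this patch) next to the completion side above, the whole scheme is:

        /* submission (adv_build_req): record tag and backreference */
        scsiqp->srb_tag = scp->request->tag;
        scp->host_scribble = (void *)reqp;

        /* completion (adv_isr_callback): recover both from host state */
        srb_tag = le32_to_cpu(scsiqp->srb_tag);
        scp = scsi_host_find_tag(boardp->shost, srb_tag);
        reqp = (adv_req_t *)scp->host_scribble;

Clearing host_scribble and reqp->cmndp right after the lookup is what guards against completing the same tag twice.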
@@ -6272,18 +6087,10 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
/* Remove 'sgblkp' from the request list. */
reqp->sgblkp = sgblkp->next_sgblkp;
- /* Add 'sgblkp' to the board free list. */
- sgblkp->next_sgblkp = boardp->adv_sgblkp;
- boardp->adv_sgblkp = sgblkp;
+ dma_pool_free(boardp->adv_sgblk_pool, sgblkp,
+ sgblkp->sg_addr);
}
- /*
- * Free the adv_req_t structure used with the command by adding
- * it back to the board free list.
- */
- reqp->next_reqp = boardp->adv_reqp;
- boardp->adv_reqp = reqp;
-
ASC_DBG(1, "done\n");
}
@@ -6312,8 +6119,9 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
uchar int_stat;
ushort target_bit;
ADV_CARR_T *free_carrp;
- ADV_VADDR irq_next_vpa;
+ __le32 irq_next_vpa;
ADV_SCSI_REQ_Q *scsiq;
+ adv_req_t *reqp;
iop_base = asc_dvc->iop_base;
@@ -6356,25 +6164,28 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
* Check if the IRQ stopper carrier contains a completed request.
*/
while (((irq_next_vpa =
- le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ASC_RQ_DONE) != 0) {
+ le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ADV_RQ_DONE) != 0) {
/*
* Get a pointer to the newly completed ADV_SCSI_REQ_Q structure.
* The RISC will have set 'areq_vpa' to a virtual address.
*
- * The firmware will have copied the ASC_SCSI_REQ_Q.scsiq_ptr
+ * The firmware will have copied the ADV_SCSI_REQ_Q.scsiq_ptr
* field to the carrier ADV_CARR_T.areq_vpa field. The conversion
- * below complements the conversion of ASC_SCSI_REQ_Q.scsiq_ptr'
+ * below complements the conversion of 'ADV_SCSI_REQ_Q.scsiq_ptr'
* in AdvExeScsiQueue().
*/
- scsiq = (ADV_SCSI_REQ_Q *)
- ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->areq_vpa));
+ u32 pa_offset = le32_to_cpu(asc_dvc->irq_sp->areq_vpa);
+ ASC_DBG(1, "irq_sp %p areq_vpa %u\n",
+ asc_dvc->irq_sp, pa_offset);
+ reqp = adv_get_reqp(asc_dvc, pa_offset);
+ scsiq = &reqp->scsi_req_q;
/*
* Request finished with good status and the queue was not
* DMAed to host memory by the firmware. Set all status fields
* to indicate good status.
*/
- if ((irq_next_vpa & ASC_RQ_GOOD) != 0) {
+ if ((irq_next_vpa & ADV_RQ_GOOD) != 0) {
scsiq->done_status = QD_NO_ERROR;
scsiq->host_status = scsiq->scsi_status = 0;
scsiq->data_cnt = 0L;
@@ -6386,11 +6197,10 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
* stopper carrier.
*/
free_carrp = asc_dvc->irq_sp;
- asc_dvc->irq_sp = (ADV_CARR_T *)
- ADV_U32_TO_VADDR(ASC_GET_CARRP(irq_next_vpa));
+ asc_dvc->irq_sp = adv_get_carrier(asc_dvc,
+ ADV_GET_CARRP(irq_next_vpa));
- free_carrp->next_vpa =
- cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist));
+ free_carrp->next_vpa = asc_dvc->carr_freelist->carr_va;
asc_dvc->carr_freelist = free_carrp;
asc_dvc->carr_pending_cnt--;
@@ -6405,7 +6215,6 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
* Notify the driver of the completed request by passing
* the ADV_SCSI_REQ_Q pointer to its callback function.
*/
- scsiq->a_flag |= ADV_SCSIQ_DONE;
adv_isr_callback(asc_dvc, scsiq);
/*
* Note: After the driver callback function is called, 'scsiq'
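AdvISR now tests ADV_RQ_DONE/ADV_RQ_GOOD directly on next_vpa and strips them with ADV_GET_CARRP(). The definitions sit outside these hunks; the values below mirror the old ASC_* constants and are an assumption, not part of this diff:

        #define ADV_RQ_DONE             0x00000001  /* completed by the RISC */
        #define ADV_RQ_GOOD             0x00000002  /* completed without error */
        #define ADV_CQ_STOPPER          0x00000000  /* end-of-queue marker */
        #define ADV_NEXT_VPA_MASK       0xFFFFFFF0  /* offset bits of next_vpa */
        #define ADV_GET_CARRP(vpa)      ((vpa) & ADV_NEXT_VPA_MASK)

The flag bits can share next_vpa only because carrier offsets are multiples of the carrier alignment, which the mask here assumes to be at least 16 bytes.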
@@ -6521,11 +6330,11 @@ AscCalSDTRData(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar syn_offset)
return byte;
}
-static int AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data)
+static bool AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data)
{
ASC_SCSI_BIT_ID_TYPE org_id;
int i;
- int sta = TRUE;
+ bool sta = true;
AscSetBank(iop_base, 1);
org_id = AscReadChipDvcID(iop_base);
@@ -6539,10 +6348,10 @@ static int AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data)
AscSetBank(iop_base, 0);
AscSetChipSyn(iop_base, sdtr_data);
if (AscGetChipSyn(iop_base) != sdtr_data) {
- sta = FALSE;
+ sta = false;
}
} else {
- sta = FALSE;
+ sta = false;
}
AscSetBank(iop_base, 1);
AscWriteChipDvcID(iop_base, org_id);
@@ -6556,12 +6365,12 @@ static void AscSetChipSDTR(PortAddr iop_base, uchar sdtr_data, uchar tid_no)
AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data);
}
-static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
+static void AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
{
EXT_MSG ext_msg;
EXT_MSG out_msg;
ushort halt_q_addr;
- int sdtr_accept;
+ bool sdtr_accept;
ushort int_halt_code;
ASC_SCSI_BIT_ID_TYPE scsi_busy;
ASC_SCSI_BIT_ID_TYPE target_id;
@@ -6603,14 +6412,14 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
boardp->sdtr_data[tid_no] = 0;
}
AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
- return (0);
+ return;
} else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) {
if (asc_dvc->pci_fix_asyn_xfer & target_id) {
AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
boardp->sdtr_data[tid_no] = asyn_sdtr;
}
AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
- return (0);
+ return;
} else if (int_halt_code == ASC_HALT_EXTMSG_IN) {
AscMemWordCopyPtrFromLram(iop_base,
ASCV_MSGIN_BEG,
@@ -6620,10 +6429,10 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
if (ext_msg.msg_type == EXTENDED_MESSAGE &&
ext_msg.msg_req == EXTENDED_SDTR &&
ext_msg.msg_len == MS_SDTR_LEN) {
- sdtr_accept = TRUE;
+ sdtr_accept = true;
if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) {
- sdtr_accept = FALSE;
+ sdtr_accept = false;
ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET;
}
if ((ext_msg.xfer_period <
@@ -6631,7 +6440,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
|| (ext_msg.xfer_period >
asc_dvc->sdtr_period_tbl[asc_dvc->
max_sdtr_index])) {
- sdtr_accept = FALSE;
+ sdtr_accept = false;
ext_msg.xfer_period =
asc_dvc->sdtr_period_tbl[asc_dvc->
min_sdtr_index];
@@ -6696,7 +6505,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
(ushort)ASC_SCSIQ_B_CNTL),
q_cntl);
AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
- return (0);
+ return;
} else if (ext_msg.msg_type == EXTENDED_MESSAGE &&
ext_msg.msg_req == EXTENDED_WDTR &&
ext_msg.msg_len == MS_WDTR_LEN) {
@@ -6712,7 +6521,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
(ushort)ASC_SCSIQ_B_CNTL),
q_cntl);
AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
- return (0);
+ return;
} else {
ext_msg.msg_type = MESSAGE_REJECT;
@@ -6726,7 +6535,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
(ushort)ASC_SCSIQ_B_CNTL),
q_cntl);
AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
- return (0);
+ return;
}
} else if (int_halt_code == ASC_HALT_CHK_CONDITION) {
@@ -6783,7 +6592,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy);
AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
- return (0);
+ return;
} else if (int_halt_code == ASC_HALT_SDTR_REJECTED) {
AscMemWordCopyPtrFromLram(iop_base,
@@ -6805,7 +6614,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
(ushort)(halt_q_addr +
(ushort)ASC_SCSIQ_B_CNTL), q_cntl);
AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
- return (0);
+ return;
} else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) {
scsi_status = AscReadLramByte(iop_base,
@@ -6850,166 +6659,9 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
}
}
AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
- return (0);
- }
-#if CC_VERY_LONG_SG_LIST
- else if (int_halt_code == ASC_HALT_HOST_COPY_SG_LIST_TO_RISC) {
- uchar q_no;
- ushort q_addr;
- uchar sg_wk_q_no;
- uchar first_sg_wk_q_no;
- ASC_SCSI_Q *scsiq; /* Ptr to driver request. */
- ASC_SG_HEAD *sg_head; /* Ptr to driver SG request. */
- ASC_SG_LIST_Q scsi_sg_q; /* Structure written to queue. */
- ushort sg_list_dwords;
- ushort sg_entry_cnt;
- uchar next_qp;
- int i;
-
- q_no = AscReadLramByte(iop_base, (ushort)ASCV_REQ_SG_LIST_QP);
- if (q_no == ASC_QLINK_END)
- return 0;
-
- q_addr = ASC_QNO_TO_QADDR(q_no);
-
- /*
- * Convert the request's SRB pointer to a host ASC_SCSI_REQ
- * structure pointer using a macro provided by the driver.
- * The ASC_SCSI_REQ pointer provides a pointer to the
- * host ASC_SG_HEAD structure.
- */
- /* Read request's SRB pointer. */
- scsiq = (ASC_SCSI_Q *)
- ASC_SRB2SCSIQ(ASC_U32_TO_VADDR(AscReadLramDWord(iop_base,
- (ushort)
- (q_addr +
- ASC_SCSIQ_D_SRBPTR))));
-
- /*
- * Get request's first and working SG queue.
- */
- sg_wk_q_no = AscReadLramByte(iop_base,
- (ushort)(q_addr +
- ASC_SCSIQ_B_SG_WK_QP));
-
- first_sg_wk_q_no = AscReadLramByte(iop_base,
- (ushort)(q_addr +
- ASC_SCSIQ_B_FIRST_SG_WK_QP));
-
- /*
- * Reset request's working SG queue back to the
- * first SG queue.
- */
- AscWriteLramByte(iop_base,
- (ushort)(q_addr +
- (ushort)ASC_SCSIQ_B_SG_WK_QP),
- first_sg_wk_q_no);
-
- sg_head = scsiq->sg_head;
-
- /*
- * Set sg_entry_cnt to the number of SG elements
- * that will be completed on this interrupt.
- *
- * Note: The allocated SG queues contain ASC_MAX_SG_LIST - 1
- * SG elements. The data_cnt and data_addr fields which
- * add 1 to the SG element capacity are not used when
- * restarting SG handling after a halt.
- */
- if (scsiq->remain_sg_entry_cnt > (ASC_MAX_SG_LIST - 1)) {
- sg_entry_cnt = ASC_MAX_SG_LIST - 1;
-
- /*
- * Keep track of remaining number of SG elements that
- * will need to be handled on the next interrupt.
- */
- scsiq->remain_sg_entry_cnt -= (ASC_MAX_SG_LIST - 1);
- } else {
- sg_entry_cnt = scsiq->remain_sg_entry_cnt;
- scsiq->remain_sg_entry_cnt = 0;
- }
-
- /*
- * Copy SG elements into the list of allocated SG queues.
- *
- * Last index completed is saved in scsiq->next_sg_index.
- */
- next_qp = first_sg_wk_q_no;
- q_addr = ASC_QNO_TO_QADDR(next_qp);
- scsi_sg_q.sg_head_qp = q_no;
- scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
- for (i = 0; i < sg_head->queue_cnt; i++) {
- scsi_sg_q.seq_no = i + 1;
- if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
- sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2);
- sg_entry_cnt -= ASC_SG_LIST_PER_Q;
- /*
- * After very first SG queue RISC FW uses next
- * SG queue first element then checks sg_list_cnt
- * against zero and then decrements, so set
- * sg_list_cnt 1 less than number of SG elements
- * in each SG queue.
- */
- scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1;
- scsi_sg_q.sg_cur_list_cnt =
- ASC_SG_LIST_PER_Q - 1;
- } else {
- /*
- * This is the last SG queue in the list of
- * allocated SG queues. If there are more
- * SG elements than will fit in the allocated
- * queues, then set the QCSG_SG_XFER_MORE flag.
- */
- if (scsiq->remain_sg_entry_cnt != 0) {
- scsi_sg_q.cntl |= QCSG_SG_XFER_MORE;
- } else {
- scsi_sg_q.cntl |= QCSG_SG_XFER_END;
- }
- /* equals sg_entry_cnt * 2 */
- sg_list_dwords = sg_entry_cnt << 1;
- scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1;
- scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1;
- sg_entry_cnt = 0;
- }
-
- scsi_sg_q.q_no = next_qp;
- AscMemWordCopyPtrToLram(iop_base,
- q_addr + ASC_SCSIQ_SGHD_CPY_BEG,
- (uchar *)&scsi_sg_q,
- sizeof(ASC_SG_LIST_Q) >> 1);
-
- AscMemDWordCopyPtrToLram(iop_base,
- q_addr + ASC_SGQ_LIST_BEG,
- (uchar *)&sg_head->
- sg_list[scsiq->next_sg_index],
- sg_list_dwords);
-
- scsiq->next_sg_index += ASC_SG_LIST_PER_Q;
-
- /*
- * If the just completed SG queue contained the
- * last SG element, then no more SG queues need
- * to be written.
- */
- if (scsi_sg_q.cntl & QCSG_SG_XFER_END) {
- break;
- }
-
- next_qp = AscReadLramByte(iop_base,
- (ushort)(q_addr +
- ASC_SCSIQ_B_FWD));
- q_addr = ASC_QNO_TO_QADDR(next_qp);
- }
-
- /*
- * Clear the halt condition so the RISC will be restarted
- * after the return.
- */
- AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
- return (0);
+ return;
}
-#endif /* CC_VERY_LONG_SG_LIST */
- return (0);
+ return;
}
/*
@@ -7043,7 +6695,7 @@ DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words)
static uchar
_AscCopyLramScsiDoneQ(PortAddr iop_base,
ushort q_addr,
- ASC_QDONE_INFO *scsiq, ASC_DCNT max_dma_count)
+ ASC_QDONE_INFO *scsiq, unsigned int max_dma_count)
{
ushort _val;
uchar sg_queue_cnt;
@@ -7070,10 +6722,10 @@ _AscCopyLramScsiDoneQ(PortAddr iop_base,
/*
* Read high word of remain bytes from alternate location.
*/
- scsiq->remain_bytes = (((ADV_DCNT)AscReadLramWord(iop_base,
- (ushort)(q_addr +
- (ushort)
- ASC_SCSIQ_W_ALT_DC1)))
+ scsiq->remain_bytes = (((u32)AscReadLramWord(iop_base,
+ (ushort)(q_addr +
+ (ushort)
+ ASC_SCSIQ_W_ALT_DC1)))
<< 16);
/*
* Read low word of remain bytes from original location.
@@ -7093,25 +6745,24 @@ _AscCopyLramScsiDoneQ(PortAddr iop_base,
*/
static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
{
- struct asc_board *boardp;
+ struct asc_board *boardp = asc_dvc_varp->drv_ptr;
+ u32 srb_tag;
struct scsi_cmnd *scp;
- struct Scsi_Host *shost;
ASC_DBG(1, "asc_dvc_varp 0x%p, qdonep 0x%p\n", asc_dvc_varp, qdonep);
ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep);
- scp = advansys_srb_to_ptr(asc_dvc_varp, qdonep->d2.srb_ptr);
+ /*
+ * Decrease the srb_tag by 1 to find the SCSI command
+ */
+ srb_tag = qdonep->d2.srb_tag - 1;
+ scp = scsi_host_find_tag(boardp->shost, srb_tag);
if (!scp)
return;
ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
- shost = scp->device->host;
- ASC_STATS(shost, callback);
- ASC_DBG(1, "shost 0x%p\n", shost);
-
- boardp = shost_priv(shost);
- BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var);
+ ASC_STATS(boardp->shost, callback);
dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
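The narrow path uses the same scsi_host_find_tag() lookup but biases the tag by one, since the firmware reserves srb_tag 0 (note the srb_tag == 0 test kept in AscIsrQDone below). Side by side:

        /* submission (asc_build_req): skip the reserved tag 0 */
        srb_tag = scp->request->tag + 1;
        asc_scsi_q->q2.srb_tag = srb_tag;

        /* completion (asc_isr_callback): undo the bias */
        srb_tag = qdonep->d2.srb_tag - 1;
        scp = scsi_host_find_tag(boardp->shost, srb_tag);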
@@ -7220,7 +6871,7 @@ static int AscIsrQDone(ASC_DVC_VAR *asc_dvc)
uchar cur_target_qng;
ASC_QDONE_INFO scsiq_buf;
ASC_QDONE_INFO *scsiq;
- int false_overrun;
+ bool false_overrun;
iop_base = asc_dvc->iop_base;
n_q_used = 1;
@@ -7294,14 +6945,17 @@ static int AscIsrQDone(ASC_DVC_VAR *asc_dvc)
scsiq->d3.done_stat = QD_WITH_ERROR;
goto FATAL_ERR_QDONE;
}
- if ((scsiq->d2.srb_ptr == 0UL) ||
+ if ((scsiq->d2.srb_tag == 0UL) ||
((scsiq->q_status & QS_ABORTED) != 0)) {
return (0x11);
} else if (scsiq->q_status == QS_DONE) {
- false_overrun = FALSE;
+ /*
+ * This is also curious.
+ * false_overrun will _always_ be set to 'false'
+ */
+ false_overrun = false;
if (scsiq->extra_bytes != 0) {
- scsiq->remain_bytes +=
- (ADV_DCNT)scsiq->extra_bytes;
+ scsiq->remain_bytes += scsiq->extra_bytes;
}
if (scsiq->d3.done_stat == QD_WITH_ERROR) {
if (scsiq->d3.host_stat ==
@@ -7372,23 +7026,23 @@ static int AscISR(ASC_DVC_VAR *asc_dvc)
uchar host_flag;
iop_base = asc_dvc->iop_base;
- int_pending = FALSE;
+ int_pending = ASC_FALSE;
if (AscIsIntPending(iop_base) == 0)
return int_pending;
if ((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) {
- return ERR;
+ return ASC_ERROR;
}
if (asc_dvc->in_critical_cnt != 0) {
AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL);
- return ERR;
+ return ASC_ERROR;
}
if (asc_dvc->is_in_int) {
AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY);
- return ERR;
+ return ASC_ERROR;
}
- asc_dvc->is_in_int = TRUE;
+ asc_dvc->is_in_int = true;
ctrl_reg = AscGetChipControl(iop_base);
saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET |
CC_SINGLE_STEP | CC_DIAG | CC_TEST));
@@ -7396,7 +7050,7 @@ static int AscISR(ASC_DVC_VAR *asc_dvc)
if (chipstat & CSW_SCSI_RESET_LATCH) {
if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) {
int i = 10;
- int_pending = TRUE;
+ int_pending = ASC_TRUE;
asc_dvc->sdtr_done = 0;
saved_ctrl_reg &= (uchar)(~CC_HALT);
while ((AscGetChipStatus(iop_base) &
@@ -7418,15 +7072,11 @@ static int AscISR(ASC_DVC_VAR *asc_dvc)
(uchar)(host_flag | (uchar)ASC_HOST_FLAG_IN_ISR));
if ((chipstat & CSW_INT_PENDING) || (int_pending)) {
AscAckInterrupt(iop_base);
- int_pending = TRUE;
+ int_pending = ASC_TRUE;
if ((chipstat & CSW_HALTED) && (ctrl_reg & CC_SINGLE_STEP)) {
- if (AscIsrChipHalted(asc_dvc) == ERR) {
- goto ISR_REPORT_QDONE_FATAL_ERROR;
- } else {
- saved_ctrl_reg &= (uchar)(~CC_HALT);
- }
+ AscIsrChipHalted(asc_dvc);
+ saved_ctrl_reg &= (uchar)(~CC_HALT);
} else {
- ISR_REPORT_QDONE_FATAL_ERROR:
if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) {
while (((status =
AscIsrQDone(asc_dvc)) & 0x01) != 0) {
@@ -7440,20 +7090,20 @@ static int AscISR(ASC_DVC_VAR *asc_dvc)
} while (status == 0x11);
}
if ((status & 0x80) != 0)
- int_pending = ERR;
+ int_pending = ASC_ERROR;
}
}
AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
AscSetChipLramAddr(iop_base, saved_ram_addr);
AscSetChipControl(iop_base, saved_ctrl_reg);
- asc_dvc->is_in_int = FALSE;
+ asc_dvc->is_in_int = false;
return int_pending;
}
/*
* advansys_reset()
*
- * Reset the bus associated with the command 'scp'.
+ * Reset the host associated with the command 'scp'.
*
* This function runs its own thread. Interrupts must be blocked but
* sleeping is allowed and no locking other than for host structures is
@@ -7471,7 +7121,7 @@ static int advansys_reset(struct scsi_cmnd *scp)
ASC_STATS(shost, reset);
- scmd_printk(KERN_INFO, scp, "SCSI bus reset started...\n");
+ scmd_printk(KERN_INFO, scp, "SCSI host reset started...\n");
if (ASC_NARROW_BOARD(boardp)) {
ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var;
@@ -7482,20 +7132,19 @@ static int advansys_reset(struct scsi_cmnd *scp)
/* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */
if (asc_dvc->err_code || !asc_dvc->overrun_dma) {
- scmd_printk(KERN_INFO, scp, "SCSI bus reset error: "
+ scmd_printk(KERN_INFO, scp, "SCSI host reset error: "
"0x%x, status: 0x%x\n", asc_dvc->err_code,
status);
ret = FAILED;
} else if (status) {
- scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: "
+ scmd_printk(KERN_INFO, scp, "SCSI host reset warning: "
"0x%x\n", status);
} else {
- scmd_printk(KERN_INFO, scp, "SCSI bus reset "
+ scmd_printk(KERN_INFO, scp, "SCSI host reset "
"successful\n");
}
ASC_DBG(1, "after AscInitAsc1000Driver()\n");
- spin_lock_irqsave(shost->host_lock, flags);
} else {
/*
* If the suggest reset bus flags are set, then reset the bus.
@@ -7504,28 +7153,25 @@ static int advansys_reset(struct scsi_cmnd *scp)
ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var;
/*
- * Reset the target's SCSI bus.
+ * Reset the chip and SCSI bus.
*/
ASC_DBG(1, "before AdvResetChipAndSB()\n");
switch (AdvResetChipAndSB(adv_dvc)) {
case ASC_TRUE:
- scmd_printk(KERN_INFO, scp, "SCSI bus reset "
+ scmd_printk(KERN_INFO, scp, "SCSI host reset "
"successful\n");
break;
case ASC_FALSE:
default:
- scmd_printk(KERN_INFO, scp, "SCSI bus reset error\n");
+ scmd_printk(KERN_INFO, scp, "SCSI host reset error\n");
ret = FAILED;
break;
}
spin_lock_irqsave(shost->host_lock, flags);
AdvISR(adv_dvc);
+ spin_unlock_irqrestore(shost->host_lock, flags);
}
- /* Save the time of the most recently completed reset. */
- boardp->last_reset = jiffies;
- spin_unlock_irqrestore(shost->host_lock, flags);
-
ASC_DBG(1, "ret %d\n", ret);
return ret;
@@ -7584,9 +7230,10 @@ static irqreturn_t advansys_interrupt(int irq, void *dev_id)
struct Scsi_Host *shost = dev_id;
struct asc_board *boardp = shost_priv(shost);
irqreturn_t result = IRQ_NONE;
+ unsigned long flags;
ASC_DBG(2, "boardp 0x%p\n", boardp);
- spin_lock(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, flags);
if (ASC_NARROW_BOARD(boardp)) {
if (AscIsIntPending(shost->io_port)) {
result = IRQ_HANDLED;
@@ -7601,38 +7248,38 @@ static irqreturn_t advansys_interrupt(int irq, void *dev_id)
ASC_STATS(shost, interrupt);
}
}
- spin_unlock(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, flags);
ASC_DBG(1, "end\n");
return result;
}
-static int AscHostReqRiscHalt(PortAddr iop_base)
+static bool AscHostReqRiscHalt(PortAddr iop_base)
{
int count = 0;
- int sta = 0;
+ bool sta = false;
uchar saved_stop_code;
if (AscIsChipHalted(iop_base))
- return (1);
+ return true;
saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP);
do {
if (AscIsChipHalted(iop_base)) {
- sta = 1;
+ sta = true;
break;
}
mdelay(100);
} while (count++ < 20);
AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code);
- return (sta);
+ return sta;
}
-static int
+static bool
AscSetRunChipSynRegAtID(PortAddr iop_base, uchar tid_no, uchar sdtr_data)
{
- int sta = FALSE;
+ bool sta = false;
if (AscHostReqRiscHalt(iop_base)) {
sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
@@ -7851,13 +7498,17 @@ static int advansys_slave_configure(struct scsi_device *sdev)
return 0;
}
-static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp)
+static __le32 asc_get_sense_buffer_dma(struct scsi_cmnd *scp)
{
struct asc_board *board = shost_priv(scp->device->host);
+
scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- dma_cache_sync(board->dev, scp->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ SCSI_SENSE_BUFFERSIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(board->dev, scp->SCp.dma_handle)) {
+ ASC_DBG(1, "failed to map sense buffer\n");
+ return 0;
+ }
return cpu_to_le32(scp->SCp.dma_handle);
}
@@ -7866,17 +7517,16 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
{
struct asc_dvc_var *asc_dvc = &boardp->dvc_var.asc_dvc_var;
int use_sg;
+ u32 srb_tag;
memset(asc_scsi_q, 0, sizeof(*asc_scsi_q));
/*
- * Point the ASC_SCSI_Q to the 'struct scsi_cmnd'.
+ * Set the srb_tag to the command tag + 1, as
+ * srb_tag '0' is used internally by the chip.
*/
- asc_scsi_q->q2.srb_ptr = advansys_ptr_to_srb(asc_dvc, scp);
- if (asc_scsi_q->q2.srb_ptr == BAD_SRB) {
- scp->result = HOST_BYTE(DID_SOFT_ERROR);
- return ASC_ERROR;
- }
+ srb_tag = scp->request->tag + 1;
+ asc_scsi_q->q2.srb_tag = srb_tag;
/*
* Build the ASC_SCSI_Q request.
@@ -7887,8 +7537,10 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
asc_scsi_q->q1.target_lun = scp->device->lun;
asc_scsi_q->q2.target_ix =
ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun);
- asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp);
+ asc_scsi_q->q1.sense_addr = asc_get_sense_buffer_dma(scp);
asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE;
+ if (!asc_scsi_q->q1.sense_addr)
+ return ASC_BUSY;
/*
* If there are any outstanding requests for the current target,
@@ -7910,7 +7562,10 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
/* Build ASC_SCSI_Q */
use_sg = scsi_dma_map(scp);
- if (use_sg != 0) {
+ if (use_sg < 0) {
+ ASC_DBG(1, "failed to map sglist\n");
+ return ASC_BUSY;
+ } else if (use_sg > 0) {
int sgcnt;
struct scatterlist *slp;
struct asc_sg_head *asc_sg_head;
@@ -7975,20 +7630,19 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
* ADV_ERROR(-1) - SG List creation failed
*/
static int
-adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
- int use_sg)
+adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp,
+ ADV_SCSI_REQ_Q *scsiqp, struct scsi_cmnd *scp, int use_sg)
{
- adv_sgblk_t *sgblkp;
- ADV_SCSI_REQ_Q *scsiqp;
+ adv_sgblk_t *sgblkp, *prev_sgblkp;
struct scatterlist *slp;
int sg_elem_cnt;
ADV_SG_BLOCK *sg_block, *prev_sg_block;
- ADV_PADDR sg_block_paddr;
+ dma_addr_t sgblk_paddr;
int i;
- scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);
slp = scsi_sglist(scp);
sg_elem_cnt = use_sg;
+ prev_sgblkp = NULL;
prev_sg_block = NULL;
reqp->sgblkp = NULL;
@@ -7998,7 +7652,9 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
* list. One 'adv_sgblk_t' structure holds NO_OF_SG_PER_BLOCK
* (15) scatter-gather elements.
*/
- if ((sgblkp = boardp->adv_sgblkp) == NULL) {
+ sgblkp = dma_pool_alloc(boardp->adv_sgblk_pool, GFP_ATOMIC,
+ &sgblk_paddr);
+ if (!sgblkp) {
ASC_DBG(1, "no free adv_sgblk_t\n");
ASC_STATS(scp->device->host, adv_build_nosg);
@@ -8009,24 +7665,16 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
while ((sgblkp = reqp->sgblkp) != NULL) {
/* Remove 'sgblkp' from the request list. */
reqp->sgblkp = sgblkp->next_sgblkp;
-
- /* Add 'sgblkp' to the board free list. */
- sgblkp->next_sgblkp = boardp->adv_sgblkp;
- boardp->adv_sgblkp = sgblkp;
+ sgblkp->next_sgblkp = NULL;
+ dma_pool_free(boardp->adv_sgblk_pool, sgblkp,
+ sgblkp->sg_addr);
}
return ASC_BUSY;
}
-
/* Complete 'adv_sgblk_t' board allocation. */
- boardp->adv_sgblkp = sgblkp->next_sgblkp;
+ sgblkp->sg_addr = sgblk_paddr;
sgblkp->next_sgblkp = NULL;
-
- /*
- * Get 8 byte aligned virtual and physical addresses
- * for the allocated ADV_SG_BLOCK structure.
- */
- sg_block = (ADV_SG_BLOCK *)ADV_8BALIGN(&sgblkp->sg_block);
- sg_block_paddr = virt_to_bus(sg_block);
+ sg_block = &sgblkp->sg_block;
/*
* Check if this is the first 'adv_sgblk_t' for the
@@ -8041,17 +7689,16 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
* address pointers.
*/
scsiqp->sg_list_ptr = sg_block;
- scsiqp->sg_real_addr = cpu_to_le32(sg_block_paddr);
+ scsiqp->sg_real_addr = cpu_to_le32(sgblk_paddr);
} else {
/* Request's second or later scatter-gather block. */
- sgblkp->next_sgblkp = reqp->sgblkp;
- reqp->sgblkp = sgblkp;
+ prev_sgblkp->next_sgblkp = sgblkp;
/*
* Point the previous ADV_SG_BLOCK structure to
* the newly allocated ADV_SG_BLOCK structure.
*/
- prev_sg_block->sg_ptr = cpu_to_le32(sg_block_paddr);
+ prev_sg_block->sg_ptr = cpu_to_le32(sgblk_paddr);
}
for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
@@ -8062,15 +7709,19 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
ASC_STATS_ADD(scp->device->host, xfer_sect,
DIV_ROUND_UP(sg_dma_len(slp), 512));
- if (--sg_elem_cnt == 0) { /* Last ADV_SG_BLOCK and scatter-gather entry. */
+ if (--sg_elem_cnt == 0) {
+ /*
+ * Last ADV_SG_BLOCK and scatter-gather entry.
+ */
sg_block->sg_cnt = i + 1;
- sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */
+ sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */
return ADV_SUCCESS;
}
slp++;
}
sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
prev_sg_block = sg_block;
+ prev_sgblkp = sgblkp;
}
}
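SG blocks now come out of a DMA pool instead of a board-private freelist, so the bus address is produced by the allocator and virt_to_bus() disappears. The lifecycle as wired up by this patch:

        /* once per board (advansys_wide_init_chip, below) */
        board->adv_sgblk_pool = dma_pool_create("adv_sgblk", board->dev,
                                                sgblk_pool_size, 32, 0);

        /* per request (adv_get_sglist): sgblk_paddr is the address the
         * chip sees; it is kept in sgblkp->sg_addr for the later free */
        sgblkp = dma_pool_alloc(boardp->adv_sgblk_pool, GFP_ATOMIC,
                                &sgblk_paddr);

        /* completion or error unwind */
        dma_pool_free(boardp->adv_sgblk_pool, sgblkp, sgblkp->sg_addr);

The 32-byte pool alignment also removes the manual ADV_8BALIGN() fixup the old code needed on each block.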
@@ -8080,38 +7731,35 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
* If an adv_req_t can not be allocated to issue the request,
* then return ASC_BUSY. If an error occurs, then return ASC_ERROR.
*
- * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the
+ * Multi-byte fields in the ADV_SCSI_REQ_Q that are used by the
* microcode for DMA addresses or math operations are byte swapped
* to little-endian order.
*/
static int
adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
- ADV_SCSI_REQ_Q **adv_scsiqpp)
+ adv_req_t **adv_reqpp)
{
+ u32 srb_tag = scp->request->tag;
adv_req_t *reqp;
ADV_SCSI_REQ_Q *scsiqp;
- int i;
int ret;
int use_sg;
+ dma_addr_t sense_addr;
/*
* Allocate an adv_req_t structure from the board to execute
* the command.
*/
- if (boardp->adv_reqp == NULL) {
+ reqp = &boardp->adv_reqp[srb_tag];
+ if (reqp->cmndp && reqp->cmndp != scp) {
ASC_DBG(1, "no free adv_req_t\n");
ASC_STATS(scp->device->host, adv_build_noreq);
return ASC_BUSY;
- } else {
- reqp = boardp->adv_reqp;
- boardp->adv_reqp = reqp->next_reqp;
- reqp->next_reqp = NULL;
}
- /*
- * Get 32-byte aligned ADV_SCSI_REQ_Q and ADV_SG_BLOCK pointers.
- */
- scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);
+ reqp->req_addr = boardp->adv_reqp_addr + (srb_tag * sizeof(adv_req_t));
+
+ scsiqp = &reqp->scsi_req_q;
/*
* Initialize the structure.
@@ -8119,14 +7767,15 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
scsiqp->cntl = scsiqp->scsi_cntl = scsiqp->done_status = 0;
/*
- * Set the ADV_SCSI_REQ_Q 'srb_ptr' to point to the adv_req_t structure.
+ * Set the srb_tag to the command tag.
*/
- scsiqp->srb_ptr = ADV_VADDR_TO_U32(reqp);
+ scsiqp->srb_tag = srb_tag;
/*
- * Set the adv_req_t 'cmndp' to point to the struct scsi_cmnd structure.
+ * Set 'host_scribble' to point to the adv_req_t structure.
*/
reqp->cmndp = scp;
+ scp->host_scribble = (void *)reqp;
/*
* Build the ADV_SCSI_REQ_Q request.
@@ -8135,28 +7784,38 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
/* Set CDB length and copy it to the request structure. */
scsiqp->cdb_len = scp->cmd_len;
/* Copy first 12 CDB bytes to cdb[]. */
- for (i = 0; i < scp->cmd_len && i < 12; i++) {
- scsiqp->cdb[i] = scp->cmnd[i];
- }
+ memcpy(scsiqp->cdb, scp->cmnd, scp->cmd_len < 12 ? scp->cmd_len : 12);
/* Copy last 4 CDB bytes, if present, to cdb16[]. */
- for (; i < scp->cmd_len; i++) {
- scsiqp->cdb16[i - 12] = scp->cmnd[i];
+ if (scp->cmd_len > 12) {
+ int cdb16_len = scp->cmd_len - 12;
+
+ memcpy(scsiqp->cdb16, &scp->cmnd[12], cdb16_len);
}
scsiqp->target_id = scp->device->id;
scsiqp->target_lun = scp->device->lun;
- scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0]));
- scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
+ sense_addr = dma_map_single(boardp->dev, scp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(boardp->dev, sense_addr)) {
+ ASC_DBG(1, "failed to map sense buffer\n");
+ ASC_STATS(scp->device->host, adv_build_noreq);
+ return ASC_BUSY;
+ }
+ scsiqp->sense_addr = cpu_to_le32(sense_addr);
+ scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
/* Build ADV_SCSI_REQ_Q */
use_sg = scsi_dma_map(scp);
- if (use_sg == 0) {
+ if (use_sg < 0) {
+ ASC_DBG(1, "failed to map SG list\n");
+ ASC_STATS(scp->device->host, adv_build_noreq);
+ return ASC_BUSY;
+ } else if (use_sg == 0) {
/* Zero-length transfer */
reqp->sgblkp = NULL;
scsiqp->data_cnt = 0;
- scsiqp->vdata_addr = NULL;
scsiqp->data_addr = 0;
scsiqp->sg_list_ptr = NULL;
@@ -8168,27 +7827,20 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
scp->device->host->sg_tablesize);
scsi_dma_unmap(scp);
scp->result = HOST_BYTE(DID_ERROR);
-
- /*
- * Free the 'adv_req_t' structure by adding it back
- * to the board free list.
- */
- reqp->next_reqp = boardp->adv_reqp;
- boardp->adv_reqp = reqp;
+ reqp->cmndp = NULL;
+ scp->host_scribble = NULL;
return ASC_ERROR;
}
scsiqp->data_cnt = cpu_to_le32(scsi_bufflen(scp));
- ret = adv_get_sglist(boardp, reqp, scp, use_sg);
+ ret = adv_get_sglist(boardp, reqp, scsiqp, scp, use_sg);
if (ret != ADV_SUCCESS) {
- /*
- * Free the adv_req_t structure by adding it back to
- * the board free list.
- */
- reqp->next_reqp = boardp->adv_reqp;
- boardp->adv_reqp = reqp;
+ scsi_dma_unmap(scp);
+ scp->result = HOST_BYTE(DID_ERROR);
+ reqp->cmndp = NULL;
+ scp->host_scribble = NULL;
return ret;
}
@@ -8201,7 +7853,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);
- *adv_scsiqpp = scsiqp;
+ *adv_reqpp = reqp;
return ASC_NOERROR;
}
@@ -8358,8 +8010,8 @@ AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
int i;
ASC_SG_HEAD *sg_head;
ASC_SG_LIST_Q scsi_sg_q;
- ASC_DCNT saved_data_addr;
- ASC_DCNT saved_data_cnt;
+ __le32 saved_data_addr;
+ __le32 saved_data_cnt;
PortAddr iop_base;
ushort sg_list_dwords;
ushort sg_index;
@@ -8371,42 +8023,15 @@ AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
sg_head = scsiq->sg_head;
saved_data_addr = scsiq->q1.data_addr;
saved_data_cnt = scsiq->q1.data_cnt;
- scsiq->q1.data_addr = (ASC_PADDR) sg_head->sg_list[0].addr;
- scsiq->q1.data_cnt = (ASC_DCNT) sg_head->sg_list[0].bytes;
-#if CC_VERY_LONG_SG_LIST
+ scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr);
+ scsiq->q1.data_cnt = cpu_to_le32(sg_head->sg_list[0].bytes);
/*
- * If sg_head->entry_cnt is greater than ASC_MAX_SG_LIST
- * then not all SG elements will fit in the allocated queues.
- * The rest of the SG elements will be copied when the RISC
- * completes the SG elements that fit and halts.
+ * Set sg_entry_cnt to be the number of SG elements that
+ * will fit in the allocated SG queues. It is minus 1, because
+ * the first SG element is handled above.
*/
- if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
- /*
- * Set sg_entry_cnt to be the number of SG elements that
- * will fit in the allocated SG queues. It is minus 1, because
- * the first SG element is handled above. ASC_MAX_SG_LIST is
- * already inflated by 1 to account for this. For example it
- * may be 50 which is 1 + 7 queues * 7 SG elements.
- */
- sg_entry_cnt = ASC_MAX_SG_LIST - 1;
+ sg_entry_cnt = sg_head->entry_cnt - 1;
- /*
- * Keep track of remaining number of SG elements that will
- * need to be handled from a_isr.c.
- */
- scsiq->remain_sg_entry_cnt =
- sg_head->entry_cnt - ASC_MAX_SG_LIST;
- } else {
-#endif /* CC_VERY_LONG_SG_LIST */
- /*
- * Set sg_entry_cnt to be the number of SG elements that
- * will fit in the allocated SG queues. It is minus 1, because
- * the first SG element is handled above.
- */
- sg_entry_cnt = sg_head->entry_cnt - 1;
-#if CC_VERY_LONG_SG_LIST
- }
-#endif /* CC_VERY_LONG_SG_LIST */
if (sg_entry_cnt != 0) {
scsiq->q1.cntl |= QC_SG_HEAD;
q_addr = ASC_QNO_TO_QADDR(q_no);
@@ -8431,21 +8056,7 @@ AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
ASC_SG_LIST_PER_Q - 1;
}
} else {
-#if CC_VERY_LONG_SG_LIST
- /*
- * This is the last SG queue in the list of
- * allocated SG queues. If there are more
- * SG elements than will fit in the allocated
- * queues, then set the QCSG_SG_XFER_MORE flag.
- */
- if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
- scsi_sg_q.cntl |= QCSG_SG_XFER_MORE;
- } else {
-#endif /* CC_VERY_LONG_SG_LIST */
- scsi_sg_q.cntl |= QCSG_SG_XFER_END;
-#if CC_VERY_LONG_SG_LIST
- }
-#endif /* CC_VERY_LONG_SG_LIST */
+ scsi_sg_q.cntl |= QCSG_SG_XFER_END;
sg_list_dwords = sg_entry_cnt << 1;
if (i == 0) {
scsi_sg_q.sg_list_cnt = sg_entry_cnt;
@@ -8550,9 +8161,9 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
PortAddr iop_base;
int sta;
int n_q_required;
- int disable_syn_offset_one_fix;
+ bool disable_syn_offset_one_fix;
int i;
- ASC_PADDR addr;
+ u32 addr;
ushort sg_entry_cnt = 0;
ushort sg_entry_cnt_minus_one = 0;
uchar target_ix;
@@ -8562,12 +8173,12 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
uchar scsi_cmd;
uchar disable_cmd;
ASC_SG_HEAD *sg_head;
- ASC_DCNT data_cnt;
+ unsigned long data_cnt;
iop_base = asc_dvc->iop_base;
sg_head = scsiq->sg_head;
if (asc_dvc->err_code != 0)
- return (ERR);
+ return ASC_ERROR;
scsiq->q1.q_no = 0;
if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) {
scsiq->q1.extra_bytes = 0;
@@ -8593,46 +8204,41 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
}
if (asc_dvc->in_critical_cnt != 0) {
AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY);
- return (ERR);
+ return ASC_ERROR;
}
asc_dvc->in_critical_cnt++;
if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
if ((sg_entry_cnt = sg_head->entry_cnt) == 0) {
asc_dvc->in_critical_cnt--;
- return (ERR);
+ return ASC_ERROR;
}
-#if !CC_VERY_LONG_SG_LIST
if (sg_entry_cnt > ASC_MAX_SG_LIST) {
asc_dvc->in_critical_cnt--;
- return (ERR);
+ return ASC_ERROR;
}
-#endif /* !CC_VERY_LONG_SG_LIST */
if (sg_entry_cnt == 1) {
- scsiq->q1.data_addr =
- (ADV_PADDR)sg_head->sg_list[0].addr;
- scsiq->q1.data_cnt =
- (ADV_DCNT)sg_head->sg_list[0].bytes;
+ scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr);
+ scsiq->q1.data_cnt = cpu_to_le32(sg_head->sg_list[0].bytes);
scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE);
}
sg_entry_cnt_minus_one = sg_entry_cnt - 1;
}
scsi_cmd = scsiq->cdbptr[0];
- disable_syn_offset_one_fix = FALSE;
+ disable_syn_offset_one_fix = false;
if ((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) &&
!(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) {
if (scsiq->q1.cntl & QC_SG_HEAD) {
data_cnt = 0;
for (i = 0; i < sg_entry_cnt; i++) {
- data_cnt +=
- (ADV_DCNT)le32_to_cpu(sg_head->sg_list[i].
- bytes);
+ data_cnt += le32_to_cpu(sg_head->sg_list[i].
+ bytes);
}
} else {
data_cnt = le32_to_cpu(scsiq->q1.data_cnt);
}
if (data_cnt != 0UL) {
if (data_cnt < 512UL) {
- disable_syn_offset_one_fix = TRUE;
+ disable_syn_offset_one_fix = true;
} else {
for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST;
i++) {
@@ -8643,7 +8249,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
}
if (scsi_cmd == disable_cmd) {
disable_syn_offset_one_fix =
- TRUE;
+ true;
break;
}
}
@@ -8662,12 +8268,11 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
if ((scsi_cmd == READ_6) ||
(scsi_cmd == READ_10)) {
- addr =
- (ADV_PADDR)le32_to_cpu(sg_head->
+ addr = le32_to_cpu(sg_head->
sg_list
[sg_entry_cnt_minus_one].
addr) +
- (ADV_DCNT)le32_to_cpu(sg_head->
+ le32_to_cpu(sg_head->
sg_list
[sg_entry_cnt_minus_one].
bytes);
@@ -8688,8 +8293,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
sg_list
[sg_entry_cnt_minus_one].
bytes);
- data_cnt -=
- (ASC_DCNT) extra_bytes;
+ data_cnt -= extra_bytes;
sg_head->
sg_list
[sg_entry_cnt_minus_one].
@@ -8700,16 +8304,6 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
}
}
sg_head->entry_to_copy = sg_head->entry_cnt;
-#if CC_VERY_LONG_SG_LIST
- /*
- * Set the sg_entry_cnt to the maximum possible. The rest of
- * the SG elements will be copied when the RISC completes the
- * SG elements that fit and halts.
- */
- if (sg_entry_cnt > ASC_MAX_SG_LIST) {
- sg_entry_cnt = ASC_MAX_SG_LIST;
- }
-#endif /* CC_VERY_LONG_SG_LIST */
n_q_required = AscSgListToQueue(sg_entry_cnt);
if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >=
(uint) n_q_required)
@@ -8744,8 +8338,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
== 0) {
scsiq->q2.tag_code |=
ASC_TAG_FLAG_EXTRA_BYTES;
- data_cnt -= (ASC_DCNT)
- extra_bytes;
+ data_cnt -= extra_bytes;
scsiq->q1.data_cnt =
cpu_to_le32
(data_cnt);
@@ -8780,7 +8373,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
* If 'done_status' is not set to QD_DO_RETRY, then 'error_retry' will be
* set to SCSI_MAX_RETRY.
*
- * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the microcode
+ * Multi-byte fields in the ADV_SCSI_REQ_Q that are used by the microcode
* for DMA addresses or math operations are byte swapped to little-endian
* order.
*
@@ -8791,11 +8384,11 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
* ADV_ERROR(-1) - Invalid ADV_SCSI_REQ_Q request structure
* host IC error.
*/
-static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
+static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, adv_req_t *reqp)
{
AdvPortAddr iop_base;
- ADV_PADDR req_paddr;
ADV_CARR_T *new_carrp;
+ ADV_SCSI_REQ_Q *scsiq = &reqp->scsi_req_q;
/*
* The ADV_SCSI_REQ_Q 'target_id' field should never exceed ADV_MAX_TID.
@@ -8812,39 +8405,19 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
* Allocate a carrier ensuring at least one carrier always
* remains on the freelist and initialize fields.
*/
- if ((new_carrp = asc_dvc->carr_freelist) == NULL) {
+ new_carrp = adv_get_next_carrier(asc_dvc);
+ if (!new_carrp) {
+ ASC_DBG(1, "No free carriers\n");
return ADV_BUSY;
}
- asc_dvc->carr_freelist = (ADV_CARR_T *)
- ADV_U32_TO_VADDR(le32_to_cpu(new_carrp->next_vpa));
- asc_dvc->carr_pending_cnt++;
-
- /*
- * Set the carrier to be a stopper by setting 'next_vpa'
- * to the stopper value. The current stopper will be changed
- * below to point to the new stopper.
- */
- new_carrp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
- /*
- * Clear the ADV_SCSI_REQ_Q done flag.
- */
- scsiq->a_flag &= ~ADV_SCSIQ_DONE;
-
- req_paddr = virt_to_bus(scsiq);
- BUG_ON(req_paddr & 31);
- /* Wait for assertion before making little-endian */
- req_paddr = cpu_to_le32(req_paddr);
+ asc_dvc->carr_pending_cnt++;
/* Save virtual and physical address of ADV_SCSI_REQ_Q and carrier. */
- scsiq->scsiq_ptr = cpu_to_le32(ADV_VADDR_TO_U32(scsiq));
- scsiq->scsiq_rptr = req_paddr;
+ scsiq->scsiq_ptr = cpu_to_le32(scsiq->srb_tag);
+ scsiq->scsiq_rptr = cpu_to_le32(reqp->req_addr);
- scsiq->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->icq_sp));
- /*
- * Every ADV_CARR_T.carr_pa is byte swapped to little-endian
- * order during initialization.
- */
+ scsiq->carr_va = asc_dvc->icq_sp->carr_va;
scsiq->carr_pa = asc_dvc->icq_sp->carr_pa;
/*
@@ -8852,7 +8425,7 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
* the microcode. The newly allocated stopper will become the new
* stopper.
*/
- asc_dvc->icq_sp->areq_vpa = req_paddr;
+ asc_dvc->icq_sp->areq_vpa = scsiq->scsiq_rptr;
/*
* Set the 'next_vpa' pointer for the old stopper to be the
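Submission keeps the classic stopper handoff, only with precomputed carrier addresses. Condensed, with the steps below this hunk reconstructed from the surrounding comments (the physical-address link is an assumption based on the comment here):

        /* 1. hand the request to the current ICQ stopper */
        asc_dvc->icq_sp->areq_vpa = scsiq->scsiq_rptr;

        /* 2. link the old stopper to the new one; the RISC follows
         *    physical addresses, so carr_pa was set at init time */
        asc_dvc->icq_sp->next_vpa = new_carrp->carr_pa;

        /* 3. the new carrier, already marked ADV_CQ_STOPPER by
         *    adv_get_next_carrier(), becomes the stopper */
        asc_dvc->icq_sp = new_carrp;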
@@ -8907,11 +8480,10 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var;
struct asc_scsi_q asc_scsi_q;
- /* asc_build_req() can not return ASC_BUSY. */
ret = asc_build_req(boardp, scp, &asc_scsi_q);
- if (ret == ASC_ERROR) {
+ if (ret != ASC_NOERROR) {
ASC_STATS(scp->device->host, build_error);
- return ASC_ERROR;
+ return ret;
}
ret = AscExeScsiQueue(asc_dvc, &asc_scsi_q);
@@ -8919,9 +8491,9 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
err_code = asc_dvc->err_code;
} else {
ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var;
- ADV_SCSI_REQ_Q *adv_scsiqp;
+ adv_req_t *adv_reqp;
- switch (adv_build_req(boardp, scp, &adv_scsiqp)) {
+ switch (adv_build_req(boardp, scp, &adv_reqp)) {
case ASC_NOERROR:
ASC_DBG(3, "adv_build_req ASC_NOERROR\n");
break;
@@ -8941,7 +8513,7 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
return ASC_ERROR;
}
- ret = AdvExeScsiQueue(adv_dvc, adv_scsiqp);
+ ret = AdvExeScsiQueue(adv_dvc, adv_reqp);
err_code = adv_dvc->err_code;
}
@@ -8956,6 +8528,7 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
ASC_DBG(1, "ExeScsiQueue() ASC_NOERROR\n");
break;
case ASC_BUSY:
+ ASC_DBG(1, "ExeScsiQueue() ASC_BUSY\n");
ASC_STATS(scp->device->host, exe_busy);
break;
case ASC_ERROR:
@@ -9122,7 +8695,7 @@ static int AscStopQueueExe(PortAddr iop_base)
return (0);
}
-static ASC_DCNT AscGetMaxDmaCount(ushort bus_type)
+static unsigned int AscGetMaxDmaCount(ushort bus_type)
{
if (bus_type & ASC_IS_ISA)
return ASC_MAX_ISA_DMA_COUNT;
@@ -9183,15 +8756,13 @@ static uchar AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
}
#endif /* CONFIG_ISA */
-static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
+static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
{
int i;
PortAddr iop_base;
- ushort warn_code;
uchar chip_version;
iop_base = asc_dvc->iop_base;
- warn_code = 0;
asc_dvc->err_code = 0;
if ((asc_dvc->bus_type &
(ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
@@ -9205,7 +8776,7 @@ static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
/* asc_dvc->init_state initialized in AscInitGetConfig(). */
asc_dvc->sdtr_done = 0;
asc_dvc->cur_total_qng = 0;
- asc_dvc->is_in_int = 0;
+ asc_dvc->is_in_int = false;
asc_dvc->in_critical_cnt = 0;
asc_dvc->last_q_shortage = 0;
asc_dvc->use_tagged_qng = 0;
@@ -9267,7 +8838,6 @@ static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *)0L;
asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG;
}
- return warn_code;
}
static int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
@@ -9385,7 +8955,7 @@ static int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
int retry;
retry = 0;
- while (TRUE) {
+ while (true) {
AscSetChipEEPData(iop_base, data_reg);
mdelay(1);
read_back = AscGetChipEEPData(iop_base);
@@ -9521,7 +9091,7 @@ static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
int n_error;
retry = 0;
- while (TRUE) {
+ while (true) {
if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf,
bus_type)) == 0) {
break;
@@ -9533,7 +9103,7 @@ static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
return n_error;
}
-static ushort AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
+static int AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
{
ASCEEP_CONFIG eep_config_buf;
ASCEEP_CONFIG *eep_config;
@@ -9548,13 +9118,13 @@ static ushort AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
warn_code = 0;
AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE);
AscStopQueueExe(iop_base);
- if ((AscStopChip(iop_base) == FALSE) ||
+ if (!AscStopChip(iop_base) ||
(AscGetChipScsiCtrl(iop_base) != 0)) {
asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE;
AscResetChipAndScsiBus(asc_dvc);
mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */
}
- if (AscIsChipHalted(iop_base) == FALSE) {
+ if (!AscIsChipHalted(iop_base)) {
asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
return (warn_code);
}
@@ -9709,8 +9279,8 @@ static int AscInitGetConfig(struct Scsi_Host *shost)
return asc_dvc->err_code;
if (AscFindSignature(asc_dvc->iop_base)) {
- warn_code |= AscInitAscDvcVar(asc_dvc);
- warn_code |= AscInitFromEEP(asc_dvc);
+ AscInitAscDvcVar(asc_dvc);
+ warn_code = AscInitFromEEP(asc_dvc);
asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG;
if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
@@ -9866,6 +9436,7 @@ static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
* on big-endian platforms so char fields read as words are actually being
* unswapped on big-endian platforms.
*/
+#ifdef CONFIG_PCI
static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config = {
ADV_EEPROM_BIOS_ENABLE, /* cfg_lsw */
0x0000, /* cfg_msw */
@@ -10202,7 +9773,6 @@ static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar = {
0 /* 63 reserved */
};
-#ifdef CONFIG_PCI
/*
* Wait for EEPROM command to complete
*/
@@ -11232,7 +10802,7 @@ static struct scsi_host_template advansys_template = {
.name = DRV_NAME,
.info = advansys_info,
.queuecommand = advansys_queuecommand,
- .eh_bus_reset_handler = advansys_reset,
+ .eh_host_reset_handler = advansys_reset,
.bios_param = advansys_biosparam,
.slave_configure = advansys_slave_configure,
/*
@@ -11240,7 +10810,7 @@ static struct scsi_host_template advansys_template = {
* must be set. The flag will be cleared in advansys_board_found
* for non-ISA adapters.
*/
- .unchecked_isa_dma = 1,
+ .unchecked_isa_dma = true,
/*
* All adapters controlled by this driver are capable of large
* scatter-gather lists. According to the mid-level SCSI documentation
@@ -11249,26 +10819,25 @@ static struct scsi_host_template advansys_template = {
* by enabling clustering, I/O throughput increases as well.
*/
.use_clustering = ENABLE_CLUSTERING,
+ .use_blk_tags = 1,
};
static int advansys_wide_init_chip(struct Scsi_Host *shost)
{
struct asc_board *board = shost_priv(shost);
struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
- int req_cnt = 0;
- adv_req_t *reqp = NULL;
- int sg_cnt = 0;
- adv_sgblk_t *sgp;
+ size_t sgblk_pool_size;
int warn_code, err_code;
/*
* Allocate buffer carrier structures. The total size
- * is about 4 KB, so allocate all at once.
+ * is about 8 KB, so allocate all at once.
*/
- adv_dvc->carrier_buf = kmalloc(ADV_CARRIER_BUFSIZE, GFP_KERNEL);
- ASC_DBG(1, "carrier_buf 0x%p\n", adv_dvc->carrier_buf);
+ adv_dvc->carrier = dma_alloc_coherent(board->dev,
+ ADV_CARRIER_BUFSIZE, &adv_dvc->carrier_addr, GFP_KERNEL);
+ ASC_DBG(1, "carrier 0x%p\n", adv_dvc->carrier);
- if (!adv_dvc->carrier_buf)
+ if (!adv_dvc->carrier)
goto kmalloc_failed;
/*
@@ -11276,54 +10845,34 @@ static int advansys_wide_init_chip(struct Scsi_Host *shost)
* board. The total size is about 16 KB, so allocate all at once.
* If the allocation fails decrement and try again.
*/
- for (req_cnt = adv_dvc->max_host_qng; req_cnt > 0; req_cnt--) {
- reqp = kmalloc(sizeof(adv_req_t) * req_cnt, GFP_KERNEL);
-
- ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", reqp, req_cnt,
- (ulong)sizeof(adv_req_t) * req_cnt);
-
- if (reqp)
- break;
+ board->adv_reqp_size = adv_dvc->max_host_qng * sizeof(adv_req_t);
+ if (board->adv_reqp_size & 0x1f) {
+ ASC_DBG(1, "unaligned reqp %lu bytes\n", sizeof(adv_req_t));
+ board->adv_reqp_size = ADV_32BALIGN(board->adv_reqp_size);
}
+ board->adv_reqp = dma_alloc_coherent(board->dev, board->adv_reqp_size,
+ &board->adv_reqp_addr, GFP_KERNEL);
- if (!reqp)
+ if (!board->adv_reqp)
goto kmalloc_failed;
- adv_dvc->orig_reqp = reqp;
+ ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", board->adv_reqp,
+ adv_dvc->max_host_qng, board->adv_reqp_size);
/*
* Allocate up to ADV_TOT_SG_BLOCK request structures for
* the Wide board. Each structure is about 136 bytes.
*/
- board->adv_sgblkp = NULL;
- for (sg_cnt = 0; sg_cnt < ADV_TOT_SG_BLOCK; sg_cnt++) {
- sgp = kmalloc(sizeof(adv_sgblk_t), GFP_KERNEL);
+ sgblk_pool_size = sizeof(adv_sgblk_t) * ADV_TOT_SG_BLOCK;
+ board->adv_sgblk_pool = dma_pool_create("adv_sgblk", board->dev,
+ sgblk_pool_size, 32, 0);
- if (!sgp)
- break;
-
- sgp->next_sgblkp = board->adv_sgblkp;
- board->adv_sgblkp = sgp;
-
- }
-
- ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", sg_cnt, sizeof(adv_sgblk_t),
- sizeof(adv_sgblk_t) * sg_cnt);
+ ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", ADV_TOT_SG_BLOCK,
+ sizeof(adv_sgblk_t), sgblk_pool_size);
- if (!board->adv_sgblkp)
+ if (!board->adv_sgblk_pool)
goto kmalloc_failed;
- /*
- * Point 'adv_reqp' to the request structures and
- * link them together.
- */
- req_cnt--;
- reqp[req_cnt].next_reqp = NULL;
- for (; req_cnt > 0; req_cnt--) {
- reqp[req_cnt - 1].next_reqp = &reqp[req_cnt];
- }
- board->adv_reqp = &reqp[0];
-
if (adv_dvc->chip_type == ADV_CHIP_ASC3550) {
ASC_DBG(2, "AdvInitAsc3550Driver()\n");
warn_code = AdvInitAsc3550Driver(adv_dvc);
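With the carrier buffer and the adv_req_t array in dma_alloc_coherent() memory, both the CPU pointer and the bus address of any request follow directly from its tag, which is exactly what adv_build_req relies on:

        /* from adv_build_req(): O(1) per-tag lookup, no freelist walk
         * and no virt_to_bus() */
        reqp = &board->adv_reqp[srb_tag];
        reqp->req_addr = board->adv_reqp_addr +
                         (srb_tag * sizeof(adv_req_t));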
@@ -11353,14 +10902,20 @@ static int advansys_wide_init_chip(struct Scsi_Host *shost)
static void advansys_wide_free_mem(struct asc_board *board)
{
struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
- kfree(adv_dvc->carrier_buf);
- adv_dvc->carrier_buf = NULL;
- kfree(adv_dvc->orig_reqp);
- adv_dvc->orig_reqp = board->adv_reqp = NULL;
- while (board->adv_sgblkp) {
- adv_sgblk_t *sgp = board->adv_sgblkp;
- board->adv_sgblkp = sgp->next_sgblkp;
- kfree(sgp);
+
+ if (adv_dvc->carrier) {
+ dma_free_coherent(board->dev, ADV_CARRIER_BUFSIZE,
+ adv_dvc->carrier, adv_dvc->carrier_addr);
+ adv_dvc->carrier = NULL;
+ }
+ if (board->adv_reqp) {
+ dma_free_coherent(board->dev, board->adv_reqp_size,
+ board->adv_reqp, board->adv_reqp_addr);
+ board->adv_reqp = NULL;
+ }
+ if (board->adv_sgblk_pool) {
+ dma_pool_destroy(board->adv_sgblk_pool);
+ board->adv_sgblk_pool = NULL;
}
}
@@ -11431,28 +10986,28 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
switch (asc_dvc_varp->bus_type) {
#ifdef CONFIG_ISA
case ASC_IS_ISA:
- shost->unchecked_isa_dma = TRUE;
+ shost->unchecked_isa_dma = true;
share_irq = 0;
break;
case ASC_IS_VL:
- shost->unchecked_isa_dma = FALSE;
+ shost->unchecked_isa_dma = false;
share_irq = 0;
break;
case ASC_IS_EISA:
- shost->unchecked_isa_dma = FALSE;
+ shost->unchecked_isa_dma = false;
share_irq = IRQF_SHARED;
break;
#endif /* CONFIG_ISA */
#ifdef CONFIG_PCI
case ASC_IS_PCI:
- shost->unchecked_isa_dma = FALSE;
+ shost->unchecked_isa_dma = false;
share_irq = IRQF_SHARED;
break;
#endif /* CONFIG_PCI */
default:
shost_printk(KERN_ERR, shost, "unknown adapter type: "
"%d\n", asc_dvc_varp->bus_type);
- shost->unchecked_isa_dma = TRUE;
+ shost->unchecked_isa_dma = false;
share_irq = 0;
break;
}
@@ -11471,7 +11026,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
* For Wide boards set PCI information before calling
* AdvInitGetConfig().
*/
- shost->unchecked_isa_dma = FALSE;
+ shost->unchecked_isa_dma = false;
share_irq = IRQF_SHARED;
ASC_DBG(2, "AdvInitGetConfig()\n");
@@ -11656,24 +11211,11 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
/* Set maximum number of queues the adapter can handle. */
shost->can_queue = adv_dvc_varp->max_host_qng;
}
-
- /*
- * Following v1.3.89, 'cmd_per_lun' is no longer needed
- * and should be set to zero.
- *
- * But because of a bug introduced in v1.3.89 if the driver is
- * compiled as a module and 'cmd_per_lun' is zero, the Mid-Level
- * SCSI function 'allocate_device' will panic. To allow the driver
- * to work as a module in these kernels set 'cmd_per_lun' to 1.
- *
- * Note: This is wrong. cmd_per_lun should be set to the depth
- * you want on untagged devices always.
- #ifdef MODULE
- */
- shost->cmd_per_lun = 1;
-/* #else
- shost->cmd_per_lun = 0;
-#endif */
+ ret = scsi_init_shared_tag_map(shost, shost->can_queue);
+ if (ret) {
+ shost_printk(KERN_ERR, shost, "init tag map failed\n");
+ goto err_free_dma;
+ }
/*
* Set the maximum number of scatter-gather elements the
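Setting .use_blk_tags and calling scsi_init_shared_tag_map() (the pre-blk-mq API of this era) is what makes scp->request->tag valid for every command, tagged or untagged, so both build paths can index by it:

        /* probe time: one tag map sized to the adapter queue depth */
        ret = scsi_init_shared_tag_map(shost, shost->can_queue);
        if (ret)
                goto err_free_dma;

        /* per command, from then on: a unique tag in [0, can_queue) */
        srb_tag = scp->request->tag;            /* wide boards */
        srb_tag = scp->request->tag + 1;        /* narrow boards */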
@@ -11844,7 +11386,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
err_unmap:
if (boardp->ioremap_addr)
iounmap(boardp->ioremap_addr);
+#ifdef CONFIG_PCI
err_shost:
+#endif
return ret;
}
@@ -11927,6 +11471,7 @@ static int advansys_isa_probe(struct device *dev, unsigned int id)
board = shost_priv(shost);
board->irq = advansys_isa_irq_no(iop_base);
board->dev = dev;
+ board->shost = shost;
err = advansys_board_found(shost, iop_base, ASC_IS_ISA);
if (err)
@@ -12009,6 +11554,7 @@ static int advansys_vlb_probe(struct device *dev, unsigned int id)
board = shost_priv(shost);
board->irq = advansys_vlb_irq_no(iop_base);
board->dev = dev;
+ board->shost = shost;
err = advansys_board_found(shost, iop_base, ASC_IS_VL);
if (err)
@@ -12116,6 +11662,7 @@ static int advansys_eisa_probe(struct device *dev)
board = shost_priv(shost);
board->irq = irq;
board->dev = dev;
+ board->shost = shost;
err = advansys_board_found(shost, ioport, ASC_IS_EISA);
if (!err) {
@@ -12232,6 +11779,7 @@ static int advansys_pci_probe(struct pci_dev *pdev,
board = shost_priv(shost);
board->irq = pdev->irq;
board->dev = &pdev->dev;
+ board->shost = shost;
if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW ||
pdev->device == PCI_DEVICE_ID_38C0800_REV1 ||
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index e31c460a1335..f44d0487236e 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -2922,7 +2922,6 @@ static struct scsi_host_template aha152x_driver_template = {
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
.slave_alloc = aha152x_adjust_queue,
};
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index b95d2779f467..5b8b2937a3fe 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -950,7 +950,6 @@ static struct scsi_host_template driver_template = {
.can_queue = AHA1542_MAILBOXES,
.this_id = 7,
.sg_tablesize = 16,
- .cmd_per_lun = 1,
.unchecked_isa_dma = 1,
.use_clustering = ENABLE_CLUSTERING,
};
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 31ace4bef8fe..bad35ffc015d 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -544,7 +544,6 @@ static struct scsi_host_template aha1740_template = {
.can_queue = AHA1740_ECBS,
.this_id = 7,
.sg_tablesize = AHA1740_SCATTER,
- .cmd_per_lun = AHA1740_CMDLUN,
.use_clustering = ENABLE_CLUSTERING,
.eh_abort_handler = aha1740_eh_abort_handler,
};
diff --git a/drivers/scsi/aha1740.h b/drivers/scsi/aha1740.h
index af23fd6bd795..b0c5603461ca 100644
--- a/drivers/scsi/aha1740.h
+++ b/drivers/scsi/aha1740.h
@@ -149,6 +149,5 @@ struct ecb { /* Enhanced Control Block 6.1 */
#define AHA1740_ECBS 32
#define AHA1740_SCATTER 16
-#define AHA1740_CMDLUN 1
#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 02a2512b76a8..4b135cca42a1 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -65,7 +65,6 @@ static struct scsi_host_template aic94xx_sht = {
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.can_queue = 1,
- .cmd_per_lun = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 32d23212de48..3110736fd337 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -245,7 +245,6 @@ static struct scsi_host_template arxescsi_template = {
.can_queue = 0,
.this_id = 7,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "arxescsi",
};
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index abc66f5263ec..faa1bee07c8a 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -367,7 +367,6 @@ static struct scsi_host_template cumanascsi2_template = {
.this_id = 7,
.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
- .cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "cumanascsi2",
};
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 5bf3c0d134b4..a8ad6880dd91 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -486,7 +486,6 @@ static struct scsi_host_template eesox_template = {
.this_id = 7,
.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
- .cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "eesox",
};
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 0836433e3a2d..05301bc752ee 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -3158,7 +3158,6 @@ static struct scsi_host_template atp870u_template = {
.can_queue = qcnt /* can_queue */,
.this_id = 7 /* SCSI ID */,
.sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
- .cmd_per_lun = ATP870U_CMDLUN /* commands per lun */,
.use_clustering = ENABLE_CLUSTERING,
.max_sectors = ATP870U_MAX_SECTORS,
};
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
index 62bae64a01c1..5cf62566ad42 100644
--- a/drivers/scsi/atp870u.h
+++ b/drivers/scsi/atp870u.h
@@ -10,7 +10,6 @@
#define MAX_SENSE 14
#define qcnt 32
#define ATP870U_SCATTER 128
-#define ATP870U_CMDLUN 1
#define MAX_ADAPTER 8
#define MAX_SCSI_ID 16
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 447cf7ce606e..185391a64d4b 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -452,6 +452,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
(evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
+ phba->get_boot = BE_GET_BOOT_RETRIES;
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@@ -480,6 +481,7 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
case ASYNC_EVENT_NEW_ISCSI_CONN:
case ASYNC_EVENT_NEW_TCP_CONN:
phba->state |= BE_ADAPTER_CHECK_BOOT;
+ phba->get_boot = BE_GET_BOOT_RETRIES;
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG |
BEISCSI_LOG_MBOX,
@@ -488,6 +490,8 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
compl->flags);
break;
default:
+ phba->state |= BE_ADAPTER_CHECK_BOOT;
+ phba->get_boot = BE_GET_BOOT_RETRIES;
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG |
BEISCSI_LOG_MBOX,
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index f11d325fe696..cdfbc5c19cf4 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -304,6 +304,17 @@ struct mgmt_auth_method_format {
struct mgmt_chap_format chap;
} __packed;
+struct be_cmd_req_logout_fw_sess {
+ struct be_cmd_req_hdr hdr; /* dw[4] */
+ uint32_t session_handle;
+} __packed;
+
+struct be_cmd_resp_logout_fw_sess {
+ struct be_cmd_resp_hdr hdr; /* dw[4] */
+#define BEISCSI_MGMT_SESSION_CLOSE 0x20
+ uint32_t session_status;
+} __packed;
+
struct mgmt_conn_login_options {
u8 flags;
u8 header_digest;
@@ -1136,6 +1147,7 @@ struct be_cmd_get_all_if_id_req {
#define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6
#define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7
#define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14
+#define OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET 24
#define OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS 36
#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1f74760ce86c..7a6dbfbccec9 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -668,14 +668,20 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
return ret;
}
+ ret = pci_request_regions(pcidev, DRV_NAME);
+ if (ret) {
+ dev_err(&pcidev->dev,
+ "beiscsi_enable_pci - request region failed\n");
+ goto pci_dev_disable;
+ }
+
pci_set_master(pcidev);
ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
if (ret) {
ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
- pci_disable_device(pcidev);
- return ret;
+ goto pci_region_release;
} else {
ret = pci_set_consistent_dma_mask(pcidev,
DMA_BIT_MASK(32));
@@ -684,11 +690,17 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
if (ret) {
dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
- pci_disable_device(pcidev);
- return ret;
+ goto pci_region_release;
}
}
return 0;
+
+pci_region_release:
+ pci_release_regions(pcidev);
+pci_dev_disable:
+ pci_disable_device(pcidev);
+
+ return ret;
}
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
@@ -1356,8 +1368,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
conn->rxdata_octets += resid;
unmap:
- scsi_dma_unmap(io_task->scsi_cmnd);
- io_task->scsi_cmnd = NULL;
+ if (io_task->scsi_cmnd) {
+ scsi_dma_unmap(io_task->scsi_cmnd);
+ io_task->scsi_cmnd = NULL;
+ }
iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
@@ -2037,11 +2051,16 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
/* Interpret compl as a async link evt */
beiscsi_async_link_state_process(phba,
(struct be_async_event_link_state *) mcc_compl);
- else
+ else {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
"BM_%d : Unsupported Async Event, flags"
" = 0x%08x\n",
mcc_compl->flags);
+ if (phba->state & BE_ADAPTER_LINK_UP) {
+ phba->state |= BE_ADAPTER_CHECK_BOOT;
+ phba->get_boot = BE_GET_BOOT_RETRIES;
+ }
+ }
} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
atomic_dec(&phba->ctrl.mcc_obj.q.used);
@@ -3678,14 +3697,16 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
struct be_ctrl_info *ctrl = &phba->ctrl;
q = &phba->ctrl.mcc_obj.q;
- if (q->created)
+ if (q->created) {
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
- be_queue_free(phba, q);
+ be_queue_free(phba, q);
+ }
q = &phba->ctrl.mcc_obj.cq;
- if (q->created)
+ if (q->created) {
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
- be_queue_free(phba, q);
+ be_queue_free(phba, q);
+ }
}
static void hwi_cleanup(struct beiscsi_hba *phba)
@@ -3729,8 +3750,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
for (i = 0; i < (phba->num_cpus); i++) {
q = &phwi_context->be_cq[i];
- if (q->created)
+ if (q->created) {
+ be_queue_free(phba, q);
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+ }
}
be_mcc_queues_destroy(phba);
@@ -3740,8 +3763,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
eq_for_mcc = 0;
for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
q = &phwi_context->be_eq[i].q;
- if (q->created)
+ if (q->created) {
+ be_queue_free(phba, q);
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
+ }
}
be_cmd_fw_uninit(ctrl);
}
@@ -4328,8 +4353,14 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BM_%d : No boot session\n");
+
+ if (ret == -ENXIO)
+ phba->get_boot = 0;
+
return ret;
}
+ phba->get_boot = 0;
nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
sizeof(*session_resp),
&nonemb_cmd.dma);
@@ -4369,6 +4400,9 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
memcpy(&phba->boot_sess, &session_resp->session_info,
sizeof(struct mgmt_session_info));
+
+ beiscsi_logout_fw_sess(phba,
+ phba->boot_sess.session_handle);
ret = 0;
boot_freemem:
@@ -4580,11 +4614,13 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
spin_unlock_bh(&phba->mgmt_sgl_lock);
}
- if (io_task->mtask_addr)
+ if (io_task->mtask_addr) {
pci_unmap_single(phba->pcidev,
io_task->mtask_addr,
io_task->mtask_data_count,
PCI_DMA_TODEVICE);
+ io_task->mtask_addr = 0;
+ }
}
/**
@@ -5264,6 +5300,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
iscsi_host_free(phba->shost);
pci_disable_pcie_error_reporting(pcidev);
pci_set_drvdata(pcidev, NULL);
+ pci_release_regions(pcidev);
pci_disable_device(pcidev);
}
@@ -5374,8 +5411,14 @@ beiscsi_hw_health_check(struct work_struct *work)
be_eqd_update(phba);
if (phba->state & BE_ADAPTER_CHECK_BOOT) {
- phba->state &= ~BE_ADAPTER_CHECK_BOOT;
- be_check_boot_session(phba);
+ if ((phba->get_boot > 0) && (!phba->boot_kset)) {
+ phba->get_boot--;
+ if (!(phba->get_boot % BE_GET_BOOT_TO))
+ be_check_boot_session(phba);
+ } else {
+ phba->state &= ~BE_ADAPTER_CHECK_BOOT;
+ phba->get_boot = 0;
+ }
}
beiscsi_ue_detect(phba);
@@ -5738,6 +5781,7 @@ hba_free:
iscsi_host_free(phba->shost);
pci_set_drvdata(pcidev, NULL);
disable_pci:
+ pci_release_regions(pcidev);
pci_disable_device(pcidev);
return ret;
}
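beiscsi_enable_pci() now claims the BAR regions and unwinds in reverse order on failure, and the probe error path and beiscsi_remove() gain the matching pci_release_regions(). A minimal sketch of the goto-unwind idiom it adopts (function and region names are illustrative; the consistent-mask handling is omitted for brevity):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Claim resources in order, release them in reverse on any failure. */
static int example_enable_pci(struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, "example");
	if (ret)
		goto dev_disable;

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		goto regions_release;

	return 0;

regions_release:
	pci_release_regions(pdev);
dev_disable:
	pci_disable_device(pdev);
	return ret;
}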
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index e70ea26bbc2b..b8c0c7819cb1 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "10.4.114.0"
+#define BUILD_STR "10.6.0.0"
#define BE_NAME "Avago Technologies OneConnect " \
	"Open-iSCSI Driver version " BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -109,6 +109,9 @@
#define BEISCSI_CLEAN_UNLOAD 0x01
#define BEISCSI_EEH_UNLOAD 0x02
+
+#define BE_GET_BOOT_RETRIES 45
+#define BE_GET_BOOT_TO 20
/**
* hardware needs the async PDU buffers to be posted in multiples of 8
 * So have at least 8 of them by default
@@ -413,6 +416,7 @@ struct beiscsi_hba {
} fw_config;
unsigned int state;
+ int get_boot;
bool fw_timeout;
bool ue_detected;
struct delayed_work beiscsi_hw_check_task;
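BE_GET_BOOT_RETRIES and BE_GET_BOOT_TO drive the new pacing in beiscsi_hw_health_check(): get_boot counts down once per health-check tick, and the expensive boot-session query only fires when the counter crosses a multiple of BE_GET_BOOT_TO. A runnable userspace model of that countdown, showing it yields three queries over the 45-tick budget:

#include <stdio.h>

#define BE_GET_BOOT_RETRIES	45
#define BE_GET_BOOT_TO		20

int main(void)
{
	int get_boot = BE_GET_BOOT_RETRIES;
	int tick = 0;

	while (get_boot > 0) {
		get_boot--;		/* one health-check tick */
		tick++;
		if (!(get_boot % BE_GET_BOOT_TO))
			printf("tick %d: query boot session (get_boot=%d)\n",
			       tick, get_boot);
	}
	return 0;	/* driver then clears BE_ADAPTER_CHECK_BOOT */
}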
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index c2c4d6975fb7..ca4016f20e76 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1707,3 +1707,72 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
(params->dw[offsetof(struct amap_beiscsi_offload_params,
exp_statsn) / 32] + 1));
}
+
+/**
+ * beiscsi_logout_fw_sess()- Firmware Session Logout
+ * @phba: Device priv structure instance
+ * @fw_sess_handle: FW session handle
+ *
+ * Logout from the FW established sessions.
+ * returns
+ * Success: 0
+ * Failure: Non-Zero Value
+ *
+ */
+int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
+ uint32_t fw_sess_handle)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_logout_fw_sess *req;
+ struct be_cmd_resp_logout_fw_sess *resp;
+ unsigned int tag;
+ int rc;
+
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+			"BG_%d : In beiscsi_logout_fw_sess\n");
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : MBX Tag Failure\n");
+ return -EINVAL;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
+ sizeof(struct be_cmd_req_logout_fw_sess));
+
+ /* Set the session handle */
+ req->session_handle = fw_sess_handle;
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : MBX CMD FW_SESSION_LOGOUT_TARGET Failed\n");
+ return -EBUSY;
+ }
+
+ resp = embedded_payload(wrb);
+ if (resp->session_status !=
+ BEISCSI_MGMT_SESSION_CLOSE) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : FW_SESSION_LOGOUT_TARGET resp : 0x%x\n",
+ resp->session_status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 9356b9a86b66..b58a7decbd94 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -338,4 +338,7 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba);
int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
struct be_set_eqd *, int num);
+int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
+ uint32_t fw_sess_handle);
+
#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index e53078d03309..72894378ffcf 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1173,8 +1173,10 @@ static void bnx2i_cleanup_task(struct iscsi_task *task)
bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
spin_unlock_bh(&conn->session->back_lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
+ spin_lock_bh(&conn->session->frwd_lock);
spin_lock_bh(&conn->session->back_lock);
}
bnx2i_iscsi_unmap_sg_list(task->dd_data);
@@ -2093,7 +2095,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
else
/* wait for option-2 conn teardown */
wait_event_interruptible(bnx2i_ep->ofld_wait,
- bnx2i_ep->state != EP_STATE_DISCONN_START);
+ ((bnx2i_ep->state != EP_STATE_DISCONN_START)
+ && (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD)));
if (signal_pending(current))
flush_signals(current);
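bnx2i_cleanup_task() already released back_lock across the sleeping wait; the fix above releases frwd_lock as well, since wait_for_completion_timeout() may sleep and neither _bh spinlock can be held across it. A sketch of the resulting pattern (the session type and timeout are illustrative, not bnx2i's real structures):

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/jiffies.h>

struct demo_session {
	spinlock_t frwd_lock;		/* taken first by callers */
	spinlock_t back_lock;		/* taken second */
	struct completion cleanup_done;
};

/* Entered with both locks held; returns with both held again. */
static void demo_wait_cleanup(struct demo_session *sess, unsigned int ms)
{
	spin_unlock_bh(&sess->back_lock);
	spin_unlock_bh(&sess->frwd_lock);
	wait_for_completion_timeout(&sess->cleanup_done,
				    msecs_to_jiffies(ms));
	spin_lock_bh(&sess->frwd_lock);
	spin_lock_bh(&sess->back_lock);
}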
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 2e66f34ebb79..622bdabc8894 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3928,6 +3928,7 @@ csio_hw_init(struct csio_hw *hw)
evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
if (!evt_entry) {
+ rv = -ENOMEM;
csio_err(hw, "Failed to initialize eventq");
goto err_evtq_cleanup;
}
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 3db4c63978c5..0e2bee937fe8 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1,7 +1,7 @@
/*
* cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
*
- * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
+ * Copyright (C) 2003-2015 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@@ -32,8 +32,8 @@ static unsigned int dbg_level;
#define DRV_MODULE_NAME "cxgb3i"
#define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver"
-#define DRV_MODULE_VERSION "2.0.0"
-#define DRV_MODULE_RELDATE "Jun. 2010"
+#define DRV_MODULE_VERSION "2.0.1-ko"
+#define DRV_MODULE_RELDATE "Apr. 2015"
static char version[] =
DRV_MODULE_DESC " " DRV_MODULE_NAME
@@ -156,7 +156,7 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
const struct l2t_entry *e)
{
- unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win);
+ unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win);
struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;
skb->priority = CPL_PRIORITY_SETUP;
@@ -172,7 +172,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
- V_RCV_BUFSIZ(cxgb3i_rcv_win>>10));
+ V_RCV_BUFSIZ(csk->rcv_win >> 10));
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
@@ -369,7 +369,7 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
V_TX_CPU_IDX(csk->rss_qid));
/* sendbuffer is in units of 32KB. */
- req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15));
+ req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15));
cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}
}
@@ -503,8 +503,8 @@ static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
csk, csk->state, csk->flags, csk->tid);
csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
- if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10))
- csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10);
+ if (csk->rcv_win > (M_RCV_BUFSIZ << 10))
+ csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10);
cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
@@ -988,6 +988,8 @@ static int init_act_open(struct cxgbi_sock *csk)
goto rel_resource;
skb->sk = (struct sock *)csk;
set_arp_failure_handler(skb, act_open_arp_failure);
+ csk->snd_win = cxgb3i_snd_win;
+ csk->rcv_win = cxgb3i_rcv_win;
csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
csk->wr_una_cred = 0;
@@ -1320,8 +1322,6 @@ static void cxgb3i_dev_open(struct t3cdev *t3dev)
cdev->nports = adapter->params.nports;
cdev->mtus = adapter->params.mtus;
cdev->nmtus = NMTUS;
- cdev->snd_win = cxgb3i_snd_win;
- cdev->rcv_win = cxgb3i_rcv_win;
cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
index 20593fd69d8f..b0430c9359e7 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
@@ -1,7 +1,7 @@
/*
* cxgb3i.h: Chelsio S3xx iSCSI driver.
*
- * Copyright (c) 2008 Chelsio Communications, Inc.
+ * Copyright (c) 2008-2015 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index dd00e5fe4a5e..de6feb8964c9 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1,7 +1,7 @@
/*
* cxgb4i.c: Chelsio T4 iSCSI driver.
*
- * Copyright (c) 2010 Chelsio Communications, Inc.
+ * Copyright (c) 2010-2015 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -36,11 +36,12 @@ static unsigned int dbg_level;
#define DRV_MODULE_NAME "cxgb4i"
#define DRV_MODULE_DESC "Chelsio T4/T5 iSCSI Driver"
-#define DRV_MODULE_VERSION "0.9.4"
+#define DRV_MODULE_VERSION "0.9.5-ko"
+#define DRV_MODULE_RELDATE "Apr. 2015"
static char version[] =
DRV_MODULE_DESC " " DRV_MODULE_NAME
- " v" DRV_MODULE_VERSION "\n";
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
@@ -50,11 +51,13 @@ MODULE_LICENSE("GPL");
module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
-static int cxgb4i_rcv_win = 256 * 1024;
+#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
+static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
-static int cxgb4i_snd_win = 128 * 1024;
+#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
+static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
@@ -196,10 +199,10 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
TX_CHAN_V(csk->tx_chan) |
SMAC_SEL_V(csk->smac_idx) |
ULP_MODE_V(ULP_MODE_ISCSI) |
- RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
+ RCV_BUFSIZ_V(csk->rcv_win >> 10);
+
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F |
- (RX_FC_DISABLE_F) |
RSS_QUEUE_V(csk->rss_qid);
if (is_t4(lldi->adapter_type)) {
@@ -228,6 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
} else {
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
+ u32 isn = (prandom_u32() & ~7UL) - 1;
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
@@ -241,7 +245,10 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t)));
- opt2 |= 1 << 31;
+ req->rsvd = cpu_to_be32(isn);
+ opt2 |= T5_ISS_VALID;
+ opt2 |= T5_OPT_2_VALID_F;
+
req->opt2 = cpu_to_be32(opt2);
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -279,7 +286,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
TX_CHAN_V(csk->tx_chan) |
SMAC_SEL_V(csk->smac_idx) |
ULP_MODE_V(ULP_MODE_ISCSI) |
- RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
+ RCV_BUFSIZ_V(csk->rcv_win >> 10);
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F |
@@ -544,7 +551,7 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
flowc->mnemval[5].val = htonl(csk->rcv_nxt);
flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
- flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
+ flowc->mnemval[6].val = htonl(csk->snd_win);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = htonl(csk->advmss);
flowc->mnemval[8].mnemonic = 0;
@@ -557,7 +564,7 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
- csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
+ csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
csk->advmss);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
@@ -750,8 +757,8 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
* Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
* pass through opt0.
*/
- if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
- csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
+ if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
+ csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);
csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
if (TCPOPT_TSTAMP_G(tcp_opt))
@@ -1367,6 +1374,8 @@ static int init_act_open(struct cxgbi_sock *csk)
unsigned int step;
unsigned int size, size6;
int t4 = is_t4(lldi->adapter_type);
+ unsigned int linkspeed;
+ unsigned int rcv_winf, snd_winf;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx,%u.\n",
@@ -1440,6 +1449,21 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->txq_idx = cxgb4_port_idx(ndev) * step;
step = lldi->nrxq / lldi->nchan;
csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
+ linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
+ csk->snd_win = cxgb4i_snd_win;
+ csk->rcv_win = cxgb4i_rcv_win;
+ if (cxgb4i_rcv_win <= 0) {
+ csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
+ rcv_winf = linkspeed / SPEED_10000;
+ if (rcv_winf)
+ csk->rcv_win *= rcv_winf;
+ }
+ if (cxgb4i_snd_win <= 0) {
+ csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
+ snd_winf = linkspeed / SPEED_10000;
+ if (snd_winf)
+ csk->snd_win *= snd_winf;
+ }
csk->wr_cred = lldi->wr_cred -
DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
csk->wr_max_cred = csk->wr_cred;
@@ -1758,8 +1782,6 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
cdev->nports = lldi->nports;
cdev->mtus = lldi->mtus;
cdev->nmtus = NMTUS;
- cdev->snd_win = cxgb4i_snd_win;
- cdev->rcv_win = cxgb4i_rcv_win;
cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
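With cxgb4i_rcv_win and cxgb4i_snd_win now defaulting to -1, init_act_open() scales the 10G defaults by link speed and stores the result per socket instead of per device. A runnable userspace model of the receive-window autosizing (mirroring the arithmetic above; a 1G link keeps the 10G default because the integer factor is zero):

#include <stdio.h>

#define CXGB4I_DEFAULT_10G_RCV_WIN	(256 * 1024)
#define SPEED_10000			10000

static unsigned int autosize_rcv_win(int param, unsigned int linkspeed)
{
	unsigned int win, winf;

	if (param > 0)			/* module parameter forces a size */
		return param;
	win = CXGB4I_DEFAULT_10G_RCV_WIN;
	winf = linkspeed / SPEED_10000;	/* 40G port -> 4x window */
	if (winf)
		win *= winf;
	return win;
}

int main(void)
{
	printf("10G: %u bytes\n", autosize_rcv_win(-1, 10000));
	printf("40G: %u bytes\n", autosize_rcv_win(-1, 40000));
	printf("forced 64K: %u bytes\n", autosize_rcv_win(64 * 1024, 40000));
	return 0;
}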
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
index 1096026ba241..22dd8d670e4a 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
@@ -1,7 +1,7 @@
/*
* cxgb4i.h: Chelsio T4 iSCSI driver.
*
- * Copyright (c) 2010 Chelsio Communications, Inc.
+ * Copyright (c) 2010-2015 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,6 +23,8 @@
#define CXGB4I_TX_HEADER_LEN \
(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
+#define T5_ISS_VALID (1 << 18)
+
struct ulptx_idata {
__be32 cmd_more;
__be32 len;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index eb58afcfb73b..1d42e4f88b96 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1,7 +1,7 @@
/*
* libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
*
- * Copyright (c) 2010 Chelsio Communications, Inc.
+ * Copyright (c) 2010-2015 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -38,8 +38,12 @@ static unsigned int dbg_level;
#define DRV_MODULE_NAME "libcxgbi"
#define DRV_MODULE_DESC "Chelsio iSCSI driver library"
-#define DRV_MODULE_VERSION "0.9.0"
-#define DRV_MODULE_RELDATE "Jun. 2010"
+#define DRV_MODULE_VERSION "0.9.1-ko"
+#define DRV_MODULE_RELDATE "Apr. 2015"
+
+static char version[] =
+ DRV_MODULE_DESC " " DRV_MODULE_NAME
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
@@ -1126,11 +1130,11 @@ static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
goto out_err;
}
- if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
+ if (csk->write_seq - csk->snd_una >= csk->snd_win) {
log_debug(1 << CXGBI_DBG_PDU_TX,
"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
csk, csk->state, csk->flags, csk->tid, csk->write_seq,
- csk->snd_una, cdev->snd_win);
+ csk->snd_una, csk->snd_win);
err = -ENOBUFS;
goto out_err;
}
@@ -1885,7 +1889,7 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
"csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
csk->rcv_wup, cdev->rx_credit_thres,
- cdev->rcv_win);
+ csk->rcv_win);
if (csk->state != CTP_ESTABLISHED)
return;
@@ -1896,7 +1900,7 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
if (unlikely(cdev->rx_credit_thres == 0))
return;
- must_send = credits + 16384 >= cdev->rcv_win;
+ must_send = credits + 16384 >= csk->rcv_win;
if (must_send || credits >= cdev->rx_credit_thres)
csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
}
@@ -2913,6 +2917,8 @@ static int __init libcxgbi_init_module(void)
sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
+ pr_info("%s", version);
+
pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
ISCSI_ITT_MASK, sw_tag_idx_bits,
ISCSI_AGE_MASK, sw_tag_age_bits);
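cxgbi_sock_send_pdus() now gates transmission on the per-socket window: bytes in flight are write_seq - snd_una in 32-bit modular arithmetic, which stays correct across sequence-number wraparound. A runnable demonstration of that check:

#include <stdio.h>
#include <stdint.h>

/* Full when in-flight bytes reach the send window. */
static int tx_window_full(uint32_t write_seq, uint32_t snd_una,
			  uint32_t snd_win)
{
	return write_seq - snd_una >= snd_win;
}

int main(void)
{
	/* Near wraparound: snd_una just below 2^32, write_seq wrapped. */
	uint32_t snd_una = 0xfffff000u;
	uint32_t write_seq = 0x00000200u;	/* 0x1200 bytes in flight */

	printf("in flight: %u, full(128K)=%d\n",
	       (unsigned)(write_seq - snd_una),
	       tx_window_full(write_seq, snd_una, 128 * 1024));
	return 0;
}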
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index aba1af720df6..b3e5bd1d5d9c 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -1,7 +1,7 @@
/*
* libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver.
*
- * Copyright (c) 2010 Chelsio Communications, Inc.
+ * Copyright (c) 2010-2015 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -234,6 +234,8 @@ struct cxgbi_sock {
u32 snd_nxt;
u32 snd_una;
u32 write_seq;
+ u32 snd_win;
+ u32 rcv_win;
};
/*
@@ -540,8 +542,6 @@ struct cxgbi_device {
struct iscsi_transport *itp;
unsigned int pfvf;
- unsigned int snd_win;
- unsigned int rcv_win;
unsigned int rx_credit_thres;
unsigned int skb_tx_rsvd;
unsigned int skb_rx_extra; /* for msg coalesced mode */
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 2806cfbec2b9..f35ed53adaac 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -3562,7 +3562,6 @@ static struct scsi_host_template driver_template = {
.slave_configure = adpt_slave_configure,
.can_queue = MAX_TO_IOP_MESSAGES,
.this_id = 7,
- .cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
};
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index fff682976c56..eefe14d453db 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1764,7 +1764,6 @@ struct scsi_host_template fdomain_driver_template = {
.can_queue = 1,
.this_id = 6,
.sg_tablesize = 64,
- .cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
};
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8eab107b53fb..1dafeb43333b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -43,6 +43,8 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
@@ -56,7 +58,7 @@
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "3.4.4-1"
+#define HPSA_DRIVER_VERSION "3.4.10-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -129,6 +131,7 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
+ {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
@@ -186,6 +189,7 @@ static struct board_type products[] = {
{0x21CC103C, "Smart Array", &SA5_access},
{0x21CD103C, "Smart Array", &SA5_access},
{0x21CE103C, "Smart HBA", &SA5_access},
+ {0x05809005, "SmartHBA-SA", &SA5_access},
{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -194,6 +198,10 @@ static struct board_type products[] = {
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
+#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
+static const struct scsi_cmnd hpsa_cmd_busy;
+#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
+static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
@@ -207,6 +215,9 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
+static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
+static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
+ struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type);
@@ -222,6 +233,7 @@ static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
+static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
@@ -232,7 +244,8 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
int nsgs, int min_blocks, u32 *bucket_map);
-static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
+static void hpsa_free_performant_mode(struct ctlr_info *h);
+static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
u32 *cfg_base_addr, u64 *cfg_base_addr_index,
@@ -252,6 +265,8 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
+static u32 lockup_detected(struct ctlr_info *h);
+static int detect_controller_lockup(struct ctlr_info *h);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
@@ -265,40 +280,86 @@ static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
return (struct ctlr_info *) *priv;
}
+static inline bool hpsa_is_cmd_idle(struct CommandList *c)
+{
+ return c->scsi_cmd == SCSI_CMD_IDLE;
+}
+
+static inline bool hpsa_is_pending_event(struct CommandList *c)
+{
+ return c->abort_pending || c->reset_pending;
+}
+
+/* extract sense key, asc, and ascq from sense data. -1 means invalid. */
+static void decode_sense_data(const u8 *sense_data, int sense_data_len,
+ u8 *sense_key, u8 *asc, u8 *ascq)
+{
+ struct scsi_sense_hdr sshdr;
+ bool rc;
+
+ *sense_key = -1;
+ *asc = -1;
+ *ascq = -1;
+
+ if (sense_data_len < 1)
+ return;
+
+ rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
+ if (rc) {
+ *sense_key = sshdr.sense_key;
+ *asc = sshdr.asc;
+ *ascq = sshdr.ascq;
+ }
+}
+
static int check_for_unit_attention(struct ctlr_info *h,
struct CommandList *c)
{
- if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
+ u8 sense_key, asc, ascq;
+ int sense_len;
+
+ if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
+ sense_len = sizeof(c->err_info->SenseInfo);
+ else
+ sense_len = c->err_info->SenseLen;
+
+ decode_sense_data(c->err_info->SenseInfo, sense_len,
+ &sense_key, &asc, &ascq);
+ if (sense_key != UNIT_ATTENTION || asc == -1)
return 0;
- switch (c->err_info->SenseInfo[12]) {
+ switch (asc) {
case STATE_CHANGED:
- dev_warn(&h->pdev->dev, HPSA "%d: a state change "
- "detected, command retried\n", h->ctlr);
+ dev_warn(&h->pdev->dev,
+ "%s: a state change detected, command retried\n",
+ h->devname);
break;
case LUN_FAILED:
dev_warn(&h->pdev->dev,
- HPSA "%d: LUN failure detected\n", h->ctlr);
+ "%s: LUN failure detected\n", h->devname);
break;
case REPORT_LUNS_CHANGED:
dev_warn(&h->pdev->dev,
- HPSA "%d: report LUN data changed\n", h->ctlr);
+ "%s: report LUN data changed\n", h->devname);
/*
 * Note: this REPORT_LUNS_CHANGED condition only occurs on external
 * target (array) devices.
*/
break;
case POWER_OR_RESET:
- dev_warn(&h->pdev->dev, HPSA "%d: a power on "
- "or device reset detected\n", h->ctlr);
+ dev_warn(&h->pdev->dev,
+ "%s: a power on or device reset detected\n",
+ h->devname);
break;
case UNIT_ATTENTION_CLEARED:
- dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
- "cleared by another initiator\n", h->ctlr);
+ dev_warn(&h->pdev->dev,
+ "%s: unit attention cleared by another initiator\n",
+ h->devname);
break;
default:
- dev_warn(&h->pdev->dev, HPSA "%d: unknown "
- "unit attention detected\n", h->ctlr);
+ dev_warn(&h->pdev->dev,
+ "%s: unknown unit attention detected\n",
+ h->devname);
break;
}
return 1;
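decode_sense_data() routes through scsi_normalize_sense() so fixed and descriptor sense formats both yield the same key/asc/ascq triple. For reference, a runnable userspace parser of just the fixed format (response codes 0x70/0x71), which is what the old SenseInfo[2]/SenseInfo[12] indexing hard-coded; descriptor format (0x72/0x73) keeps the fields in bytes 1-3 instead, hence the helper:

#include <stdio.h>
#include <stdint.h>

struct sense_hdr { uint8_t key, asc, ascq; };

static int parse_fixed_sense(const uint8_t *b, int len, struct sense_hdr *h)
{
	if (len < 14 || (b[0] & 0x7f) < 0x70 || (b[0] & 0x7f) > 0x71)
		return 0;
	h->key = b[2] & 0x0f;
	h->asc = b[12];
	h->ascq = b[13];
	return 1;
}

int main(void)
{
	/* UNIT ATTENTION, REPORT LUNS DATA HAS CHANGED (asc/ascq 0x3f/0x0e). */
	uint8_t sense[18] = { 0x70, 0, 0x06, [12] = 0x3f, [13] = 0x0e };
	struct sense_hdr h;

	if (parse_fixed_sense(sense, sizeof(sense), &h))
		printf("key=%#x asc=%#x ascq=%#x\n", h.key, h.asc, h.ascq);
	return 0;
}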
@@ -314,6 +375,20 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
return 1;
}
+static u32 lockup_detected(struct ctlr_info *h);
+static ssize_t host_show_lockup_detected(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ld;
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ ld = lockup_detected(h);
+
+ return sprintf(buf, "ld=%d\n", ld);
+}
+
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -425,7 +500,7 @@ static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
0x324a103C, /* Smart Array P712m */
- 0x324b103C, /* SmartArray P711m */
+ 0x324b103C, /* Smart Array P711m */
0x3223103C, /* Smart Array P800 */
0x3234103C, /* Smart Array P400 */
0x3235103C, /* Smart Array P400i */
@@ -467,24 +542,32 @@ static u32 soft_unresettable_controller[] = {
0x409D0E11, /* Smart Array 6400 EM */
};
-static int ctlr_is_hard_resettable(u32 board_id)
+static u32 needs_abort_tags_swizzled[] = {
+ 0x323D103C, /* Smart Array P700m */
+ 0x324a103C, /* Smart Array P712m */
+ 0x324b103C, /* SmartArray P711m */
+};
+
+static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
int i;
- for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
- if (unresettable_controller[i] == board_id)
- return 0;
- return 1;
+ for (i = 0; i < nelems; i++)
+ if (a[i] == board_id)
+ return 1;
+ return 0;
}
-static int ctlr_is_soft_resettable(u32 board_id)
+static int ctlr_is_hard_resettable(u32 board_id)
{
- int i;
+ return !board_id_in_array(unresettable_controller,
+ ARRAY_SIZE(unresettable_controller), board_id);
+}
- for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
- if (soft_unresettable_controller[i] == board_id)
- return 0;
- return 1;
+static int ctlr_is_soft_resettable(u32 board_id)
+{
+ return !board_id_in_array(soft_unresettable_controller,
+ ARRAY_SIZE(soft_unresettable_controller), board_id);
}
static int ctlr_is_resettable(u32 board_id)
@@ -493,6 +576,12 @@ static int ctlr_is_resettable(u32 board_id)
ctlr_is_soft_resettable(board_id);
}
+static int ctlr_needs_abort_tags_swizzled(u32 board_id)
+{
+ return board_id_in_array(needs_abort_tags_swizzled,
+ ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
+}
+
static ssize_t host_show_resettable(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -647,12 +736,15 @@ static DEVICE_ATTR(transport_mode, S_IRUGO,
host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
host_show_resettable, NULL);
+static DEVICE_ATTR(lockup_detected, S_IRUGO,
+ host_show_lockup_detected, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
&dev_attr_lunid,
&dev_attr_unique_id,
&dev_attr_hp_ssd_smart_path_enabled,
+ &dev_attr_lockup_detected,
NULL,
};
@@ -667,6 +759,9 @@ static struct device_attribute *hpsa_shost_attrs[] = {
NULL,
};
+#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
+ HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
+
static struct scsi_host_template hpsa_driver_template = {
.module = THIS_MODULE,
.name = HPSA,
@@ -681,6 +776,7 @@ static struct scsi_host_template hpsa_driver_template = {
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
.slave_alloc = hpsa_slave_alloc,
+ .slave_configure = hpsa_slave_configure,
.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
.compat_ioctl = hpsa_compat_ioctl,
@@ -743,30 +839,43 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
* a separate special register for submitting commands.
*/
-/* set_performant_mode: Modify the tag for cciss performant
+/*
+ * set_performant_mode: Modify the tag for cciss performant
* set bit 0 for pull model, bits 3-1 for block fetch
* register number
*/
-static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+#define DEFAULT_REPLY_QUEUE (-1)
+static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
+ int reply_queue)
{
if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
- if (likely(h->msix_vector > 0))
+ if (unlikely(!h->msix_vector))
+ return;
+ if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
c->Header.ReplyQueue =
raw_smp_processor_id() % h->nreply_queues;
+ else
+ c->Header.ReplyQueue = reply_queue % h->nreply_queues;
}
}
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
- struct CommandList *c)
+ struct CommandList *c,
+ int reply_queue)
{
struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
- /* Tell the controller to post the reply to the queue for this
+ /*
+ * Tell the controller to post the reply to the queue for this
* processor. This seems to give the best I/O throughput.
*/
- cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
- /* Set the bits in the address sent down to include:
+ if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
+ cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
+ else
+ cp->ReplyQueue = reply_queue % h->nreply_queues;
+ /*
+ * Set the bits in the address sent down to include:
* - performant mode bit (bit 0)
* - pull count (bits 1-3)
* - command type (bits 4-6)
@@ -775,20 +884,48 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h,
IOACCEL1_BUSADDR_CMDTYPE;
}
-static void set_ioaccel2_performant_mode(struct ctlr_info *h,
- struct CommandList *c)
+static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
+ struct CommandList *c,
+ int reply_queue)
{
- struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+ struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
+ &h->ioaccel2_cmd_pool[c->cmdindex];
/* Tell the controller to post the reply to the queue for this
* processor. This seems to give the best I/O throughput.
*/
- cp->reply_queue = smp_processor_id() % h->nreply_queues;
+ if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
+ cp->reply_queue = smp_processor_id() % h->nreply_queues;
+ else
+ cp->reply_queue = reply_queue % h->nreply_queues;
/* Set the bits in the address sent down to include:
* - performant mode bit not used in ioaccel mode 2
* - pull count (bits 0-3)
* - command type isn't needed for ioaccel2
*/
+ c->busaddr |= h->ioaccel2_blockFetchTable[0];
+}
+
+static void set_ioaccel2_performant_mode(struct ctlr_info *h,
+ struct CommandList *c,
+ int reply_queue)
+{
+ struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+
+ /*
+ * Tell the controller to post the reply to the queue for this
+ * processor. This seems to give the best I/O throughput.
+ */
+ if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
+ cp->reply_queue = smp_processor_id() % h->nreply_queues;
+ else
+ cp->reply_queue = reply_queue % h->nreply_queues;
+ /*
+ * Set the bits in the address sent down to include:
+ * - performant mode bit not used in ioaccel mode 2
+ * - pull count (bits 0-3)
+ * - command type isn't needed for ioaccel2
+ */
c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
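Each of the three performant-mode setters now takes a reply_queue argument: DEFAULT_REPLY_QUEUE spreads completions across queues by submitting CPU, while an explicit queue (used by the new abort/reset paths) is honored modulo the number of queues. A runnable model of the selection:

#include <stdio.h>

#define DEFAULT_REPLY_QUEUE	(-1)

static int pick_reply_queue(int reply_queue, int cpu, int nreply_queues)
{
	if (reply_queue == DEFAULT_REPLY_QUEUE)
		return cpu % nreply_queues;	/* spread by submitter */
	return reply_queue % nreply_queues;	/* caller pinned a queue */
}

int main(void)
{
	printf("cpu 5, default -> q%d\n",
	       pick_reply_queue(DEFAULT_REPLY_QUEUE, 5, 4));
	printf("explicit q7   -> q%d\n", pick_reply_queue(7, 5, 4));
	return 0;
}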
@@ -821,26 +958,38 @@ static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
-static void enqueue_cmd_and_start_io(struct ctlr_info *h,
- struct CommandList *c)
+static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
+ struct CommandList *c, int reply_queue)
{
dial_down_lockup_detection_during_fw_flash(h, c);
atomic_inc(&h->commands_outstanding);
switch (c->cmd_type) {
case CMD_IOACCEL1:
- set_ioaccel1_performant_mode(h, c);
+ set_ioaccel1_performant_mode(h, c, reply_queue);
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
break;
case CMD_IOACCEL2:
- set_ioaccel2_performant_mode(h, c);
+ set_ioaccel2_performant_mode(h, c, reply_queue);
+ writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
+ break;
+ case IOACCEL2_TMF:
+ set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
break;
default:
- set_performant_mode(h, c);
+ set_performant_mode(h, c, reply_queue);
h->access.submit_command(h, c);
}
}
+static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
+{
+ if (unlikely(hpsa_is_pending_event(c)))
+ return finish_cmd(c);
+
+ __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
+}
+
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
@@ -881,6 +1030,23 @@ static int hpsa_find_target_lun(struct ctlr_info *h,
return !found;
}
+static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *dev, char *description)
+{
+ dev_printk(level, &h->pdev->dev,
+ "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
+ h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
+ description,
+ scsi_device_type(dev->devtype),
+ dev->vendor,
+ dev->model,
+ dev->raid_level > RAID_UNKNOWN ?
+ "RAID-?" : raid_label[dev->raid_level],
+ dev->offload_config ? '+' : '-',
+ dev->offload_enabled ? '+' : '-',
+ dev->expose_state);
+}
+
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *device,
@@ -948,15 +1114,10 @@ lun_assigned:
h->ndevices++;
added[*nadded] = device;
(*nadded)++;
-
- /* initially, (before registering with scsi layer) we don't
- * know our hostno and we don't want to print anything first
- * time anyway (the scsi layer's inquiries will show that info)
- */
- /* if (hostno != -1) */
- dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
- scsi_device_type(device->devtype), hostno,
- device->bus, device->target, device->lun);
+ hpsa_show_dev_msg(KERN_INFO, h, device,
+ device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
+ device->offload_to_be_enabled = device->offload_enabled;
+ device->offload_enabled = 0;
return 0;
}
@@ -964,6 +1125,7 @@ lun_assigned:
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
int entry, struct hpsa_scsi_dev_t *new_entry)
{
+ int offload_enabled;
/* assumes h->devlock is held */
BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
@@ -982,16 +1144,29 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
*/
h->dev[entry]->raid_map = new_entry->raid_map;
h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
- wmb(); /* ensure raid map updated prior to ->offload_enabled */
}
+ if (new_entry->hba_ioaccel_enabled) {
+ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+ wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
+ }
+ h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
h->dev[entry]->offload_config = new_entry->offload_config;
h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
- h->dev[entry]->offload_enabled = new_entry->offload_enabled;
h->dev[entry]->queue_depth = new_entry->queue_depth;
- dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
- scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
- new_entry->target, new_entry->lun);
+ /*
+ * We can turn off ioaccel offload now, but need to delay turning
+ * it on until we can update h->dev[entry]->phys_disk[], but we
+ * can't do that until all the devices are updated.
+ */
+ h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
+ if (!new_entry->offload_enabled)
+ h->dev[entry]->offload_enabled = 0;
+
+ offload_enabled = h->dev[entry]->offload_enabled;
+ h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
+ hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
+ h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
@@ -1017,9 +1192,9 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
h->dev[entry] = new_entry;
added[*nadded] = new_entry;
(*nadded)++;
- dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
- scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
- new_entry->target, new_entry->lun);
+ hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
+ new_entry->offload_to_be_enabled = new_entry->offload_enabled;
+ new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
@@ -1039,9 +1214,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
for (i = entry; i < h->ndevices-1; i++)
h->dev[i] = h->dev[i+1];
h->ndevices--;
- dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
- scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
- sd->lun);
+ hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}
#define SCSI3ADDR_EQ(a, b) ( \
@@ -1283,6 +1456,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
nraid_map_entries = RAID_MAP_MAX_ENTRIES;
+ logical_drive->nphysical_disks = nraid_map_entries;
+
qdepth = 0;
for (i = 0; i < nraid_map_entries; i++) {
logical_drive->phys_disk[i] = NULL;
@@ -1312,7 +1487,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
*/
if (!logical_drive->phys_disk[i]) {
logical_drive->offload_enabled = 0;
- logical_drive->queue_depth = h->nr_cmds;
+ logical_drive->offload_to_be_enabled = 0;
+ logical_drive->queue_depth = 8;
}
}
if (nraid_map_entries)
@@ -1335,6 +1511,16 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
continue;
if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
continue;
+
+ /*
+ * If offload is currently enabled, the RAID map and
+ * phys_disk[] assignment *better* not be changing
+ * and since it isn't changing, we do not need to
+ * update it.
+ */
+ if (dev[i]->offload_enabled)
+ continue;
+
hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
}
}
@@ -1411,9 +1597,7 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
*/
if (sd[i]->volume_offline) {
hpsa_show_volume_status(h, sd[i]);
- dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
- h->scsi_host->host_no,
- sd[i]->bus, sd[i]->target, sd[i]->lun);
+ hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
continue;
}
@@ -1433,6 +1617,14 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
/* but if it does happen, we just ignore that device */
}
}
+ hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
+
+ /* Now that h->dev[]->phys_disk[] is coherent, we can enable
+ * any logical drives that need it enabled.
+ */
+ for (i = 0; i < h->ndevices; i++)
+ h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
+
spin_unlock_irqrestore(&h->devlock, flags);
/* Monitor devices which are in one of several NOT READY states to be
@@ -1456,20 +1648,22 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
sh = h->scsi_host;
/* Notify scsi mid layer of any removed devices */
for (i = 0; i < nremoved; i++) {
- struct scsi_device *sdev =
- scsi_device_lookup(sh, removed[i]->bus,
- removed[i]->target, removed[i]->lun);
- if (sdev != NULL) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- } else {
- /* We don't expect to get here.
- * future cmds to this device will get selection
- * timeout as if the device was gone.
- */
- dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
- " for removal.", hostno, removed[i]->bus,
- removed[i]->target, removed[i]->lun);
+ if (removed[i]->expose_state & HPSA_SCSI_ADD) {
+ struct scsi_device *sdev =
+ scsi_device_lookup(sh, removed[i]->bus,
+ removed[i]->target, removed[i]->lun);
+ if (sdev != NULL) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ } else {
+ /*
+ * We don't expect to get here.
+ * future cmds to this device will get selection
+ * timeout as if the device was gone.
+ */
+ hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
+ "didn't find device for removal.");
+ }
}
kfree(removed[i]);
removed[i] = NULL;
@@ -1477,16 +1671,18 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
/* Notify scsi mid layer of any added devices */
for (i = 0; i < nadded; i++) {
+ if (!(added[i]->expose_state & HPSA_SCSI_ADD))
+ continue;
if (scsi_add_device(sh, added[i]->bus,
added[i]->target, added[i]->lun) == 0)
continue;
- dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
- "device not added.\n", hostno, added[i]->bus,
- added[i]->target, added[i]->lun);
+ hpsa_show_dev_msg(KERN_WARNING, h, added[i],
+ "addition failed, device not added.");
/* now we have to remove it from h->dev,
* since it didn't get added to scsi mid layer
*/
fixup_botched_add(h, added[i]);
+ added[i] = NULL;
}
free_and_out:
@@ -1512,7 +1708,6 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
return NULL;
}
-/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
struct hpsa_scsi_dev_t *sd;
@@ -1523,21 +1718,80 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
spin_lock_irqsave(&h->devlock, flags);
sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
sdev_id(sdev), sdev->lun);
- if (sd != NULL) {
- sdev->hostdata = sd;
- if (sd->queue_depth)
- scsi_change_queue_depth(sdev, sd->queue_depth);
+ if (likely(sd)) {
atomic_set(&sd->ioaccel_cmds_out, 0);
- }
+ sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
+ } else
+ sdev->hostdata = NULL;
spin_unlock_irqrestore(&h->devlock, flags);
return 0;
}
+/* configure scsi device based on internal per-device structure */
+static int hpsa_slave_configure(struct scsi_device *sdev)
+{
+ struct hpsa_scsi_dev_t *sd;
+ int queue_depth;
+
+ sd = sdev->hostdata;
+ sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
+
+ if (sd)
+ queue_depth = sd->queue_depth != 0 ?
+ sd->queue_depth : sdev->host->can_queue;
+ else
+ queue_depth = sdev->host->can_queue;
+
+ scsi_change_queue_depth(sdev, queue_depth);
+
+ return 0;
+}
+
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
/* nothing to do. */
}
+static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
+{
+ int i;
+
+ if (!h->ioaccel2_cmd_sg_list)
+ return;
+ for (i = 0; i < h->nr_cmds; i++) {
+ kfree(h->ioaccel2_cmd_sg_list[i]);
+ h->ioaccel2_cmd_sg_list[i] = NULL;
+ }
+ kfree(h->ioaccel2_cmd_sg_list);
+ h->ioaccel2_cmd_sg_list = NULL;
+}
+
+static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
+{
+ int i;
+
+ if (h->chainsize <= 0)
+ return 0;
+
+ h->ioaccel2_cmd_sg_list =
+ kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
+ GFP_KERNEL);
+ if (!h->ioaccel2_cmd_sg_list)
+ return -ENOMEM;
+ for (i = 0; i < h->nr_cmds; i++) {
+ h->ioaccel2_cmd_sg_list[i] =
+ kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
+ h->maxsgentries, GFP_KERNEL);
+ if (!h->ioaccel2_cmd_sg_list[i])
+ goto clean;
+ }
+ return 0;
+
+clean:
+ hpsa_free_ioaccel2_sg_chain_blocks(h);
+ return -ENOMEM;
+}
+
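hpsa_allocate_ioaccel2_sg_chain_blocks() builds one pointer array plus one chain buffer per command, unwinding everything already allocated when a later allocation fails. A userspace model of that allocate-or-clean pattern (sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>

static void **alloc_chain_blocks(int ncmds, size_t chain_bytes)
{
	void **list = calloc(ncmds, sizeof(*list));
	int i;

	if (!list)
		return NULL;
	for (i = 0; i < ncmds; i++) {
		list[i] = malloc(chain_bytes);
		if (!list[i])
			goto clean;
	}
	return list;
clean:
	while (i--)			/* free only what succeeded */
		free(list[i]);
	free(list);
	return NULL;
}

int main(void)
{
	void **blocks = alloc_chain_blocks(16, 4096);
	int i;

	printf("%s\n", blocks ? "allocated" : "failed");
	if (blocks) {
		for (i = 0; i < 16; i++)
			free(blocks[i]);
		free(blocks);
	}
	return 0;
}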
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
int i;
@@ -1552,7 +1806,7 @@ static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
h->cmd_sg_list = NULL;
}
-static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
+static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
int i;
@@ -1580,6 +1834,39 @@ clean:
return -ENOMEM;
}
+static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
+ struct io_accel2_cmd *cp, struct CommandList *c)
+{
+ struct ioaccel2_sg_element *chain_block;
+ u64 temp64;
+ u32 chain_size;
+
+ chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
+ chain_size = le32_to_cpu(cp->data_len);
+ temp64 = pci_map_single(h->pdev, chain_block, chain_size,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&h->pdev->dev, temp64)) {
+ /* prevent subsequent unmapping */
+ cp->sg->address = 0;
+ return -1;
+ }
+ cp->sg->address = cpu_to_le64(temp64);
+ return 0;
+}
+
+static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
+ struct io_accel2_cmd *cp)
+{
+ struct ioaccel2_sg_element *chain_sg;
+ u64 temp64;
+ u32 chain_size;
+
+ chain_sg = cp->sg;
+ temp64 = le64_to_cpu(chain_sg->address);
+ chain_size = le32_to_cpu(cp->data_len);
+ pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
+}
+
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
@@ -1629,6 +1916,7 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
{
int data_len;
int retry = 0;
+ u32 ioaccel2_resid = 0;
switch (c2->error_data.serv_response) {
case IOACCEL2_SERV_RESPONSE_COMPLETE:
@@ -1636,9 +1924,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
break;
case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
- dev_warn(&h->pdev->dev,
- "%s: task complete with check condition.\n",
- "HP SSD Smart Path");
cmd->result |= SAM_STAT_CHECK_CONDITION;
if (c2->error_data.data_present !=
IOACCEL2_SENSE_DATA_PRESENT) {
@@ -1658,58 +1943,56 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
retry = 1;
break;
case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
- dev_warn(&h->pdev->dev,
- "%s: task complete with BUSY status.\n",
- "HP SSD Smart Path");
retry = 1;
break;
case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
- dev_warn(&h->pdev->dev,
- "%s: task complete with reservation conflict.\n",
- "HP SSD Smart Path");
retry = 1;
break;
case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
- /* Make scsi midlayer do unlimited retries */
- cmd->result = DID_IMM_RETRY << 16;
+ retry = 1;
break;
case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
- dev_warn(&h->pdev->dev,
- "%s: task complete with aborted status.\n",
- "HP SSD Smart Path");
retry = 1;
break;
default:
- dev_warn(&h->pdev->dev,
- "%s: task complete with unrecognized status: 0x%02x\n",
- "HP SSD Smart Path", c2->error_data.status);
retry = 1;
break;
}
break;
case IOACCEL2_SERV_RESPONSE_FAILURE:
- /* don't expect to get here. */
- dev_warn(&h->pdev->dev,
- "unexpected delivery or target failure, status = 0x%02x\n",
- c2->error_data.status);
- retry = 1;
+ switch (c2->error_data.status) {
+ case IOACCEL2_STATUS_SR_IO_ERROR:
+ case IOACCEL2_STATUS_SR_IO_ABORTED:
+ case IOACCEL2_STATUS_SR_OVERRUN:
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_UNDERRUN:
+ cmd->result = (DID_OK << 16); /* host byte */
+ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+ ioaccel2_resid = get_unaligned_le32(
+ &c2->error_data.resid_cnt[0]);
+ scsi_set_resid(cmd, ioaccel2_resid);
+ break;
+ case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
+ case IOACCEL2_STATUS_SR_INVALID_DEVICE:
+ case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
+ /* We will get an event from ctlr to trigger rescan */
+ retry = 1;
+ break;
+ default:
+ retry = 1;
+ }
break;
case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
break;
case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
break;
case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
- dev_warn(&h->pdev->dev, "task management function rejected.\n");
retry = 1;
break;
case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
- dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
break;
default:
- dev_warn(&h->pdev->dev,
- "%s: Unrecognized server response: 0x%02x\n",
- "HP SSD Smart Path",
- c2->error_data.serv_response);
retry = 1;
break;
}
@@ -1717,6 +2000,87 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
return retry; /* retry on raid path? */
}
+static void hpsa_cmd_resolve_events(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ bool do_wake = false;
+
+ /*
+ * Prevent the following race in the abort handler:
+ *
+ * 1. LLD is requested to abort a SCSI command
+ * 2. The SCSI command completes
+ * 3. The struct CommandList associated with step 2 is made available
+ * 4. New I/O request to LLD to another LUN re-uses struct CommandList
+ * 5. Abort handler follows scsi_cmnd->host_scribble and
+ * finds struct CommandList and tries to abort it
+ * Now we have aborted the wrong command.
+ *
+ * Reset c->scsi_cmd here so that the abort or reset handler will know
+ * this command has completed. Then, check to see if the handler is
+ * waiting for this command, and, if so, wake it.
+ */
+ c->scsi_cmd = SCSI_CMD_IDLE;
+ mb(); /* Declare command idle before checking for pending events. */
+ if (c->abort_pending) {
+ do_wake = true;
+ c->abort_pending = false;
+ }
+ if (c->reset_pending) {
+ unsigned long flags;
+ struct hpsa_scsi_dev_t *dev;
+
+ /*
+ * There appears to be a reset pending; take the lock and
+ * reconfirm. If so, decrement the count of outstanding
+ * commands and wake the reset waiter if this was the last one.
+ */
+ spin_lock_irqsave(&h->lock, flags);
+ dev = c->reset_pending; /* Re-fetch under the lock. */
+ if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
+ do_wake = true;
+ c->reset_pending = NULL;
+ spin_unlock_irqrestore(&h->lock, flags);
+ }
+
+ if (do_wake)
+ wake_up_all(&h->event_sync_wait_queue);
+}
+
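The idiom above, publish "idle", then a full barrier, then inspect the pending flags, is what lets the abort and reset paths synchronize with completion without holding a lock across it. A reduced userspace analog in C11 atomics, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd {
	_Atomic(const void *) scsi_cmd;	/* sentinel value when idle */
	atomic_bool abort_pending;
};

static const int idle_sentinel;		/* stand-in for SCSI_CMD_IDLE */

/* Publish "idle" before checking for a waiting aborter; seq_cst
 * ordering plays the role of the mb() in the driver. */
static bool resolve_events(struct cmd *c)
{
	atomic_store(&c->scsi_cmd, &idle_sentinel);
	return atomic_exchange(&c->abort_pending, false);
}

int main(void)
{
	struct cmd c;

	atomic_init(&c.scsi_cmd, NULL);
	atomic_init(&c.abort_pending, true);
	printf("wake aborter: %d\n", resolve_events(&c));	/* prints 1 */
	return 0;
}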
+static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ hpsa_cmd_resolve_events(h, c);
+ cmd_tagged_free(h, c);
+}
+
+static void hpsa_cmd_free_and_done(struct ctlr_info *h,
+ struct CommandList *c, struct scsi_cmnd *cmd)
+{
+ hpsa_cmd_resolve_and_free(h, c);
+ cmd->scsi_done(cmd);
+}
+
+static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
+{
+ INIT_WORK(&c->work, hpsa_command_resubmit_worker);
+ queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
+}
+
+static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
+{
+ cmd->result = DID_ABORT << 16;
+}
+
+static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
+ struct scsi_cmnd *cmd)
+{
+ hpsa_set_scsi_cmd_aborted(cmd);
+ dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
+ c->Request.CDB, c->err_info->ScsiStatus);
+ hpsa_cmd_resolve_and_free(h, c);
+}
+
static void process_ioaccel2_completion(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd,
struct hpsa_scsi_dev_t *dev)
@@ -1725,13 +2089,11 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
/* check for good status */
if (likely(c2->error_data.serv_response == 0 &&
- c2->error_data.status == 0)) {
- cmd_free(h, c);
- cmd->scsi_done(cmd);
- return;
- }
+ c2->error_data.status == 0))
+ return hpsa_cmd_free_and_done(h, c, cmd);
- /* Any RAID offload error results in retry which will use
+ /*
+ * Any RAID offload error results in retry which will use
* the normal I/O path so the controller can handle whatever's
* wrong.
*/
@@ -1741,19 +2103,42 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
if (c2->error_data.status ==
IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
dev->offload_enabled = 0;
- goto retry_cmd;
+
+ return hpsa_retry_cmd(h, c);
}
if (handle_ioaccel_mode2_error(h, c, cmd, c2))
- goto retry_cmd;
+ return hpsa_retry_cmd(h, c);
- cmd_free(h, c);
- cmd->scsi_done(cmd);
- return;
+ return hpsa_cmd_free_and_done(h, c, cmd);
+}
-retry_cmd:
- INIT_WORK(&c->work, hpsa_command_resubmit_worker);
- queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
+/* Returns 0 on success, < 0 otherwise. */
+static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
+ struct CommandList *cp)
+{
+ u8 tmf_status = cp->err_info->ScsiStatus;
+
+ switch (tmf_status) {
+ case CISS_TMF_COMPLETE:
+ /*
+ * CISS_TMF_COMPLETE never happens; instead,
+ * ei->CommandStatus == 0 in this case.
+ */
+ case CISS_TMF_SUCCESS:
+ return 0;
+ case CISS_TMF_INVALID_FRAME:
+ case CISS_TMF_NOT_SUPPORTED:
+ case CISS_TMF_FAILED:
+ case CISS_TMF_WRONG_LUN:
+ case CISS_TMF_OVERLAPPED_TAG:
+ break;
+ default:
+ dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
+ tmf_status);
+ break;
+ }
+ return -tmf_status;
}
static void complete_scsi_command(struct CommandList *cp)
@@ -1762,51 +2147,58 @@ static void complete_scsi_command(struct CommandList *cp)
struct ctlr_info *h;
struct ErrorInfo *ei;
struct hpsa_scsi_dev_t *dev;
+ struct io_accel2_cmd *c2;
- unsigned char sense_key;
- unsigned char asc; /* additional sense code */
- unsigned char ascq; /* additional sense code qualifier */
+ u8 sense_key;
+ u8 asc; /* additional sense code */
+ u8 ascq; /* additional sense code qualifier */
unsigned long sense_data_size;
ei = cp->err_info;
cmd = cp->scsi_cmd;
h = cp->h;
dev = cmd->device->hostdata;
+ c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
scsi_dma_unmap(cmd); /* undo the DMA mappings */
if ((cp->cmd_type == CMD_SCSI) &&
(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
hpsa_unmap_sg_chain_block(h, cp);
+ if ((cp->cmd_type == CMD_IOACCEL2) &&
+ (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
+ hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
+
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
- if (cp->cmd_type == CMD_IOACCEL2)
- return process_ioaccel2_completion(h, cp, cmd, dev);
-
- cmd->result |= ei->ScsiStatus;
+ /*
+ * We check for lockup status here as it may be set for
+ * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
+ * fail_all_outstanding_cmds()
+ */
+ if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
+ /* DID_NO_CONNECT will prevent a retry */
+ cmd->result = DID_NO_CONNECT << 16;
+ return hpsa_cmd_free_and_done(h, cp, cmd);
+ }
- scsi_set_resid(cmd, ei->ResidualCnt);
- if (ei->CommandStatus == 0) {
- if (cp->cmd_type == CMD_IOACCEL1)
- atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
- cmd_free(h, cp);
- cmd->scsi_done(cmd);
- return;
+ if ((unlikely(hpsa_is_pending_event(cp)))) {
+ if (cp->reset_pending)
+ return hpsa_cmd_resolve_and_free(h, cp);
+ if (cp->abort_pending)
+ return hpsa_cmd_abort_and_free(h, cp, cmd);
}
- /* copy the sense data */
- if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
- sense_data_size = SCSI_SENSE_BUFFERSIZE;
- else
- sense_data_size = sizeof(ei->SenseInfo);
- if (ei->SenseLen < sense_data_size)
- sense_data_size = ei->SenseLen;
+ if (cp->cmd_type == CMD_IOACCEL2)
+ return process_ioaccel2_completion(h, cp, cmd, dev);
- memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
+ scsi_set_resid(cmd, ei->ResidualCnt);
+ if (ei->CommandStatus == 0)
+ return hpsa_cmd_free_and_done(h, cp, cmd);
/* For I/O accelerator commands, copy over some fields to the normal
* CISS header used below for error handling.
@@ -1828,10 +2220,7 @@ static void complete_scsi_command(struct CommandList *cp)
if (is_logical_dev_addr_mode(dev->scsi3addr)) {
if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
dev->offload_enabled = 0;
- INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
- queue_work_on(raw_smp_processor_id(),
- h->resubmit_wq, &cp->work);
- return;
+ return hpsa_retry_cmd(h, cp);
}
}
@@ -1839,14 +2228,18 @@ static void complete_scsi_command(struct CommandList *cp)
switch (ei->CommandStatus) {
case CMD_TARGET_STATUS:
- if (ei->ScsiStatus) {
- /* Get sense key */
- sense_key = 0xf & ei->SenseInfo[2];
- /* Get additional sense code */
- asc = ei->SenseInfo[12];
- /* Get addition sense code qualifier */
- ascq = ei->SenseInfo[13];
- }
+ cmd->result |= ei->ScsiStatus;
+ /* copy the sense data */
+ if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
+ sense_data_size = SCSI_SENSE_BUFFERSIZE;
+ else
+ sense_data_size = sizeof(ei->SenseInfo);
+ if (ei->SenseLen < sense_data_size)
+ sense_data_size = ei->SenseLen;
+ memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
+ if (ei->ScsiStatus)
+ decode_sense_data(ei->SenseInfo, sense_data_size,
+ &sense_key, &asc, &ascq);
if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
if (sense_key == ABORTED_COMMAND) {
cmd->result |= DID_SOFT_ERROR << 16;
@@ -1918,10 +2311,8 @@ static void complete_scsi_command(struct CommandList *cp)
cp->Request.CDB);
break;
case CMD_ABORTED:
- cmd->result = DID_ABORT << 16;
- dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
- cp->Request.CDB, ei->ScsiStatus);
- break;
+ /* Return now to avoid calling scsi_done(). */
+ return hpsa_cmd_abort_and_free(h, cp, cmd);
case CMD_ABORT_FAILED:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
@@ -1941,6 +2332,10 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "Command unabortable\n");
break;
+ case CMD_TMF_STATUS:
+ if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
+ cmd->result = DID_ERROR << 16;
+ break;
case CMD_IOACCEL_DISABLED:
/* This only handles the direct pass-through case since RAID
* offload is handled above. Just attempt a retry.
@@ -1954,8 +2349,8 @@ static void complete_scsi_command(struct CommandList *cp)
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
cp, ei->CommandStatus);
}
- cmd_free(h, cp);
- cmd->scsi_done(cmd);
+
+ return hpsa_cmd_free_and_done(h, cp, cmd);
}
static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -1998,14 +2393,36 @@ static int hpsa_map_one(struct pci_dev *pdev,
return 0;
}
-static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
- struct CommandList *c)
+#define NO_TIMEOUT ((unsigned long) -1)
+#define DEFAULT_TIMEOUT 30000 /* milliseconds */
+static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
+ struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
{
DECLARE_COMPLETION_ONSTACK(wait);
c->waiting = &wait;
- enqueue_cmd_and_start_io(h, c);
- wait_for_completion(&wait);
+ __enqueue_cmd_and_start_io(h, c, reply_queue);
+ if (timeout_msecs == NO_TIMEOUT) {
+ /* TODO: get rid of this no-timeout thing */
+ wait_for_completion_io(&wait);
+ return IO_OK;
+ }
+ if (!wait_for_completion_io_timeout(&wait,
+ msecs_to_jiffies(timeout_msecs))) {
+ dev_warn(&h->pdev->dev, "Command timed out.\n");
+ return -ETIMEDOUT;
+ }
+ return IO_OK;
+}
+
+static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
+ int reply_queue, unsigned long timeout_msecs)
+{
+ if (unlikely(lockup_detected(h))) {
+ c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
+ return IO_OK;
+ }
+ return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
}
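The return contract introduced here is worth spelling out: NO_TIMEOUT blocks indefinitely, and any other value turns a silent hang into -ETIMEDOUT, after which the caller must not trust err_info. A userspace analog of that timed-wait shape (illustrative helper, not driver code):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static int wait_msecs(sem_t *done, long msecs)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += msecs / 1000;
	ts.tv_nsec += (msecs % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	/* 0 on completion; -ETIMEDOUT if the wait expired */
	return sem_timedwait(done, &ts) == 0 ? 0 : -ETIMEDOUT;
}

int main(void)
{
	sem_t done;

	sem_init(&done, 0, 0);	/* never posted: simulates a hung command */
	printf("rc = %d\n", wait_msecs(&done, 50));
	return 0;
}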
static u32 lockup_detected(struct ctlr_info *h)
@@ -2020,25 +2437,19 @@ static u32 lockup_detected(struct ctlr_info *h)
return rc;
}
-static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
- struct CommandList *c)
-{
- /* If controller lockup detected, fake a hardware error. */
- if (unlikely(lockup_detected(h)))
- c->err_info->CommandStatus = CMD_HARDWARE_ERR;
- else
- hpsa_scsi_do_simple_cmd_core(h, c);
-}
-
#define MAX_DRIVER_CMD_RETRIES 25
-static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
- struct CommandList *c, int data_direction)
+static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
+ struct CommandList *c, int data_direction, unsigned long timeout_msecs)
{
int backoff_time = 10, retry_count = 0;
+ int rc;
do {
memset(c->err_info, 0, sizeof(*c->err_info));
- hpsa_scsi_do_simple_cmd_core(h, c);
+ rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
+ timeout_msecs);
+ if (rc)
+ break;
retry_count++;
if (retry_count > 3) {
msleep(backoff_time);
@@ -2049,6 +2460,9 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
check_for_busy(h, c)) &&
retry_count <= MAX_DRIVER_CMD_RETRIES);
hpsa_pci_unmap(h->pdev, c, 1, data_direction);
+ if (retry_count > MAX_DRIVER_CMD_RETRIES)
+ rc = -EIO;
+ return rc;
}
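The retry loop above only starts sleeping once the first few attempts have failed, and grows the delay between subsequent tries. A compact userspace sketch of that retry-with-backoff shape, with illustrative constants and a fake submit function:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_RETRIES 25

static bool submit_once(int attempt)
{
	return attempt >= 5;	/* pretend the device is busy 5 times */
}

int main(void)
{
	int backoff_ms = 10, retries = 0;

	while (!submit_once(retries) && retries++ < MAX_RETRIES) {
		if (retries > 3) {
			usleep(backoff_ms * 1000);
			if (backoff_ms < 1000)
				backoff_ms *= 2;	/* illustrative growth */
		}
	}
	printf("attempts used: %d\n", retries);
	return 0;
}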
static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
@@ -2072,16 +2486,23 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h,
{
const struct ErrorInfo *ei = cp->err_info;
struct device *d = &cp->h->pdev->dev;
- const u8 *sd = ei->SenseInfo;
+ u8 sense_key, asc, ascq;
+ int sense_len;
switch (ei->CommandStatus) {
case CMD_TARGET_STATUS:
+ if (ei->SenseLen > sizeof(ei->SenseInfo))
+ sense_len = sizeof(ei->SenseInfo);
+ else
+ sense_len = ei->SenseLen;
+ decode_sense_data(ei->SenseInfo, sense_len,
+ &sense_key, &asc, &ascq);
hpsa_print_cmd(h, "SCSI status", cp);
if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
- dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
- sd[2] & 0x0f, sd[12], sd[13]);
+ dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
+ sense_key, asc, ascq);
else
- dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
+ dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
if (ei->ScsiStatus == 0)
dev_warn(d, "SCSI status is abnormally zero. "
"(probably indicates selection timeout "
@@ -2125,6 +2546,9 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h,
case CMD_UNABORTABLE:
hpsa_print_cmd(h, "unabortable", cp);
break;
+ case CMD_CTLR_LOCKUP:
+ hpsa_print_cmd(h, "controller lockup detected", cp);
+ break;
default:
hpsa_print_cmd(h, "unknown status", cp);
dev_warn(d, "Unknown command status %x\n",
@@ -2142,17 +2566,15 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
c = cmd_alloc(h);
- if (c == NULL) {
- dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
- return -ENOMEM;
- }
-
if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
page, scsi3addr, TYPE_CMD)) {
rc = -1;
goto out;
}
- hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ if (rc)
+ goto out;
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(h, c);
@@ -2172,17 +2594,15 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
struct ErrorInfo *ei;
c = cmd_alloc(h);
- if (c == NULL) { /* trouble... */
- dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
- return -ENOMEM;
- }
-
if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
page, scsi3addr, TYPE_CMD)) {
rc = -1;
goto out;
}
- hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ if (rc)
+ goto out;
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(h, c);
@@ -2191,10 +2611,10 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
out:
cmd_free(h, c);
return rc;
- }
+}
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
- u8 reset_type)
+ u8 reset_type, int reply_queue)
{
int rc = IO_OK;
struct CommandList *c;
@@ -2202,16 +2622,16 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
c = cmd_alloc(h);
- if (c == NULL) { /* trouble... */
- dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
- return -ENOMEM;
- }
/* fill_cmd can't fail here, no data buffer to map. */
(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
scsi3addr, TYPE_MSG);
c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
- hpsa_scsi_do_simple_cmd_core(h, c);
+ rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+ if (rc) {
+ dev_warn(&h->pdev->dev, "Failed to send reset command\n");
+ goto out;
+ }
/* no unmap needed here because no data xfer. */
ei = c->err_info;
@@ -2219,10 +2639,129 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
hpsa_scsi_interpret_error(h, c);
rc = -1;
}
+out:
cmd_free(h, c);
return rc;
}
+static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
+ struct hpsa_scsi_dev_t *dev,
+ unsigned char *scsi3addr)
+{
+ int i;
+ bool match = false;
+ struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
+ struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
+
+ if (hpsa_is_cmd_idle(c))
+ return false;
+
+ switch (c->cmd_type) {
+ case CMD_SCSI:
+ case CMD_IOCTL_PEND:
+ match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
+ sizeof(c->Header.LUN.LunAddrBytes));
+ break;
+
+ case CMD_IOACCEL1:
+ case CMD_IOACCEL2:
+ if (c->phys_disk == dev) {
+ /* HBA mode match */
+ match = true;
+ } else {
+ /* Possible RAID mode -- check each phys dev. */
+ /* FIXME: Do we need to take out a lock here? If
+ * so, we could just call hpsa_get_pdisk_of_ioaccel2()
+ * instead. */
+ for (i = 0; i < dev->nphysical_disks && !match; i++) {
+ /* FIXME: an alternate test might be
+ *
+ * match = dev->phys_disk[i]->ioaccel_handle
+ * == c2->scsi_nexus; */
+ match = dev->phys_disk[i] == c->phys_disk;
+ }
+ }
+ break;
+
+ case IOACCEL2_TMF:
+ for (i = 0; i < dev->nphysical_disks && !match; i++) {
+ match = dev->phys_disk[i]->ioaccel_handle ==
+ le32_to_cpu(ac->it_nexus);
+ }
+ break;
+
+ case 0: /* The command is in the middle of being initialized. */
+ match = false;
+ break;
+
+ default:
+ dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
+ c->cmd_type);
+ BUG();
+ }
+
+ return match;
+}
+
+static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
+ unsigned char *scsi3addr, u8 reset_type, int reply_queue)
+{
+ int i;
+ int rc = 0;
+
+ /* We can really only handle one reset at a time */
+ if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
+ dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
+ return -EINTR;
+ }
+
+ BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
+
+ for (i = 0; i < h->nr_cmds; i++) {
+ struct CommandList *c = h->cmd_pool + i;
+ int refcount = atomic_inc_return(&c->refcount);
+
+ if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
+ unsigned long flags;
+
+ /*
+ * Mark the target command as having a reset pending,
+ * then take the lock so that the command cannot complete
+ * while we're considering it. If the command is not
+ * idle then count it; otherwise revoke the event.
+ */
+ c->reset_pending = dev;
+ spin_lock_irqsave(&h->lock, flags); /* Implied MB */
+ if (!hpsa_is_cmd_idle(c))
+ atomic_inc(&dev->reset_cmds_out);
+ else
+ c->reset_pending = NULL;
+ spin_unlock_irqrestore(&h->lock, flags);
+ }
+
+ cmd_free(h, c);
+ }
+
+ rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
+ if (!rc)
+ wait_event(h->event_sync_wait_queue,
+ atomic_read(&dev->reset_cmds_out) == 0 ||
+ lockup_detected(h));
+
+ if (unlikely(lockup_detected(h))) {
+ dev_warn(&h->pdev->dev,
+ "Controller lockup detected during reset wait\n");
+ mutex_unlock(&h->reset_mutex);
+ rc = -ENODEV;
+ }
+
+ if (unlikely(rc))
+ atomic_set(&dev->reset_cmds_out, 0);
+
+ mutex_unlock(&h->reset_mutex);
+ return rc;
+}
+
static void hpsa_get_raid_level(struct ctlr_info *h,
unsigned char *scsi3addr, unsigned char *raid_level)
{
@@ -2328,23 +2867,23 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
struct ErrorInfo *ei;
c = cmd_alloc(h);
- if (c == NULL) {
- dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
- return -ENOMEM;
- }
+
if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
sizeof(this_device->raid_map), 0,
scsi3addr, TYPE_CMD)) {
- dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
+ dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
cmd_free(h, c);
- return -ENOMEM;
+ return -1;
}
- hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ if (rc)
+ goto out;
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(h, c);
- cmd_free(h, c);
- return -1;
+ rc = -1;
+ goto out;
}
cmd_free(h, c);
@@ -2356,6 +2895,9 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
}
hpsa_debug_map_buff(h, rc, &this_device->raid_map);
return rc;
+out:
+ cmd_free(h, c);
+ return rc;
}
static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
@@ -2375,7 +2917,8 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
c->Request.CDB[2] = bmic_device_index & 0xff;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
- hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+ NO_TIMEOUT);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(h, c);
@@ -2438,6 +2981,7 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
this_device->offload_config = 0;
this_device->offload_enabled = 0;
+ this_device->offload_to_be_enabled = 0;
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
@@ -2461,6 +3005,7 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
if (hpsa_get_raid_map(h, scsi3addr, this_device))
this_device->offload_enabled = 0;
}
+ this_device->offload_to_be_enabled = this_device->offload_enabled;
out:
kfree(buf);
return;
@@ -2495,10 +3040,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
struct ErrorInfo *ei;
c = cmd_alloc(h);
- if (c == NULL) { /* trouble... */
- dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
- return -1;
- }
+
/* address the controller */
memset(scsi3addr, 0, sizeof(scsi3addr));
if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
@@ -2508,7 +3050,10 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
}
if (extended_response)
c->Request.CDB[1] = extended_response;
- hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ if (rc)
+ goto out;
ei = c->err_info;
if (ei->CommandStatus != 0 &&
ei->CommandStatus != CMD_DATA_UNDERRUN) {
@@ -2600,8 +3145,10 @@ static int hpsa_volume_offline(struct ctlr_info *h,
unsigned char scsi3addr[])
{
struct CommandList *c;
- unsigned char *sense, sense_key, asc, ascq;
- int ldstat = 0;
+ unsigned char *sense;
+ u8 sense_key, asc, ascq;
+ int sense_len;
+ int rc, ldstat = 0;
u16 cmd_status;
u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
@@ -2609,14 +3156,19 @@ static int hpsa_volume_offline(struct ctlr_info *h,
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
c = cmd_alloc(h);
- if (!c)
- return 0;
+
(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
- hpsa_scsi_do_simple_cmd_core(h, c);
+ rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
+ if (rc) {
+ cmd_free(h, c);
+ return 0;
+ }
sense = c->err_info->SenseInfo;
- sense_key = sense[2];
- asc = sense[12];
- ascq = sense[13];
+ if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
+ sense_len = sizeof(c->err_info->SenseInfo);
+ else
+ sense_len = c->err_info->SenseLen;
+ decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
cmd_status = c->err_info->CommandStatus;
scsi_status = c->err_info->ScsiStatus;
cmd_free(h, c);
@@ -2656,6 +3208,52 @@ static int hpsa_volume_offline(struct ctlr_info *h,
return 0;
}
+/*
+ * Find out whether a logical device supports aborts by simply trying one.
+ * Smart Array may claim not to support aborts on logical drives, but
+ * if an MSA2000 is connected, its drives will be presented by the
+ * Smart Array as logical drives, and aborts may be sent to those
+ * devices successfully. So the simplest way to find out is to try
+ * an abort and see how the device responds.
+ */
+static int hpsa_device_supports_aborts(struct ctlr_info *h,
+ unsigned char *scsi3addr)
+{
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+ int rc = 0;
+
+ u64 tag = (u64) -1; /* bogus tag */
+
+ /* Assume that physical devices support aborts */
+ if (!is_logical_dev_addr_mode(scsi3addr))
+ return 1;
+
+ c = cmd_alloc(h);
+
+ (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
+ (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
+ /* no unmap needed here because no data xfer. */
+ ei = c->err_info;
+ switch (ei->CommandStatus) {
+ case CMD_INVALID:
+ rc = 0;
+ break;
+ case CMD_UNABORTABLE:
+ case CMD_ABORT_FAILED:
+ rc = 1;
+ break;
+ case CMD_TMF_STATUS:
+ rc = hpsa_evaluate_tmf_status(h, c);
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ cmd_free(h, c);
+ return rc;
+}
+
static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
unsigned char *is_OBDR_device)
@@ -2708,6 +3306,8 @@ static int hpsa_update_device_info(struct ctlr_info *h,
this_device->raid_level = RAID_UNKNOWN;
this_device->offload_config = 0;
this_device->offload_enabled = 0;
+ this_device->offload_to_be_enabled = 0;
+ this_device->hba_ioaccel_enabled = 0;
this_device->volume_offline = 0;
this_device->queue_depth = h->nr_cmds;
}
@@ -2721,7 +3321,6 @@ static int hpsa_update_device_info(struct ctlr_info *h,
strncmp(obdr_sig, OBDR_TAPE_SIG,
OBDR_SIG_LEN) == 0);
}
-
kfree(inq_buff);
return 0;
@@ -2730,6 +3329,31 @@ bail_out:
return 1;
}
+static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
+{
+ unsigned long flags;
+ int rc, entry;
+ /*
+ * See if this device supports aborts. If we already know
+ * the device, we already know the answer; otherwise we have
+ * to find out by trying an abort.
+ */
+ spin_lock_irqsave(&h->devlock, flags);
+ rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
+ if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
+ entry >= 0 && entry < h->ndevices) {
+ dev->supports_aborts = h->dev[entry]->supports_aborts;
+ spin_unlock_irqrestore(&h->devlock, flags);
+ } else {
+ spin_unlock_irqrestore(&h->devlock, flags);
+ dev->supports_aborts =
+ hpsa_device_supports_aborts(h, scsi3addr);
+ if (dev->supports_aborts < 0)
+ dev->supports_aborts = 0;
+ }
+}
+
static unsigned char *ext_target_model[] = {
"MSA2012",
"MSA2024",
@@ -2835,6 +3459,7 @@ static int add_ext_target_dev(struct ctlr_info *h,
(*n_ext_target_devs)++;
hpsa_set_bus_target_lun(this_device,
tmpdevice->bus, tmpdevice->target, 0);
+ hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
set_bit(tmpdevice->target, lunzerobits);
return 1;
}
@@ -2850,88 +3475,23 @@ static int add_ext_target_dev(struct ctlr_info *h,
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
- struct ReportExtendedLUNdata *physicals = NULL;
- int responsesize = 24; /* size of physical extended response */
- int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
- u32 nphysicals = 0; /* number of reported physical devs */
- int found = 0; /* found match (1) or not (0) */
- u32 find; /* handle we need to match */
+ struct io_accel2_cmd *c2 =
+ &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
+ unsigned long flags;
int i;
- struct scsi_cmnd *scmd; /* scsi command within request being aborted */
- struct hpsa_scsi_dev_t *d; /* device of request being aborted */
- struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
- __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
- __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
-
- if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
- return 0; /* no match */
-
- /* point to the ioaccel2 device handle */
- c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
- if (c2a == NULL)
- return 0; /* no match */
-
- scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
- if (scmd == NULL)
- return 0; /* no match */
-
- d = scmd->device->hostdata;
- if (d == NULL)
- return 0; /* no match */
-
- it_nexus = cpu_to_le32(d->ioaccel_handle);
- scsi_nexus = c2a->scsi_nexus;
- find = le32_to_cpu(c2a->scsi_nexus);
-
- if (h->raid_offload_debug > 0)
- dev_info(&h->pdev->dev,
- "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
- __func__, scsi_nexus,
- d->device_id[0], d->device_id[1], d->device_id[2],
- d->device_id[3], d->device_id[4], d->device_id[5],
- d->device_id[6], d->device_id[7], d->device_id[8],
- d->device_id[9], d->device_id[10], d->device_id[11],
- d->device_id[12], d->device_id[13], d->device_id[14],
- d->device_id[15]);
-
- /* Get the list of physical devices */
- physicals = kzalloc(reportsize, GFP_KERNEL);
- if (physicals == NULL)
- return 0;
- if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) {
- dev_err(&h->pdev->dev,
- "Can't lookup %s device handle: report physical LUNs failed.\n",
- "HP SSD Smart Path");
- kfree(physicals);
- return 0;
- }
- nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
- responsesize;
-
- /* find ioaccel2 handle in list of physicals: */
- for (i = 0; i < nphysicals; i++) {
- struct ext_report_lun_entry *entry = &physicals->LUN[i];
-
- /* handle is in bytes 28-31 of each lun */
- if (entry->ioaccel_handle != find)
- continue; /* didn't match */
- found = 1;
- memcpy(scsi3addr, entry->lunid, 8);
- if (h->raid_offload_debug > 0)
- dev_info(&h->pdev->dev,
- "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
- __func__, find,
- entry->ioaccel_handle, scsi3addr);
- break; /* found it */
- }
-
- kfree(physicals);
- if (found)
- return 1;
- else
- return 0;
+ spin_lock_irqsave(&h->devlock, flags);
+ for (i = 0; i < h->ndevices; i++)
+ if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
+ memcpy(scsi3addr, h->dev[i]->scsi3addr,
+ sizeof(h->dev[i]->scsi3addr));
+ spin_unlock_irqrestore(&h->devlock, flags);
+ return 1;
+ }
+ spin_unlock_irqrestore(&h->devlock, flags);
+ return 0;
}
+
/*
* Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
* logdev. The number of luns in physdev and logdev are returned in
@@ -3036,6 +3596,8 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
(struct ext_report_lun_entry *) lunaddrbytes;
dev->ioaccel_handle = rle->ioaccel_handle;
+ if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
+ dev->hba_ioaccel_enabled = 1;
memset(id_phys, 0, sizeof(*id_phys));
rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
@@ -3050,6 +3612,7 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
else
dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
atomic_set(&dev->ioaccel_cmds_out, 0);
+ atomic_set(&dev->reset_cmds_out, 0);
}
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
@@ -3142,16 +3705,19 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
/* Figure out where the LUN ID info is coming from */
lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
i, nphysicals, nlogicals, physdev_list, logdev_list);
- /* skip masked physical devices. */
- if (lunaddrbytes[3] & 0xC0 &&
- i < nphysicals + (raid_ctlr_position == 0))
- continue;
+
+ /* skip masked non-disk devices */
+ if (MASKED_DEVICE(lunaddrbytes))
+ if (i < nphysicals + (raid_ctlr_position == 0) &&
+ NON_DISK_PHYS_DEV(lunaddrbytes))
+ continue;
/* Get device type, vendor, model, device id */
if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
&is_OBDR))
continue; /* skip it if we can't talk to it. */
figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
+ hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
this_device = currentsd[ncurrent];
/*
@@ -3170,6 +3736,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
*this_device = *tmpdevice;
+ /* do not expose masked devices */
+ if (MASKED_DEVICE(lunaddrbytes) &&
+ i < nphysicals + (raid_ctlr_position == 0)) {
+ if (h->hba_mode_enabled)
+ dev_warn(&h->pdev->dev,
+ "Masked physical device detected\n");
+ this_device->expose_state = HPSA_DO_NOT_EXPOSE;
+ } else {
+ this_device->expose_state =
+ HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
+ }
+
switch (this_device->devtype) {
case TYPE_ROM:
/* We don't *really* support actual CD-ROM devices,
@@ -3183,34 +3761,31 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
ncurrent++;
break;
case TYPE_DISK:
- if (h->hba_mode_enabled) {
- /* never use raid mapper in HBA mode */
- this_device->offload_enabled = 0;
- ncurrent++;
- break;
- } else if (h->acciopath_status) {
- if (i >= nphysicals) {
- ncurrent++;
- break;
- }
- } else {
- if (i < nphysicals)
- break;
+ if (i >= nphysicals) {
ncurrent++;
break;
}
- if (h->transMethod & CFGTBL_Trans_io_accel1 ||
- h->transMethod & CFGTBL_Trans_io_accel2) {
- hpsa_get_ioaccel_drive_info(h, this_device,
- lunaddrbytes, id_phys);
- atomic_set(&this_device->ioaccel_cmds_out, 0);
- ncurrent++;
- }
+
+ if (h->hba_mode_enabled)
+ /* never use raid mapper in HBA mode */
+ this_device->offload_enabled = 0;
+ else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
+ h->transMethod & CFGTBL_Trans_io_accel2))
+ break;
+
+ hpsa_get_ioaccel_drive_info(h, this_device,
+ lunaddrbytes, id_phys);
+ atomic_set(&this_device->ioaccel_cmds_out, 0);
+ ncurrent++;
break;
case TYPE_TAPE:
case TYPE_MEDIUM_CHANGER:
ncurrent++;
break;
+ case TYPE_ENCLOSURE:
+ if (h->hba_mode_enabled)
+ ncurrent++;
+ break;
case TYPE_RAID:
/* Only present the Smartarray HBA as a RAID controller.
* If it's a RAID controller other than the HBA itself
@@ -3227,7 +3802,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
if (ncurrent >= HPSA_MAX_DEVICES)
break;
}
- hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent);
adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
kfree(tmpdevice);
@@ -3260,7 +3834,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
struct scsi_cmnd *cmd)
{
struct scatterlist *sg;
- int use_sg, i, sg_index, chained;
+ int use_sg, i, sg_limit, chained, last_sg;
struct SGDescriptor *curr_sg;
BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
@@ -3272,22 +3846,39 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
if (!use_sg)
goto sglist_finished;
+ /*
+ * If the number of entries is greater than the max for a single list,
+ * then we have a chained list; we will set up all but one entry in the
+ * first list (the last entry is saved for link information);
+ * otherwise, we don't have a chained list and we'll set up each
+ * entry in the single list.
+ */
curr_sg = cp->SG;
- chained = 0;
- sg_index = 0;
- scsi_for_each_sg(cmd, sg, use_sg, i) {
- if (i == h->max_cmd_sg_entries - 1 &&
- use_sg > h->max_cmd_sg_entries) {
- chained = 1;
- curr_sg = h->cmd_sg_list[cp->cmdindex];
- sg_index = 0;
- }
+ chained = use_sg > h->max_cmd_sg_entries;
+ sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
+ last_sg = scsi_sg_count(cmd) - 1;
+ scsi_for_each_sg(cmd, sg, sg_limit, i) {
hpsa_set_sg_descriptor(curr_sg, sg);
curr_sg++;
}
+ if (chained) {
+ /*
+ * Continue with the chained list. Set curr_sg to the chained
+ * list. Modify the limit to the total count less the entries
+ * we've already set up. Resume the scan at the list entry
+ * where the previous loop left off.
+ */
+ curr_sg = h->cmd_sg_list[cp->cmdindex];
+ sg_limit = use_sg - sg_limit;
+ for_each_sg(sg, sg, sg_limit, i) {
+ hpsa_set_sg_descriptor(curr_sg, sg);
+ curr_sg++;
+ }
+ }
+
/* Back the pointer up to the last entry and mark it as "last". */
- (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
+ (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
if (use_sg + chained > h->maxSG)
h->maxSG = use_sg + chained;
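Concretely, with a 32-entry inline list, a 40-entry transfer puts 31 entries inline (the last inline slot carries the chain link) and the remaining 9 in the chain block. A tiny standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int use_sg = 40, max_inline = 32;	/* illustrative sizes */
	int chained = use_sg > max_inline;
	int inline_entries = chained ? max_inline - 1 : use_sg;
	int chain_entries = chained ? use_sg - inline_entries : 0;

	/* 31 inline + 9 chained; the freed inline slot carries the link */
	printf("inline=%d chained=%d\n", inline_entries, chain_entries);
	return 0;
}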
@@ -3530,10 +4121,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
u32 len;
u32 total_len = 0;
- if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
- atomic_dec(&phys_disk->ioaccel_cmds_out);
- return IO_ACCEL_INELIGIBLE;
- }
+ BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
atomic_dec(&phys_disk->ioaccel_cmds_out);
@@ -3556,8 +4144,19 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
}
if (use_sg) {
- BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
curr_sg = cp->sg;
+ if (use_sg > h->ioaccel_maxsg) {
+ addr64 = le64_to_cpu(
+ h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
+ curr_sg->address = cpu_to_le64(addr64);
+ curr_sg->length = 0;
+ curr_sg->reserved[0] = 0;
+ curr_sg->reserved[1] = 0;
+ curr_sg->reserved[2] = 0;
+ curr_sg->chain_indicator = 0x80;
+
+ curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
+ }
scsi_for_each_sg(cmd, sg, use_sg, i) {
addr64 = (u64) sg_dma_address(sg);
len = sg_dma_len(sg);
@@ -3602,14 +4201,22 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
memcpy(cp->cdb, cdb, sizeof(cp->cdb));
- /* fill in sg elements */
- cp->sg_count = (u8) use_sg;
-
cp->data_len = cpu_to_le32(total_len);
cp->err_ptr = cpu_to_le64(c->busaddr +
offsetof(struct io_accel2_cmd, error_data));
cp->err_len = cpu_to_le32(sizeof(cp->error_data));
+ /* fill in sg elements */
+ if (use_sg > h->ioaccel_maxsg) {
+ cp->sg_count = 1;
+ if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ scsi_dma_unmap(cmd);
+ return -1;
+ }
+ } else
+ cp->sg_count = (u8) use_sg;
+
enqueue_cmd_and_start_io(h, c);
return 0;
}
@@ -3992,7 +4599,11 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
dev->phys_disk[map_index]);
}
-/* Submit commands down the "normal" RAID stack path */
+/*
+ * Submit commands down the "normal" RAID stack path.
+ * All callers of hpsa_ciss_submit must check lockup_detected
+ * beforehand, both before and (optionally) after calling cmd_alloc.
+ */
static int hpsa_ciss_submit(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd,
unsigned char scsi3addr[])
@@ -4007,7 +4618,6 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
/* Fill in the request block... */
c->Request.Timeout = 0;
- memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
c->Request.CDBLen = cmd->cmd_len;
memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
@@ -4050,7 +4660,7 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
}
if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
- cmd_free(h, c);
+ hpsa_cmd_resolve_and_free(h, c);
return SCSI_MLQUEUE_HOST_BUSY;
}
enqueue_cmd_and_start_io(h, c);
@@ -4058,25 +4668,125 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
return 0;
}
+static void hpsa_cmd_init(struct ctlr_info *h, int index,
+ struct CommandList *c)
+{
+ dma_addr_t cmd_dma_handle, err_dma_handle;
+
+ /* Zero out all of commandlist except the last field, refcount */
+ memset(c, 0, offsetof(struct CommandList, refcount));
+ c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
+ cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
+ c->err_info = h->errinfo_pool + index;
+ memset(c->err_info, 0, sizeof(*c->err_info));
+ err_dma_handle = h->errinfo_pool_dhandle
+ + index * sizeof(*c->err_info);
+ c->cmdindex = index;
+ c->busaddr = (u32) cmd_dma_handle;
+ c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
+ c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
+ c->h = h;
+ c->scsi_cmd = SCSI_CMD_IDLE;
+}
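hpsa_cmd_init() zeroes everything up to, but not including, the trailing refcount by bounding the memset with offsetof(); this works only because refcount is the struct's last member. A self-contained illustration of the trick, using a made-up struct:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct cmd {
	int index;
	char cdb[16];
	int refcount;	/* must survive reinitialization */
};

int main(void)
{
	struct cmd c = { .index = 7, .refcount = 2 };

	/* Zero everything before the last member, as hpsa_cmd_init() does */
	memset(&c, 0, offsetof(struct cmd, refcount));
	printf("index=%d refcount=%d\n", c.index, c.refcount);	/* 0 and 2 */
	return 0;
}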
+
+static void hpsa_preinitialize_commands(struct ctlr_info *h)
+{
+ int i;
+
+ for (i = 0; i < h->nr_cmds; i++) {
+ struct CommandList *c = h->cmd_pool + i;
+
+ hpsa_cmd_init(h, i, c);
+ atomic_set(&c->refcount, 0);
+ }
+}
+
+static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
+ struct CommandList *c)
+{
+ dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
+
+ BUG_ON(c->cmdindex != index);
+
+ memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
+ memset(c->err_info, 0, sizeof(*c->err_info));
+ c->busaddr = (u32) cmd_dma_handle;
+}
+
+static int hpsa_ioaccel_submit(struct ctlr_info *h,
+ struct CommandList *c, struct scsi_cmnd *cmd,
+ unsigned char *scsi3addr)
+{
+ struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+ int rc = IO_ACCEL_INELIGIBLE;
+
+ cmd->host_scribble = (unsigned char *) c;
+
+ if (dev->offload_enabled) {
+ hpsa_cmd_init(h, c->cmdindex, c);
+ c->cmd_type = CMD_SCSI;
+ c->scsi_cmd = cmd;
+ rc = hpsa_scsi_ioaccel_raid_map(h, c);
+ if (rc < 0) /* scsi_dma_map failed. */
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ } else if (dev->hba_ioaccel_enabled) {
+ hpsa_cmd_init(h, c->cmdindex, c);
+ c->cmd_type = CMD_SCSI;
+ c->scsi_cmd = cmd;
+ rc = hpsa_scsi_ioaccel_direct_map(h, c);
+ if (rc < 0) /* scsi_dma_map failed. */
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ }
+ return rc;
+}
+
static void hpsa_command_resubmit_worker(struct work_struct *work)
{
struct scsi_cmnd *cmd;
struct hpsa_scsi_dev_t *dev;
- struct CommandList *c =
- container_of(work, struct CommandList, work);
+ struct CommandList *c = container_of(work, struct CommandList, work);
cmd = c->scsi_cmd;
dev = cmd->device->hostdata;
if (!dev) {
cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
- return;
+ return hpsa_cmd_free_and_done(c->h, c, cmd);
+ }
+ if (c->reset_pending)
+ return hpsa_cmd_resolve_and_free(c->h, c);
+ if (c->abort_pending)
+ return hpsa_cmd_abort_and_free(c->h, c, cmd);
+ if (c->cmd_type == CMD_IOACCEL2) {
+ struct ctlr_info *h = c->h;
+ struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
+ int rc;
+
+ if (c2->error_data.serv_response ==
+ IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
+ rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
+ if (rc == 0)
+ return;
+ if (rc == SCSI_MLQUEUE_HOST_BUSY) {
+ /*
+ * If we get here, it means dma mapping failed.
+ * Try again via scsi mid layer, which will
+ * then get SCSI_MLQUEUE_HOST_BUSY.
+ */
+ cmd->result = DID_IMM_RETRY << 16;
+ return hpsa_cmd_free_and_done(h, c, cmd);
+ }
+ /* else, fall thru and resubmit down CISS path */
+ }
}
+ hpsa_cmd_partial_init(c->h, c->cmdindex, c);
if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
/*
* If we get here, it means dma mapping failed. Try
* again via scsi mid layer, which will then get
* SCSI_MLQUEUE_HOST_BUSY.
+ *
+ * hpsa_ciss_submit will have already freed c
+ * if it encountered a dma mapping failure.
*/
cmd->result = DID_IMM_RETRY << 16;
cmd->scsi_done(cmd);
@@ -4094,30 +4804,24 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
/* Get the ptr to our adapter structure out of cmd->host. */
h = sdev_to_hba(cmd->device);
+
+ BUG_ON(cmd->request->tag < 0);
+
dev = cmd->device->hostdata;
if (!dev) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
}
+
memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
if (unlikely(lockup_detected(h))) {
- cmd->result = DID_ERROR << 16;
- cmd->scsi_done(cmd);
- return 0;
- }
- c = cmd_alloc(h);
- if (c == NULL) { /* trouble... */
- dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- if (unlikely(lockup_detected(h))) {
- cmd->result = DID_ERROR << 16;
- cmd_free(h, c);
+ cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
}
+ c = cmd_tagged_alloc(h, cmd);
/*
* Call alternate submit routine for I/O accelerated commands.
@@ -4126,27 +4830,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
if (likely(cmd->retries == 0 &&
cmd->request->cmd_type == REQ_TYPE_FS &&
h->acciopath_status)) {
-
- cmd->host_scribble = (unsigned char *) c;
- c->cmd_type = CMD_SCSI;
- c->scsi_cmd = cmd;
-
- if (dev->offload_enabled) {
- rc = hpsa_scsi_ioaccel_raid_map(h, c);
- if (rc == 0)
- return 0; /* Sent on ioaccel path */
- if (rc < 0) { /* scsi_dma_map failed. */
- cmd_free(h, c);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- } else if (dev->ioaccel_handle) {
- rc = hpsa_scsi_ioaccel_direct_map(h, c);
- if (rc == 0)
- return 0; /* Sent on direct map path */
- if (rc < 0) { /* scsi_dma_map failed. */
- cmd_free(h, c);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
+ rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
+ if (rc == 0)
+ return 0;
+ if (rc == SCSI_MLQUEUE_HOST_BUSY) {
+ hpsa_cmd_resolve_and_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
}
}
return hpsa_ciss_submit(h, c, cmd, scsi3addr);
@@ -4228,22 +4917,16 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
return finished;
}
-static void hpsa_unregister_scsi(struct ctlr_info *h)
-{
- /* we are being forcibly unloaded, and may not refuse. */
- scsi_remove_host(h->scsi_host);
- scsi_host_put(h->scsi_host);
- h->scsi_host = NULL;
-}
-
-static int hpsa_register_scsi(struct ctlr_info *h)
+static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
struct Scsi_Host *sh;
int error;
sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
- if (sh == NULL)
- goto fail;
+ if (sh == NULL) {
+ dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
+ return -ENOMEM;
+ }
sh->io_port = 0;
sh->n_io_port = 0;
@@ -4252,80 +4935,156 @@ static int hpsa_register_scsi(struct ctlr_info *h)
sh->max_cmd_len = MAX_COMMAND_SIZE;
sh->max_lun = HPSA_MAX_LUN;
sh->max_id = HPSA_MAX_LUN;
- sh->can_queue = h->nr_cmds -
- HPSA_CMDS_RESERVED_FOR_ABORTS -
- HPSA_CMDS_RESERVED_FOR_DRIVER -
- HPSA_MAX_CONCURRENT_PASSTHRUS;
+ sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
sh->cmd_per_lun = sh->can_queue;
sh->sg_tablesize = h->maxsgentries;
- h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq;
- error = scsi_add_host(sh, &h->pdev->dev);
- if (error)
- goto fail_host_put;
- scsi_scan_host(sh);
+ error = scsi_init_shared_tag_map(sh, sh->can_queue);
+ if (error) {
+ dev_err(&h->pdev->dev,
+ "%s: scsi_init_shared_tag_map failed for controller %d\n",
+ __func__, h->ctlr);
+ scsi_host_put(sh);
+ return error;
+ }
+ h->scsi_host = sh;
return 0;
+}
- fail_host_put:
- dev_err(&h->pdev->dev, "%s: scsi_add_host"
- " failed for controller %d\n", __func__, h->ctlr);
- scsi_host_put(sh);
- return error;
- fail:
- dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
- " failed for controller %d\n", __func__, h->ctlr);
- return -ENOMEM;
+static int hpsa_scsi_add_host(struct ctlr_info *h)
+{
+ int rv;
+
+ rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
+ if (rv) {
+ dev_err(&h->pdev->dev, "scsi_add_host failed\n");
+ return rv;
+ }
+ scsi_scan_host(h->scsi_host);
+ return 0;
}
-static int wait_for_device_to_become_ready(struct ctlr_info *h,
- unsigned char lunaddr[])
+/*
+ * The block layer has already gone to the trouble of picking out a unique,
+ * small-integer tag for this request. We use an offset from that value as
+ * an index to select our command block. (The offset allows us to reserve the
+ * low-numbered entries for our own uses.)
+ */
+static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
+{
+ int idx = scmd->request->tag;
+
+ if (idx < 0)
+ return idx;
+
+ /* Offset to leave space for internal cmds. */
+ return idx += HPSA_NRESERVED_CMDS;
+}
+
+/*
+ * Send a TEST_UNIT_READY command to the specified LUN using the specified
+ * reply queue; returns zero if the unit is ready, and non-zero otherwise.
+ */
+static int hpsa_send_test_unit_ready(struct ctlr_info *h,
+ struct CommandList *c, unsigned char lunaddr[],
+ int reply_queue)
+{
+ int rc;
+
+ /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
+ (void) fill_cmd(c, TEST_UNIT_READY, h,
+ NULL, 0, 0, lunaddr, TYPE_CMD);
+ rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+ if (rc)
+ return rc;
+ /* no unmap needed here because no data xfer. */
+
+ /* Check if the unit is already ready. */
+ if (c->err_info->CommandStatus == CMD_SUCCESS)
+ return 0;
+
+ /*
+ * The first command sent after reset will receive "unit attention" to
+ * indicate that the LUN has been reset; this is actually what we're
+ * looking for (but success is good too).
+ */
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
+ (c->err_info->SenseInfo[2] == NO_SENSE ||
+ c->err_info->SenseInfo[2] == UNIT_ATTENTION))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
+ * returns zero when the unit is ready, and non-zero when giving up.
+ */
+static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
+ struct CommandList *c,
+ unsigned char lunaddr[], int reply_queue)
{
int rc;
int count = 0;
int waittime = 1; /* seconds */
- struct CommandList *c;
-
- c = cmd_alloc(h);
- if (!c) {
- dev_warn(&h->pdev->dev, "out of memory in "
- "wait_for_device_to_become_ready.\n");
- return IO_ERROR;
- }
/* Send test unit ready until device ready, or give up. */
- while (count < HPSA_TUR_RETRY_LIMIT) {
+ for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
- /* Wait for a bit. do this first, because if we send
+ /*
+ * Wait for a bit. Do this first, because if we send
* the TUR right away, the reset will just abort it.
*/
msleep(1000 * waittime);
- count++;
- rc = 0; /* Device ready. */
+
+ rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
+ if (!rc)
+ break;
/* Increase wait time with each try, up to a point. */
if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
- waittime = waittime * 2;
+ waittime *= 2;
- /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
- (void) fill_cmd(c, TEST_UNIT_READY, h,
- NULL, 0, 0, lunaddr, TYPE_CMD);
- hpsa_scsi_do_simple_cmd_core(h, c);
- /* no unmap needed here because no data xfer. */
+ dev_warn(&h->pdev->dev,
+ "waiting %d secs for device to become ready.\n",
+ waittime);
+ }
- if (c->err_info->CommandStatus == CMD_SUCCESS)
- break;
+ return rc;
+}
- if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
- c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
- (c->err_info->SenseInfo[2] == NO_SENSE ||
- c->err_info->SenseInfo[2] == UNIT_ATTENTION))
- break;
+static int wait_for_device_to_become_ready(struct ctlr_info *h,
+ unsigned char lunaddr[],
+ int reply_queue)
+{
+ int first_queue;
+ int last_queue;
+ int rq;
+ int rc = 0;
+ struct CommandList *c;
- dev_warn(&h->pdev->dev, "waiting %d secs "
- "for device to become ready.\n", waittime);
- rc = 1; /* device not ready. */
+ c = cmd_alloc(h);
+
+ /*
+ * If no specific reply queue was requested, then send the TUR
+ * repeatedly, requesting a reply on each reply queue; otherwise execute
+ * the loop exactly once using only the specified queue.
+ */
+ if (reply_queue == DEFAULT_REPLY_QUEUE) {
+ first_queue = 0;
+ last_queue = h->nreply_queues - 1;
+ } else {
+ first_queue = reply_queue;
+ last_queue = reply_queue;
+ }
+
+ for (rq = first_queue; rq <= last_queue; rq++) {
+ rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
+ if (rc)
+ break;
}
if (rc)
@@ -4345,6 +5104,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
int rc;
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
+ char msg[40];
/* find the controller to which the command to be aborted was sent */
h = sdev_to_hba(scsicmd->device);
@@ -4356,19 +5116,38 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
dev = scsicmd->device->hostdata;
if (!dev) {
- dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
- "device lookup failed.\n");
+ dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
return FAILED;
}
- dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
- h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
- /* send a reset to the SCSI LUN which the command was sent to */
- rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
- if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
+
+ /* if controller locked up, we can guarantee command won't complete */
+ if (lockup_detected(h)) {
+ sprintf(msg, "cmd %d RESET FAILED, lockup detected",
+ hpsa_get_cmd_index(scsicmd));
+ hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
+ return FAILED;
+ }
+
+ /* this reset request might be the result of a lockup; check */
+ if (detect_controller_lockup(h)) {
+ sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
+ hpsa_get_cmd_index(scsicmd));
+ hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
+ return FAILED;
+ }
+
+ /* Do not attempt on controller */
+ if (is_hba_lunid(dev->scsi3addr))
return SUCCESS;
- dev_warn(&h->pdev->dev, "resetting device failed.\n");
- return FAILED;
+ hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");
+
+ /* send a reset to the SCSI LUN which the command was sent to */
+ rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
+ DEFAULT_REPLY_QUEUE);
+ sprintf(msg, "reset %s", rc == 0 ? "completed successfully" : "failed");
+ hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
+ return rc == 0 ? SUCCESS : FAILED;
}
static void swizzle_abort_tag(u8 *tag)
@@ -4412,7 +5191,7 @@ static void hpsa_get_tag(struct ctlr_info *h,
}
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
- struct CommandList *abort, int swizzle)
+ struct CommandList *abort, int reply_queue)
{
int rc = IO_OK;
struct CommandList *c;
@@ -4420,19 +5199,15 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
__le32 tagupper, taglower;
c = cmd_alloc(h);
- if (c == NULL) { /* trouble... */
- dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
- return -ENOMEM;
- }
/* fill_cmd can't fail here, no buffer to map */
- (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
+ (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
0, 0, scsi3addr, TYPE_MSG);
- if (swizzle)
+ if (h->needs_abort_tags_swizzled)
swizzle_abort_tag(&c->Request.CDB[4]);
- hpsa_scsi_do_simple_cmd_core(h, c);
+ (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
hpsa_get_tag(h, abort, &taglower, &tagupper);
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
+ dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
__func__, tagupper, taglower);
/* no unmap needed here because no data xfer. */
@@ -4440,6 +5215,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
switch (ei->CommandStatus) {
case CMD_SUCCESS:
break;
+ case CMD_TMF_STATUS:
+ rc = hpsa_evaluate_tmf_status(h, c);
+ break;
case CMD_UNABORTABLE: /* Very common, don't make noise. */
rc = -1;
break;
@@ -4456,6 +5234,48 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
return rc;
}
+static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
+ struct CommandList *command_to_abort, int reply_queue)
+{
+ struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
+ struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
+ struct io_accel2_cmd *c2a =
+ &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
+ struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
+ struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
+
+ /*
+ * We're overlaying struct hpsa_tmf_struct on top of something which
+ * was allocated as a struct io_accel2_cmd, so we better be sure it
+ * actually fits, and doesn't overrun the error info space.
+ */
+ BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
+ sizeof(struct io_accel2_cmd));
+ BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
+ offsetof(struct hpsa_tmf_struct, error_len) +
+ sizeof(ac->error_len));
+
+ c->cmd_type = IOACCEL2_TMF;
+ c->scsi_cmd = SCSI_CMD_BUSY;
+
+ /* Adjust the DMA address to point to the accelerated command buffer */
+ c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
+ (c->cmdindex * sizeof(struct io_accel2_cmd));
+ BUG_ON(c->busaddr & 0x0000007F);
+
+ memset(ac, 0, sizeof(*c2)); /* yes this is correct */
+ ac->iu_type = IOACCEL2_IU_TMF_TYPE;
+ ac->reply_queue = reply_queue;
+ ac->tmf = IOACCEL2_TMF_ABORT;
+ ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
+ memset(ac->lun_id, 0, sizeof(ac->lun_id));
+ ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
+ ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
+ ac->error_ptr = cpu_to_le64(c->busaddr +
+ offsetof(struct io_accel2_cmd, error_data));
+ ac->error_len = cpu_to_le32(sizeof(c2->error_data));
+}
+
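The BUILD_BUG_ON/BUG_ON pair above pins down the struct overlay at compile and run time: the TMF request is laid over storage allocated as an io_accel2_cmd, so it must both fit and keep its trailing fields clear of the error info area. A minimal userspace sketch of the same guard, with hypothetical stand-in structs and C11 static_assert in place of BUILD_BUG_ON:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct big_cmd {                 /* stands in for struct io_accel2_cmd */
	uint8_t payload[96];
	uint8_t error_data[32];
};

struct overlay_tmf {             /* stands in for struct hpsa_tmf_struct */
	uint8_t header[80];
	uint64_t error_ptr;
	uint32_t error_len;
};

/* The overlay must fit inside the struct it is laid over... */
static_assert(sizeof(struct overlay_tmf) <= sizeof(struct big_cmd),
	      "overlay larger than host struct");
/* ...and its trailing fields must not overrun the error info space. */
static_assert(offsetof(struct overlay_tmf, error_len) +
	      sizeof(uint32_t) <= offsetof(struct big_cmd, error_data),
	      "overlay overruns error_data");

int main(void) { return 0; }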
/* ioaccel2 path firmware cannot handle abort task requests.
* Change abort requests to physical target reset, and send to the
* address of the physical disk used for the ioaccel 2 command.
@@ -4464,7 +5284,7 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
*/
static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
- unsigned char *scsi3addr, struct CommandList *abort)
+ unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
int rc = IO_OK;
struct scsi_cmnd *scmd; /* scsi command within request being aborted */
@@ -4483,8 +5303,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
- "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
+ "Reset as abort",
scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
@@ -4506,7 +5327,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
psa[0], psa[1], psa[2], psa[3],
psa[4], psa[5], psa[6], psa[7]);
- rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
+ rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
if (rc != 0) {
dev_warn(&h->pdev->dev,
"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
@@ -4516,7 +5337,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
}
/* wait for device to recover */
- if (wait_for_device_to_become_ready(h, psa) != 0) {
+ if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
dev_warn(&h->pdev->dev,
"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
psa[0], psa[1], psa[2], psa[3],
@@ -4533,25 +5354,94 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
return rc; /* success */
}
-/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
- * tell which kind we're dealing with, so we send the abort both ways. There
- * shouldn't be any collisions between swizzled and unswizzled tags due to the
- * way we construct our tags but we check anyway in case the assumptions which
- * make this true someday become false.
- */
+static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
+ struct CommandList *abort, int reply_queue)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ __le32 taglower, tagupper;
+ struct hpsa_scsi_dev_t *dev;
+ struct io_accel2_cmd *c2;
+
+ dev = abort->scsi_cmd->device->hostdata;
+ if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
+ return -1;
+
+ c = cmd_alloc(h);
+ setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
+ c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
+ (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+ hpsa_get_tag(h, abort, &taglower, &tagupper);
+ dev_dbg(&h->pdev->dev,
+ "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
+ __func__, tagupper, taglower);
+ /* no unmap needed here because no data xfer. */
+
+ dev_dbg(&h->pdev->dev,
+ "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
+ __func__, tagupper, taglower, c2->error_data.serv_response);
+ switch (c2->error_data.serv_response) {
+ case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
+ case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
+ rc = 0;
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
+ case IOACCEL2_SERV_RESPONSE_FAILURE:
+ case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
+ rc = -1;
+ break;
+ default:
+ dev_warn(&h->pdev->dev,
+ "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
+ __func__, tagupper, taglower,
+ c2->error_data.serv_response);
+ rc = -1;
+ }
+ cmd_free(h, c);
+ dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
+ tagupper, taglower);
+ return rc;
+}
+
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
- unsigned char *scsi3addr, struct CommandList *abort)
+ unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
- /* ioccelerator mode 2 commands should be aborted via the
+ /*
+ * ioaccelerator mode 2 commands should be aborted via the
* accelerated path, since RAID path is unaware of these commands,
- * but underlying firmware can't handle abort TMF.
- * Change abort to physical device reset.
+ * but not all underlying firmware can handle abort TMF.
+ * Change abort to physical device reset when abort TMF is unsupported.
*/
- if (abort->cmd_type == CMD_IOACCEL2)
- return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
+ if (abort->cmd_type == CMD_IOACCEL2) {
+ if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
+ return hpsa_send_abort_ioaccel2(h, abort,
+ reply_queue);
+ else
+ return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
+ abort, reply_queue);
+ }
+ return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
+}
- return hpsa_send_abort(h, scsi3addr, abort, 0) &&
- hpsa_send_abort(h, scsi3addr, abort, 1);
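hpsa_send_abort_both_ways() now dispatches on an advertised capability bit rather than blindly sending the abort with both tag formats. A small sketch of this capability-gated fallback pattern (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define TMF_IOACCEL_SUPPORTED (1u << 15)  /* models HPSATMF_IOACCEL_ENABLED */

static int send_native_abort(void) { puts("native abort TMF"); return 0; }
static int reset_as_abort(void)    { puts("target reset fallback"); return 0; }

/* Prefer the native abort TMF when firmware advertises support for it
 * on the accelerated path; otherwise fall back to a device reset. */
static int send_abort(unsigned tmf_flags, bool accelerated_cmd)
{
	if (accelerated_cmd && !(tmf_flags & TMF_IOACCEL_SUPPORTED))
		return reset_as_abort();
	return send_native_abort();
}

int main(void)
{
	send_abort(0, true);                     /* falls back to reset */
	send_abort(TMF_IOACCEL_SUPPORTED, true); /* native abort */
	return 0;
}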
+/* Find out which reply queue a command was meant to return on */
+static int hpsa_extract_reply_queue(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (c->cmd_type == CMD_IOACCEL2)
+ return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
+ return c->Header.ReplyQueue;
+}
+
+/*
+ * Limit concurrency of abort commands to prevent
+ * over-subscription of the reserved command pool
+ */
+static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
+{
+#define ABORT_CMD_WAIT_MSECS 5000
+ return !wait_event_timeout(h->abort_cmd_wait_queue,
+ atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
+ msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
}
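wait_for_available_abort_cmd() behaves like a counting semaphore with a timeout over the command slots reserved for aborts. A userspace model of the same throttle, with a POSIX semaphore standing in for the atomic counter plus wait queue (names hypothetical):

#include <semaphore.h>
#include <stdio.h>
#include <time.h>

#define ABORT_SLOTS	2	/* models h->abort_cmds_available */
#define WAIT_SECS	5	/* models ABORT_CMD_WAIT_MSECS */

static sem_t abort_slots;

/* Returns 0 once a slot is claimed, nonzero on timeout. */
static int wait_for_available_abort_slot(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += WAIT_SECS;
	return sem_timedwait(&abort_slots, &ts);
}

static void release_abort_slot(void)
{
	sem_post(&abort_slots);	/* models atomic_inc + wake_up_all */
}

int main(void)
{
	sem_init(&abort_slots, 0, ABORT_SLOTS);
	if (wait_for_available_abort_slot() == 0) {
		puts("slot claimed, sending abort");
		release_abort_slot();
	}
	return 0;
}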
/* Send an abort for the specified command.
@@ -4561,7 +5451,7 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
- int i, rc;
+ int rc;
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
struct CommandList *abort; /* pointer to command to be aborted */
@@ -4569,27 +5459,19 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
char msg[256]; /* For debug messaging. */
int ml = 0;
__le32 tagupper, taglower;
- int refcount;
+ int refcount, reply_queue;
- /* Find the controller of the command to be aborted */
- h = sdev_to_hba(sc->device);
- if (WARN(h == NULL,
- "ABORT REQUEST FAILED, Controller lookup failed.\n"))
+ if (sc == NULL)
return FAILED;
- if (lockup_detected(h))
+ if (sc->device == NULL)
return FAILED;
- /* Check that controller supports some kind of task abort */
- if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
- !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+ /* Find the controller of the command to be aborted */
+ h = sdev_to_hba(sc->device);
+ if (h == NULL)
return FAILED;
- memset(msg, 0, sizeof(msg));
- ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
- h->scsi_host->host_no, sc->device->channel,
- sc->device->id, sc->device->lun);
-
/* Find the device of the command to be aborted */
dev = sc->device->hostdata;
if (!dev) {
@@ -4598,6 +5480,31 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
return FAILED;
}
+ /* If controller locked up, we can guarantee command won't complete */
+ if (lockup_detected(h)) {
+ hpsa_show_dev_msg(KERN_WARNING, h, dev,
+ "ABORT FAILED, lockup detected");
+ return FAILED;
+ }
+
+ /* This is a good time to check if controller lockup has occurred */
+ if (detect_controller_lockup(h)) {
+ hpsa_show_dev_msg(KERN_WARNING, h, dev,
+ "ABORT FAILED, new lockup detected");
+ return FAILED;
+ }
+
+ /* Check that controller supports some kind of task abort */
+ if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
+ !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+ return FAILED;
+
+ memset(msg, 0, sizeof(msg));
+ ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
+ h->scsi_host->host_no, sc->device->channel,
+ sc->device->id, sc->device->lun,
+ "Aborting command", sc);
+
/* Get SCSI command to be aborted */
abort = (struct CommandList *) sc->host_scribble;
if (abort == NULL) {
@@ -4609,50 +5516,115 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
cmd_free(h, abort);
return SUCCESS;
}
+
+ /* Don't bother trying the abort if we know it won't work. */
+ if (abort->cmd_type != CMD_IOACCEL2 &&
+ abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
+ cmd_free(h, abort);
+ return FAILED;
+ }
+
+ /*
+ * Check that we're aborting the right command.
+ * It's possible the CommandList already completed and got re-used.
+ */
+ if (abort->scsi_cmd != sc) {
+ cmd_free(h, abort);
+ return SUCCESS;
+ }
+
+ abort->abort_pending = true;
hpsa_get_tag(h, abort, &taglower, &tagupper);
+ reply_queue = hpsa_extract_reply_queue(h, abort);
ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
as = abort->scsi_cmd;
if (as != NULL)
- ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
- as->cmnd[0], as->serial_number);
- dev_dbg(&h->pdev->dev, "%s\n", msg);
- dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
- h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
+ ml += sprintf(msg+ml,
+ "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
+ as->cmd_len, as->cmnd[0], as->cmnd[1],
+ as->serial_number);
+ dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
+ hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
+
/*
* Command is in flight, or possibly already completed
* by the firmware (but not to the scsi mid layer) but we can't
* distinguish which. Send the abort down.
*/
- rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
+ if (wait_for_available_abort_cmd(h)) {
+ dev_warn(&h->pdev->dev,
+ "%s FAILED, timeout waiting for an abort command to become available.\n",
+ msg);
+ cmd_free(h, abort);
+ return FAILED;
+ }
+ rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
+ atomic_inc(&h->abort_cmds_available);
+ wake_up_all(&h->abort_cmd_wait_queue);
if (rc != 0) {
- dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
- dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
- h->scsi_host->host_no,
- dev->bus, dev->target, dev->lun);
+ dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
+ hpsa_show_dev_msg(KERN_WARNING, h, dev,
+ "FAILED to abort command");
cmd_free(h, abort);
return FAILED;
}
- dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
+ dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
+ wait_event(h->event_sync_wait_queue,
+ abort->scsi_cmd != sc || lockup_detected(h));
+ cmd_free(h, abort);
+ return !lockup_detected(h) ? SUCCESS : FAILED;
+}
- /* If the abort(s) above completed and actually aborted the
- * command, then the command to be aborted should already be
- * completed. If not, wait around a bit more to see if they
- * manage to complete normally.
- */
-#define ABORT_COMPLETE_WAIT_SECS 30
- for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
- refcount = atomic_read(&abort->refcount);
- if (refcount < 2) {
- cmd_free(h, abort);
- return SUCCESS;
- } else {
- msleep(100);
- }
+/*
+ * For operations with an associated SCSI command, a command block is allocated
+ * at init and managed by cmd_tagged_alloc() and cmd_tagged_free(), using the
+ * block layer request tag as an index into a table of entries. cmd_tagged_free()
+ * is the complement, although cmd_free() may be called instead in corner cases.
+ */
+static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
+ struct scsi_cmnd *scmd)
+{
+ int idx = hpsa_get_cmd_index(scmd);
+ struct CommandList *c = h->cmd_pool + idx;
+
+ if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
+ dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
+ idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
+ /* The index value comes from the block layer, so if it's out of
+ * bounds, it's probably not our bug.
+ */
+ BUG();
}
- dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
- msg, ABORT_COMPLETE_WAIT_SECS);
- cmd_free(h, abort);
- return FAILED;
+
+ atomic_inc(&c->refcount);
+ if (unlikely(!hpsa_is_cmd_idle(c))) {
+ /*
+ * We expect that the SCSI layer will hand us a unique tag
+ * value. Thus, there should never be a collision here between
+ * two requests...because if the selected command isn't idle
+ * then someone is going to be very disappointed.
+ */
+ dev_err(&h->pdev->dev,
+ "tag collision (tag=%d) in cmd_tagged_alloc().\n",
+ idx);
+ if (c->scsi_cmd != NULL)
+ scsi_print_command(c->scsi_cmd);
+ scsi_print_command(scmd);
+ }
+
+ hpsa_cmd_partial_init(h, idx, c);
+ return c;
+}
+
+static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
+{
+ /*
+ * Release our reference to the block. We don't need to do anything
+ * else to free it, because it is accessed by index. (There's no point
+ * in checking the result of the decrement, since we cannot guarantee
+ * that there isn't a concurrent abort which is also accessing it.)
+ */
+ (void)atomic_dec(&c->refcount);
}
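cmd_tagged_alloc() turns the block layer tag directly into a slot index; the refcount does not arbitrate ownership, it only trips an error report on a collision that should never happen. A compact model using C11 atomics (names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

#define NRESERVED 8	/* low indexes kept for driver-internal commands */
#define NCMDS     64

struct cmd { atomic_int refcount; int tag; };
static struct cmd cmd_pool[NCMDS];

/* The tag comes from the block layer and is unique per in-flight request,
 * so indexing is collision-free; the refcount check is just a tripwire. */
static struct cmd *cmd_tagged_alloc(int tag)
{
	struct cmd *c;
	int idx = NRESERVED + tag;

	if (idx < NRESERVED || idx >= NCMDS)
		return NULL;		/* bad tag from the block layer */
	c = &cmd_pool[idx];
	if (atomic_fetch_add(&c->refcount, 1) != 0)
		fprintf(stderr, "tag collision on %d\n", tag);
	c->tag = tag;
	return c;
}

static void cmd_tagged_free(struct cmd *c)
{
	atomic_fetch_sub(&c->refcount, 1);	/* freed by index, no list */
}

int main(void)
{
	struct cmd *c = cmd_tagged_alloc(3);

	if (c)
		cmd_tagged_free(c);
	return 0;
}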
/*
@@ -4660,16 +5632,15 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
* which ones are free or in use. Lock must be held when calling this.
* cmd_free() is the complement.
+ * This function never gives up and never returns NULL. If it hangs,
+ * another thread must call cmd_free() to free some tags.
*/
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
struct CommandList *c;
- int i;
- union u64bit temp64;
- dma_addr_t cmd_dma_handle, err_dma_handle;
- int refcount;
- unsigned long offset;
+ int refcount, i;
+ int offset = 0;
/*
* There is some *extremely* small but non-zero chance that
@@ -4681,12 +5652,20 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
* very unlucky thread might be starved anyway, never able to
* beat the other threads. In reality, this happens so
* infrequently as to be indistinguishable from never.
+ *
+ * Note that we start allocating commands before the SCSI host structure
+ * is initialized. Since the search starts at bit zero, this
+ * all works because we have at least one command structure available;
+ * however, it means that the structures with the low indexes have to be
+ * reserved for driver-initiated requests, while requests from the block
+ * layer will use the higher indexes.
*/
- offset = h->last_allocation; /* benignly racy */
for (;;) {
- i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
- if (unlikely(i == h->nr_cmds)) {
+ i = find_next_zero_bit(h->cmd_pool_bits,
+ HPSA_NRESERVED_CMDS,
+ offset);
+ if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
offset = 0;
continue;
}
@@ -4694,35 +5673,23 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
refcount = atomic_inc_return(&c->refcount);
if (unlikely(refcount > 1)) {
cmd_free(h, c); /* already in use */
- offset = (i + 1) % h->nr_cmds;
+ offset = (i + 1) % HPSA_NRESERVED_CMDS;
continue;
}
set_bit(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG));
break; /* it's ours now. */
}
- h->last_allocation = i; /* benignly racy */
-
- /* Zero out all of commandlist except the last field, refcount */
- memset(c, 0, offsetof(struct CommandList, refcount));
- c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
- cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
- c->err_info = h->errinfo_pool + i;
- memset(c->err_info, 0, sizeof(*c->err_info));
- err_dma_handle = h->errinfo_pool_dhandle
- + i * sizeof(*c->err_info);
-
- c->cmdindex = i;
-
- c->busaddr = (u32) cmd_dma_handle;
- temp64.val = (u64) err_dma_handle;
- c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
- c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
-
- c->h = h;
+ hpsa_cmd_partial_init(h, i, c);
return c;
}
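cmd_alloc() now scans only the reserved low-index slots and loops until one is claimed, using the refcount to resolve races on a bit. A simplified model of the claim loop, with the bitmap (find_next_zero_bit) omitted so the refcount alone does the claiming (names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

#define NRESERVED 8	/* models HPSA_NRESERVED_CMDS */

static atomic_int refcount[NRESERVED];

/* Loop forever over the reserved slots; a slot is ours when we are
 * the first to raise its refcount from zero. */
static int cmd_alloc_index(void)
{
	for (;;) {
		int i;

		for (i = 0; i < NRESERVED; i++) {
			if (atomic_fetch_add(&refcount[i], 1) == 0)
				return i;	/* it's ours now */
			atomic_fetch_sub(&refcount[i], 1);	/* in use */
		}
		/* all reserved slots busy: spin until one is freed */
	}
}

static void cmd_free_index(int i)
{
	atomic_fetch_sub(&refcount[i], 1);
}

int main(void)
{
	int i = cmd_alloc_index();

	printf("claimed slot %d\n", i);
	cmd_free_index(i);
	return 0;
}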
+/*
+ * This is the complementary operation to cmd_alloc(). Note, however, in some
+ * corner cases it may also be used to free blocks allocated by
+ * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
+ * the clear-bit is harmless.
+ */
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
if (atomic_dec_and_test(&c->refcount)) {
@@ -4900,7 +5867,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
if (iocommand.buf_size > 0) {
buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
if (buff == NULL)
- return -EFAULT;
+ return -ENOMEM;
if (iocommand.Request.Type.Direction & XFER_WRITE) {
/* Copy the data into the buffer we created */
if (copy_from_user(buff, iocommand.buf,
@@ -4913,12 +5880,10 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
}
c = cmd_alloc(h);
- if (c == NULL) {
- rc = -ENOMEM;
- goto out_kfree;
- }
+
/* Fill in the command type */
c->cmd_type = CMD_IOCTL_PEND;
+ c->scsi_cmd = SCSI_CMD_BUSY;
/* Fill in Command Header */
c->Header.ReplyQueue = 0; /* unused in simple mode */
if (iocommand.buf_size > 0) { /* buffer to fill */
@@ -4948,10 +5913,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
}
- hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
+ rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
if (iocommand.buf_size > 0)
hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
/* Copy the error information out */
memcpy(&iocommand.error_info, c->err_info,
@@ -5048,11 +6017,9 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
sg_used++;
}
c = cmd_alloc(h);
- if (c == NULL) {
- status = -ENOMEM;
- goto cleanup1;
- }
+
c->cmd_type = CMD_IOCTL_PEND;
+ c->scsi_cmd = SCSI_CMD_BUSY;
c->Header.ReplyQueue = 0;
c->Header.SGList = (u8) sg_used;
c->Header.SGTotal = cpu_to_le16(sg_used);
@@ -5078,10 +6045,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
}
- hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
+ status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
if (sg_used)
hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
+ if (status) {
+ status = -EIO;
+ goto cleanup0;
+ }
+
/* Copy the error information out */
memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
if (copy_to_user(argp, ioc, sizeof(*ioc))) {
@@ -5163,14 +6135,13 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
}
}
-static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
u8 reset_type)
{
struct CommandList *c;
c = cmd_alloc(h);
- if (!c)
- return -ENOMEM;
+
/* fill_cmd can't fail here, no data buffer to map */
(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
RAID_CTLR_LUNID, TYPE_MSG);
@@ -5181,7 +6152,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
* the command either. This is the last command we will send before
* re-initializing everything, so it doesn't matter and won't leak.
*/
- return 0;
+ return;
}
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
@@ -5189,9 +6160,10 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
int cmd_type)
{
int pci_dir = XFER_NONE;
- struct CommandList *a; /* for commands to be aborted */
+ u64 tag; /* for commands to be aborted */
c->cmd_type = CMD_IOCTL_PEND;
+ c->scsi_cmd = SCSI_CMD_BUSY;
c->Header.ReplyQueue = 0;
if (buff != NULL && size > 0) {
c->Header.SGList = 1;
@@ -5305,10 +6277,10 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[7] = 0x00;
break;
case HPSA_ABORT_MSG:
- a = buff; /* point to command to be aborted */
+ memcpy(&tag, buff, sizeof(tag));
dev_dbg(&h->pdev->dev,
- "Abort Tag:0x%016llx request Tag:0x%016llx",
- a->Header.tag, c->Header.tag);
+ "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
+ tag, c->Header.tag);
c->Request.CDBLen = 16;
c->Request.type_attr_dir =
TYPE_ATTR_DIR(cmd_type,
@@ -5319,8 +6291,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[2] = 0x00; /* reserved */
c->Request.CDB[3] = 0x00; /* reserved */
/* Tag to abort goes in CDB[4]-CDB[11] */
- memcpy(&c->Request.CDB[4], &a->Header.tag,
- sizeof(a->Header.tag));
+ memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
c->Request.CDB[12] = 0x00; /* reserved */
c->Request.CDB[13] = 0x00; /* reserved */
c->Request.CDB[14] = 0x00; /* reserved */
@@ -5399,7 +6370,7 @@ static inline void finish_cmd(struct CommandList *c)
if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
|| c->cmd_type == CMD_IOACCEL2))
complete_scsi_command(c);
- else if (c->cmd_type == CMD_IOCTL_PEND)
+ else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
complete(c->waiting);
}
@@ -5733,7 +6704,7 @@ static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
/* This does a hard reset of the controller using PCI power management
* states or the using the doorbell register.
*/
-static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
+static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
{
u64 cfg_offset;
u32 cfg_base_addr;
@@ -5744,7 +6715,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
int rc;
struct CfgTable __iomem *cfgtable;
u32 use_doorbell;
- u32 board_id;
u16 command_register;
/* For controllers as old as the P600, this is very nearly
@@ -5760,11 +6730,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
* using the doorbell register.
*/
- rc = hpsa_lookup_board_id(pdev, &board_id);
- if (rc < 0) {
- dev_warn(&pdev->dev, "Board ID not found\n");
- return rc;
- }
if (!ctlr_is_resettable(board_id)) {
dev_warn(&pdev->dev, "Controller not resettable\n");
return -ENODEV;
@@ -5930,10 +6895,22 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
return -1;
}
+static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
+{
+ if (h->msix_vector) {
+ if (h->pdev->msix_enabled)
+ pci_disable_msix(h->pdev);
+ h->msix_vector = 0;
+ } else if (h->msi_vector) {
+ if (h->pdev->msi_enabled)
+ pci_disable_msi(h->pdev);
+ h->msi_vector = 0;
+ }
+}
+
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use legacy INTx mode.
*/
-
static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
@@ -6064,6 +7041,21 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
return 0;
}
+static void hpsa_free_cfgtables(struct ctlr_info *h)
+{
+ if (h->transtable) {
+ iounmap(h->transtable);
+ h->transtable = NULL;
+ }
+ if (h->cfgtable) {
+ iounmap(h->cfgtable);
+ h->cfgtable = NULL;
+ }
+}
+
+/* Find and map CISS config table and transfer table
+ * several items must be unmapped (freed) later
+ */
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
u64 cfg_offset;
@@ -6090,25 +7082,31 @@ static int hpsa_find_cfgtables(struct ctlr_info *h)
h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
cfg_base_addr_index)+cfg_offset+trans_offset,
sizeof(*h->transtable));
- if (!h->transtable)
+ if (!h->transtable) {
+ dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
+ hpsa_free_cfgtables(h);
return -ENOMEM;
+ }
return 0;
}
static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
- h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+#define MIN_MAX_COMMANDS 16
+ BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
+
+ h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
/* Limit commands in memory limited kdump scenario. */
if (reset_devices && h->max_commands > 32)
h->max_commands = 32;
- if (h->max_commands < 16) {
- dev_warn(&h->pdev->dev, "Controller reports "
- "max supported commands of %d, an obvious lie. "
- "Using 16. Ensure that firmware is up to date.\n",
- h->max_commands);
- h->max_commands = 16;
+ if (h->max_commands < MIN_MAX_COMMANDS) {
+ dev_warn(&h->pdev->dev,
+ "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
+ h->max_commands,
+ MIN_MAX_COMMANDS);
+ h->max_commands = MIN_MAX_COMMANDS;
}
}
@@ -6153,6 +7151,8 @@ static void hpsa_find_board_params(struct ctlr_info *h)
dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
+ if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
+ dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
}
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
@@ -6222,6 +7222,8 @@ static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
* as we enter this code.)
*/
for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
+ if (h->remove_in_progress)
+ goto done;
spin_lock_irqsave(&h->lock, flags);
doorbell_value = readl(h->vaddr + SA5_DOORBELL);
spin_unlock_irqrestore(&h->lock, flags);
@@ -6262,6 +7264,22 @@ error:
return -ENODEV;
}
+/* free items allocated or mapped by hpsa_pci_init */
+static void hpsa_free_pci_init(struct ctlr_info *h)
+{
+ hpsa_free_cfgtables(h); /* pci_init 4 */
+ iounmap(h->vaddr); /* pci_init 3 */
+ h->vaddr = NULL;
+ hpsa_disable_interrupt_mode(h); /* pci_init 2 */
+ /*
+ * call pci_disable_device before pci_release_regions per
+ * Documentation/PCI/pci.txt
+ */
+ pci_disable_device(h->pdev); /* pci_init 1 */
+ pci_release_regions(h->pdev); /* pci_init 2 */
+}
+
+/* several items must be freed later */
static int hpsa_pci_init(struct ctlr_info *h)
{
int prod_index, err;
@@ -6272,19 +7290,24 @@ static int hpsa_pci_init(struct ctlr_info *h)
h->product_name = products[prod_index].product_name;
h->access = *(products[prod_index].access);
+ h->needs_abort_tags_swizzled =
+ ctlr_needs_abort_tags_swizzled(h->board_id);
+
pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
err = pci_enable_device(h->pdev);
if (err) {
- dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
+ dev_err(&h->pdev->dev, "failed to enable PCI device\n");
+ pci_disable_device(h->pdev);
return err;
}
err = pci_request_regions(h->pdev, HPSA);
if (err) {
dev_err(&h->pdev->dev,
- "cannot obtain PCI resources, aborting\n");
+ "failed to obtain PCI resources\n");
+ pci_disable_device(h->pdev);
return err;
}
@@ -6293,38 +7316,43 @@ static int hpsa_pci_init(struct ctlr_info *h)
hpsa_interrupt_mode(h);
err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
if (err)
- goto err_out_free_res;
+ goto clean2; /* intmode+region, pci */
h->vaddr = remap_pci_mem(h->paddr, 0x250);
if (!h->vaddr) {
+ dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
err = -ENOMEM;
- goto err_out_free_res;
+ goto clean2; /* intmode+region, pci */
}
err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
if (err)
- goto err_out_free_res;
+ goto clean3; /* vaddr, intmode+region, pci */
err = hpsa_find_cfgtables(h);
if (err)
- goto err_out_free_res;
+ goto clean3; /* vaddr, intmode+region, pci */
hpsa_find_board_params(h);
if (!hpsa_CISS_signature_present(h)) {
err = -ENODEV;
- goto err_out_free_res;
+ goto clean4; /* cfgtables, vaddr, intmode+region, pci */
}
hpsa_set_driver_support_bits(h);
hpsa_p600_dma_prefetch_quirk(h);
err = hpsa_enter_simple_mode(h);
if (err)
- goto err_out_free_res;
+ goto clean4; /* cfgtables, vaddr, intmode+region, pci */
return 0;
-err_out_free_res:
- if (h->transtable)
- iounmap(h->transtable);
- if (h->cfgtable)
- iounmap(h->cfgtable);
- if (h->vaddr)
- iounmap(h->vaddr);
+clean4: /* cfgtables, vaddr, intmode+region, pci */
+ hpsa_free_cfgtables(h);
+clean3: /* vaddr, intmode+region, pci */
+ iounmap(h->vaddr);
+ h->vaddr = NULL;
+clean2: /* intmode+region, pci */
+ hpsa_disable_interrupt_mode(h);
+ /*
+ * call pci_disable_device before pci_release_regions per
+ * Documentation/PCI/pci.txt
+ */
pci_disable_device(h->pdev);
pci_release_regions(h->pdev);
return err;
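The clean2/clean3/clean4 labels above unwind in strict reverse order of acquisition, each label releasing exactly what was set up after the previous goto target. The idiom in isolation, with hypothetical resources:

#include <stdio.h>
#include <stdlib.h>

/* Classic kernel-style error unwind: labels release resources in
 * reverse acquisition order, and each failure jumps to the label
 * that frees everything acquired so far. */
static int setup(void)
{
	void *a, *b, *c;
	int err = -1;

	a = malloc(16);
	if (!a)
		return err;
	b = malloc(16);
	if (!b)
		goto clean1;	/* a */
	c = malloc(16);
	if (!c)
		goto clean2;	/* b, a */

	puts("all resources acquired");
	free(c);
	free(b);
	free(a);
	return 0;

clean2:	/* b, a */
	free(b);
clean1:	/* a */
	free(a);
	return err;
}

int main(void) { return setup() ? 1 : 0; }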
@@ -6346,7 +7374,7 @@ static void hpsa_hba_inquiry(struct ctlr_info *h)
}
}
-static int hpsa_init_reset_devices(struct pci_dev *pdev)
+static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
{
int rc, i;
void __iomem *vaddr;
@@ -6382,7 +7410,7 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
iounmap(vaddr);
/* Reset the controller with a PCI power-cycle or via doorbell */
- rc = hpsa_kdump_hard_reset_controller(pdev);
+ rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
/* -ENOTSUPP here means we cannot reset the controller
* but it's already (and still) up and running in
@@ -6408,7 +7436,29 @@ out_disable:
return rc;
}
-static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
+static void hpsa_free_cmd_pool(struct ctlr_info *h)
+{
+ kfree(h->cmd_pool_bits);
+ h->cmd_pool_bits = NULL;
+ if (h->cmd_pool) {
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct CommandList),
+ h->cmd_pool,
+ h->cmd_pool_dhandle);
+ h->cmd_pool = NULL;
+ h->cmd_pool_dhandle = 0;
+ }
+ if (h->errinfo_pool) {
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct ErrorInfo),
+ h->errinfo_pool,
+ h->errinfo_pool_dhandle);
+ h->errinfo_pool = NULL;
+ h->errinfo_pool_dhandle = 0;
+ }
+}
+
+static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
h->cmd_pool_bits = kzalloc(
DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
@@ -6425,34 +7475,13 @@ static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
dev_err(&h->pdev->dev, "out of memory in %s", __func__);
goto clean_up;
}
+ hpsa_preinitialize_commands(h);
return 0;
clean_up:
hpsa_free_cmd_pool(h);
return -ENOMEM;
}
-static void hpsa_free_cmd_pool(struct ctlr_info *h)
-{
- kfree(h->cmd_pool_bits);
- if (h->cmd_pool)
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(struct CommandList),
- h->cmd_pool, h->cmd_pool_dhandle);
- if (h->ioaccel2_cmd_pool)
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
- h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
- if (h->errinfo_pool)
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(struct ErrorInfo),
- h->errinfo_pool,
- h->errinfo_pool_dhandle);
- if (h->ioaccel_cmd_pool)
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(struct io_accel1_cmd),
- h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
-}
-
static void hpsa_irq_affinity_hints(struct ctlr_info *h)
{
int i, cpu;
@@ -6474,12 +7503,14 @@ static void hpsa_free_irqs(struct ctlr_info *h)
i = h->intr_mode;
irq_set_affinity_hint(h->intr[i], NULL);
free_irq(h->intr[i], &h->q[i]);
+ h->q[i] = 0;
return;
}
for (i = 0; i < h->msix_vector; i++) {
irq_set_affinity_hint(h->intr[i], NULL);
free_irq(h->intr[i], &h->q[i]);
+ h->q[i] = 0;
}
for (; i < MAX_REPLY_QUEUES; i++)
h->q[i] = 0;
@@ -6502,8 +7533,9 @@ static int hpsa_request_irqs(struct ctlr_info *h,
if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
/* If performant mode and MSI-X, use multiple reply queues */
for (i = 0; i < h->msix_vector; i++) {
+ sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
rc = request_irq(h->intr[i], msixhandler,
- 0, h->devname,
+ 0, h->intrname[i],
&h->q[i]);
if (rc) {
int j;
@@ -6524,18 +7556,30 @@ static int hpsa_request_irqs(struct ctlr_info *h,
} else {
/* Use single reply pool */
if (h->msix_vector > 0 || h->msi_vector) {
+ if (h->msix_vector)
+ sprintf(h->intrname[h->intr_mode],
+ "%s-msix", h->devname);
+ else
+ sprintf(h->intrname[h->intr_mode],
+ "%s-msi", h->devname);
rc = request_irq(h->intr[h->intr_mode],
- msixhandler, 0, h->devname,
+ msixhandler, 0,
+ h->intrname[h->intr_mode],
&h->q[h->intr_mode]);
} else {
+ sprintf(h->intrname[h->intr_mode],
+ "%s-intx", h->devname);
rc = request_irq(h->intr[h->intr_mode],
- intxhandler, IRQF_SHARED, h->devname,
+ intxhandler, IRQF_SHARED,
+ h->intrname[h->intr_mode],
&h->q[h->intr_mode]);
}
+ irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
}
if (rc) {
- dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
+ dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
+ hpsa_free_irqs(h);
return -ENODEV;
}
return 0;
@@ -6543,42 +7587,27 @@ static int hpsa_request_irqs(struct ctlr_info *h,
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
- if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
- HPSA_RESET_TYPE_CONTROLLER)) {
- dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
- return -EIO;
- }
+ int rc;
+
+ hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
- if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
+ rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
+ if (rc) {
dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
- return -1;
+ return rc;
}
dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
- if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
+ rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
+ if (rc) {
dev_warn(&h->pdev->dev, "Board failed to become ready "
"after soft reset.\n");
- return -1;
+ return rc;
}
return 0;
}
-static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
-{
- hpsa_free_irqs(h);
-#ifdef CONFIG_PCI_MSI
- if (h->msix_vector) {
- if (h->pdev->msix_enabled)
- pci_disable_msix(h->pdev);
- } else if (h->msi_vector) {
- if (h->pdev->msi_enabled)
- pci_disable_msi(h->pdev);
- }
-#endif /* CONFIG_PCI_MSI */
-}
-
static void hpsa_free_reply_queues(struct ctlr_info *h)
{
int i;
@@ -6586,30 +7615,36 @@ static void hpsa_free_reply_queues(struct ctlr_info *h)
for (i = 0; i < h->nreply_queues; i++) {
if (!h->reply_queue[i].head)
continue;
- pci_free_consistent(h->pdev, h->reply_queue_size,
- h->reply_queue[i].head, h->reply_queue[i].busaddr);
+ pci_free_consistent(h->pdev,
+ h->reply_queue_size,
+ h->reply_queue[i].head,
+ h->reply_queue[i].busaddr);
h->reply_queue[i].head = NULL;
h->reply_queue[i].busaddr = 0;
}
+ h->reply_queue_size = 0;
}
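A pattern repeated throughout this series: every free also clears the pointer and zeroes the sizes, so a teardown path can safely be entered more than once. The idiom in isolation (hypothetical names):

#include <stdlib.h>

struct pool { void *buf; size_t size; };

/* Idempotent free: safe to call on an already-freed pool because the
 * pointer is cleared the first time through. */
static void pool_free(struct pool *p)
{
	if (!p->buf)
		return;
	free(p->buf);
	p->buf = NULL;
	p->size = 0;
}

int main(void)
{
	struct pool p = { malloc(32), 32 };

	pool_free(&p);
	pool_free(&p);	/* harmless second call */
	return 0;
}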
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
- hpsa_free_irqs_and_disable_msix(h);
- hpsa_free_sg_chain_blocks(h);
- hpsa_free_cmd_pool(h);
- kfree(h->ioaccel1_blockFetchTable);
- kfree(h->blockFetchTable);
- hpsa_free_reply_queues(h);
- if (h->vaddr)
- iounmap(h->vaddr);
- if (h->transtable)
- iounmap(h->transtable);
- if (h->cfgtable)
- iounmap(h->cfgtable);
- pci_disable_device(h->pdev);
- pci_release_regions(h->pdev);
- kfree(h);
+ hpsa_free_performant_mode(h); /* init_one 7 */
+ hpsa_free_sg_chain_blocks(h); /* init_one 6 */
+ hpsa_free_cmd_pool(h); /* init_one 5 */
+ hpsa_free_irqs(h); /* init_one 4 */
+ scsi_host_put(h->scsi_host); /* init_one 3 */
+ h->scsi_host = NULL; /* init_one 3 */
+ hpsa_free_pci_init(h); /* init_one 2_5 */
+ free_percpu(h->lockup_detected); /* init_one 2 */
+ h->lockup_detected = NULL; /* init_one 2 */
+ if (h->resubmit_wq) {
+ destroy_workqueue(h->resubmit_wq); /* init_one 1 */
+ h->resubmit_wq = NULL;
+ }
+ if (h->rescan_ctlr_wq) {
+ destroy_workqueue(h->rescan_ctlr_wq);
+ h->rescan_ctlr_wq = NULL;
+ }
+ kfree(h); /* init_one 1 */
}
/* Called when controller lockup detected. */
@@ -6617,17 +7652,22 @@ static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
int i, refcount;
struct CommandList *c;
+ int failcount = 0;
flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
for (i = 0; i < h->nr_cmds; i++) {
c = h->cmd_pool + i;
refcount = atomic_inc_return(&c->refcount);
if (refcount > 1) {
- c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
finish_cmd(c);
+ atomic_dec(&h->commands_outstanding);
+ failcount++;
}
cmd_free(h, c);
}
+ dev_warn(&h->pdev->dev,
+ "failed %d commands in fail_all\n", failcount);
}
static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
@@ -6653,18 +7693,19 @@ static void controller_lockup_detected(struct ctlr_info *h)
if (!lockup_detected) {
/* no heartbeat, but controller gave us a zero. */
dev_warn(&h->pdev->dev,
- "lockup detected but scratchpad register is zero\n");
+ "lockup detected after %d but scratchpad register is zero\n",
+ h->heartbeat_sample_interval / HZ);
lockup_detected = 0xffffffff;
}
set_lockup_detected_for_all_cpus(h, lockup_detected);
spin_unlock_irqrestore(&h->lock, flags);
- dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
- lockup_detected);
+ dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
+ lockup_detected, h->heartbeat_sample_interval / HZ);
pci_disable_device(h->pdev);
fail_all_outstanding_cmds(h);
}
-static void detect_controller_lockup(struct ctlr_info *h)
+static int detect_controller_lockup(struct ctlr_info *h)
{
u64 now;
u32 heartbeat;
@@ -6674,7 +7715,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
/* If we've received an interrupt recently, we're ok. */
if (time_after64(h->last_intr_timestamp +
(h->heartbeat_sample_interval), now))
- return;
+ return false;
/*
* If we've already checked the heartbeat recently, we're ok.
@@ -6683,7 +7724,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
*/
if (time_after64(h->last_heartbeat_timestamp +
(h->heartbeat_sample_interval), now))
- return;
+ return false;
/* If heartbeat has not changed since we last looked, we're not ok. */
spin_lock_irqsave(&h->lock, flags);
@@ -6691,12 +7732,13 @@ static void detect_controller_lockup(struct ctlr_info *h)
spin_unlock_irqrestore(&h->lock, flags);
if (h->last_heartbeat == heartbeat) {
controller_lockup_detected(h);
- return;
+ return true;
}
/* We're ok. */
h->last_heartbeat = heartbeat;
h->last_heartbeat_timestamp = now;
+ return false;
}
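detect_controller_lockup() reports a lockup when the heartbeat register fails to advance across a full sample interval; recent interrupts or a too-recent sample short-circuit the check. A minimal model of that watchdog (names hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct watchdog {
	uint32_t last_heartbeat;
	uint64_t last_sample;	/* jiffies-like timestamp */
	uint64_t interval;
};

/* Returns true when the heartbeat failed to advance across a full
 * sample interval; otherwise records the new sample and carries on. */
static bool heartbeat_stalled(struct watchdog *w, uint32_t heartbeat,
			      uint64_t now)
{
	if (now < w->last_sample + w->interval)
		return false;	/* sampled too recently to judge */
	if (heartbeat == w->last_heartbeat)
		return true;	/* no progress: lockup */
	w->last_heartbeat = heartbeat;
	w->last_sample = now;
	return false;
}

int main(void)
{
	struct watchdog w = { .last_heartbeat = 5, .last_sample = 0,
			      .interval = 10 };

	printf("%d\n", heartbeat_stalled(&w, 6, 11));	/* 0: advanced */
	printf("%d\n", heartbeat_stalled(&w, 6, 22));	/* 1: stalled */
	return 0;
}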
static void hpsa_ack_ctlr_events(struct ctlr_info *h)
@@ -6843,11 +7885,18 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ctlr_info *h;
int try_soft_reset = 0;
unsigned long flags;
+ u32 board_id;
if (number_of_controllers == 0)
printk(KERN_INFO DRIVER_NAME "\n");
- rc = hpsa_init_reset_devices(pdev);
+ rc = hpsa_lookup_board_id(pdev, &board_id);
+ if (rc < 0) {
+ dev_warn(&pdev->dev, "Board ID not found\n");
+ return rc;
+ }
+
+ rc = hpsa_init_reset_devices(pdev, board_id);
if (rc) {
if (rc != -ENOTSUPP)
return rc;
@@ -6868,42 +7917,41 @@ reinit_after_soft_reset:
*/
BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
h = kzalloc(sizeof(*h), GFP_KERNEL);
- if (!h)
+ if (!h) {
+ dev_err(&pdev->dev, "Failed to allocate controller head\n");
return -ENOMEM;
+ }
h->pdev = pdev;
+
h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
INIT_LIST_HEAD(&h->offline_device_list);
spin_lock_init(&h->lock);
spin_lock_init(&h->offline_device_lock);
spin_lock_init(&h->scan_lock);
atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
-
- h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
- if (!h->rescan_ctlr_wq) {
- rc = -ENOMEM;
- goto clean1;
- }
-
- h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
- if (!h->resubmit_wq) {
- rc = -ENOMEM;
- goto clean1;
- }
+ atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
/* Allocate and clear per-cpu variable lockup_detected */
h->lockup_detected = alloc_percpu(u32);
if (!h->lockup_detected) {
+ dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
rc = -ENOMEM;
- goto clean1;
+ goto clean1; /* aer/h */
}
set_lockup_detected_for_all_cpus(h, 0);
rc = hpsa_pci_init(h);
- if (rc != 0)
- goto clean1;
+ if (rc)
+ goto clean2; /* lu, aer/h */
+
+ /* relies on h-> settings made by hpsa_pci_init, including
+ * the interrupt mode and h->intr[] */
+ rc = hpsa_scsi_host_alloc(h);
+ if (rc)
+ goto clean2_5; /* pci, lu, aer/h */
- sprintf(h->devname, HPSA "%d", number_of_controllers);
+ sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
h->ctlr = number_of_controllers;
number_of_controllers++;
@@ -6917,34 +7965,57 @@ reinit_after_soft_reset:
dac = 0;
} else {
dev_err(&pdev->dev, "no suitable DMA available\n");
- goto clean1;
+ goto clean3; /* shost, pci, lu, aer/h */
}
}
/* make sure the board interrupts are off */
h->access.set_intr_mask(h, HPSA_INTR_OFF);
- if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
- goto clean2;
- dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
- h->devname, pdev->device,
- h->intr[h->intr_mode], dac ? "" : " not");
- rc = hpsa_allocate_cmd_pool(h);
+ rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
+ if (rc)
+ goto clean3; /* shost, pci, lu, aer/h */
+ rc = hpsa_alloc_cmd_pool(h);
if (rc)
- goto clean2_and_free_irqs;
- if (hpsa_allocate_sg_chain_blocks(h))
- goto clean4;
+ goto clean4; /* irq, shost, pci, lu, aer/h */
+ rc = hpsa_alloc_sg_chain_blocks(h);
+ if (rc)
+ goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
init_waitqueue_head(&h->scan_wait_queue);
+ init_waitqueue_head(&h->abort_cmd_wait_queue);
+ init_waitqueue_head(&h->event_sync_wait_queue);
+ mutex_init(&h->reset_mutex);
h->scan_finished = 1; /* no scan currently in progress */
pci_set_drvdata(pdev, h);
h->ndevices = 0;
h->hba_mode_enabled = 0;
- h->scsi_host = NULL;
+
spin_lock_init(&h->devlock);
- hpsa_put_ctlr_into_performant_mode(h);
+ rc = hpsa_put_ctlr_into_performant_mode(h);
+ if (rc)
+ goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
+
+ /* hook into SCSI subsystem */
+ rc = hpsa_scsi_add_host(h);
+ if (rc)
+ goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+
+ /* create the rescan and resubmit workqueues */
+ h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
+ if (!h->rescan_ctlr_wq) {
+ rc = -ENOMEM;
+ goto clean7;
+ }
- /* At this point, the controller is ready to take commands.
+ h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
+ if (!h->resubmit_wq) {
+ rc = -ENOMEM;
+ goto clean7; /* aer/h */
+ }
+
+ /*
+ * At this point, the controller is ready to take commands.
* Now, if reset_devices and the hard reset didn't work, try
* the soft reset and see if that works.
*/
@@ -6966,13 +8037,24 @@ reinit_after_soft_reset:
if (rc) {
dev_warn(&h->pdev->dev,
"Failed to request_irq after soft reset.\n");
- goto clean4;
+ /*
+ * cannot goto clean7 or free_irqs will be called
+ * again. Instead, do its work
+ */
+ hpsa_free_performant_mode(h); /* clean7 */
+ hpsa_free_sg_chain_blocks(h); /* clean6 */
+ hpsa_free_cmd_pool(h); /* clean5 */
+ /*
+ * skip hpsa_free_irqs(h) clean4 since that
+ * was just called before request_irqs failed
+ */
+ goto clean3;
}
rc = hpsa_kdump_soft_reset(h);
if (rc)
/* Neither hard nor soft reset worked, we're hosed. */
- goto clean4;
+ goto clean9;
dev_info(&h->pdev->dev, "Board READY.\n");
dev_info(&h->pdev->dev,
@@ -6993,21 +8075,20 @@ reinit_after_soft_reset:
hpsa_undo_allocations_after_kdump_soft_reset(h);
try_soft_reset = 0;
if (rc)
- /* don't go to clean4, we already unallocated */
+ /* don't goto clean, we already unallocated */
return -ENODEV;
goto reinit_after_soft_reset;
}
- /* Enable Accelerated IO path at driver layer */
- h->acciopath_status = 1;
+ /* Enable Accelerated IO path at driver layer */
+ h->acciopath_status = 1;
/* Turn the interrupts on so we can service requests */
h->access.set_intr_mask(h, HPSA_INTR_ON);
hpsa_hba_inquiry(h);
- hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
/* Monitor the controller for firmware lockups */
h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -7019,19 +8100,36 @@ reinit_after_soft_reset:
h->heartbeat_sample_interval);
return 0;
-clean4:
+clean9: /* wq, sh, perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ kfree(h->hba_inquiry_data);
+clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ hpsa_free_performant_mode(h);
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
hpsa_free_sg_chain_blocks(h);
+clean5: /* cmd, irq, shost, pci, lu, aer/h */
hpsa_free_cmd_pool(h);
-clean2_and_free_irqs:
+clean4: /* irq, shost, pci, lu, aer/h */
hpsa_free_irqs(h);
-clean2:
-clean1:
- if (h->resubmit_wq)
+clean3: /* shost, pci, lu, aer/h */
+ scsi_host_put(h->scsi_host);
+ h->scsi_host = NULL;
+clean2_5: /* pci, lu, aer/h */
+ hpsa_free_pci_init(h);
+clean2: /* lu, aer/h */
+ if (h->lockup_detected) {
+ free_percpu(h->lockup_detected);
+ h->lockup_detected = NULL;
+ }
+clean1: /* wq/aer/h */
+ if (h->resubmit_wq) {
destroy_workqueue(h->resubmit_wq);
- if (h->rescan_ctlr_wq)
+ h->resubmit_wq = NULL;
+ }
+ if (h->rescan_ctlr_wq) {
destroy_workqueue(h->rescan_ctlr_wq);
- if (h->lockup_detected)
- free_percpu(h->lockup_detected);
+ h->rescan_ctlr_wq = NULL;
+ }
kfree(h);
return rc;
}
@@ -7040,8 +8138,8 @@ static void hpsa_flush_cache(struct ctlr_info *h)
{
char *flush_buf;
struct CommandList *c;
+ int rc;
- /* Don't bother trying to flush the cache if locked up */
if (unlikely(lockup_detected(h)))
return;
flush_buf = kzalloc(4, GFP_KERNEL);
@@ -7049,21 +8147,20 @@ static void hpsa_flush_cache(struct ctlr_info *h)
return;
c = cmd_alloc(h);
- if (!c) {
- dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
- goto out_of_memory;
- }
+
if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
RAID_CTLR_LUNID, TYPE_CMD)) {
goto out;
}
- hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
+ PCI_DMA_TODEVICE, NO_TIMEOUT);
+ if (rc)
+ goto out;
if (c->err_info->CommandStatus != 0)
out:
dev_warn(&h->pdev->dev,
"error flushing cache on controller\n");
cmd_free(h, c);
-out_of_memory:
kfree(flush_buf);
}
@@ -7078,15 +8175,18 @@ static void hpsa_shutdown(struct pci_dev *pdev)
*/
hpsa_flush_cache(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
- hpsa_free_irqs_and_disable_msix(h);
+ hpsa_free_irqs(h); /* init_one 4 */
+ hpsa_disable_interrupt_mode(h); /* pci_init 2 */
}
static void hpsa_free_device_info(struct ctlr_info *h)
{
int i;
- for (i = 0; i < h->ndevices; i++)
+ for (i = 0; i < h->ndevices; i++) {
kfree(h->dev[i]);
+ h->dev[i] = NULL;
+ }
}
static void hpsa_remove_one(struct pci_dev *pdev)
@@ -7108,29 +8208,34 @@ static void hpsa_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&h->rescan_ctlr_work);
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
- hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
+
+ /* includes hpsa_free_irqs - init_one 4 */
+ /* includes hpsa_disable_interrupt_mode - pci_init 2 */
hpsa_shutdown(pdev);
- iounmap(h->vaddr);
- iounmap(h->transtable);
- iounmap(h->cfgtable);
- hpsa_free_device_info(h);
- hpsa_free_sg_chain_blocks(h);
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(struct CommandList),
- h->cmd_pool, h->cmd_pool_dhandle);
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(struct ErrorInfo),
- h->errinfo_pool, h->errinfo_pool_dhandle);
- hpsa_free_reply_queues(h);
- kfree(h->cmd_pool_bits);
- kfree(h->blockFetchTable);
- kfree(h->ioaccel1_blockFetchTable);
- kfree(h->ioaccel2_blockFetchTable);
- kfree(h->hba_inquiry_data);
- pci_disable_device(pdev);
- pci_release_regions(pdev);
- free_percpu(h->lockup_detected);
- kfree(h);
+
+ hpsa_free_device_info(h); /* scan */
+
+ kfree(h->hba_inquiry_data); /* init_one 10 */
+ h->hba_inquiry_data = NULL; /* init_one 10 */
+ if (h->scsi_host)
+ scsi_remove_host(h->scsi_host); /* init_one 8 */
+ hpsa_free_ioaccel2_sg_chain_blocks(h);
+ hpsa_free_performant_mode(h); /* init_one 7 */
+ hpsa_free_sg_chain_blocks(h); /* init_one 6 */
+ hpsa_free_cmd_pool(h); /* init_one 5 */
+
+ /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
+
+ scsi_host_put(h->scsi_host); /* init_one 3 */
+ h->scsi_host = NULL; /* init_one 3 */
+
+ /* includes hpsa_disable_interrupt_mode - pci_init 2 */
+ hpsa_free_pci_init(h); /* init_one 2.5 */
+
+ free_percpu(h->lockup_detected); /* init_one 2 */
+ h->lockup_detected = NULL; /* init_one 2 */
+ /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
+ kfree(h); /* init_one 1 */
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
@@ -7188,7 +8293,10 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
-/* return -ENODEV or other reason on error, 0 on success */
+/*
+ * return -ENODEV on err, 0 on success (or no action)
+ * allocates numerous items that must be freed later
+ */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
int i;
@@ -7370,7 +8478,23 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
return 0;
}
-static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
+/* Free ioaccel1 mode command blocks and block fetch table */
+static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
+{
+ if (h->ioaccel_cmd_pool) {
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+ h->ioaccel_cmd_pool,
+ h->ioaccel_cmd_pool_dhandle);
+ h->ioaccel_cmd_pool = NULL;
+ h->ioaccel_cmd_pool_dhandle = 0;
+ }
+ kfree(h->ioaccel1_blockFetchTable);
+ h->ioaccel1_blockFetchTable = NULL;
+}
+
+/* Allocate ioaccel1 mode command blocks and block fetch table */
+static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
h->ioaccel_maxsg =
readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
@@ -7401,16 +8525,32 @@ static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
return 0;
clean_up:
- if (h->ioaccel_cmd_pool)
+ hpsa_free_ioaccel1_cmd_and_bft(h);
+ return -ENOMEM;
+}
+
+/* Free ioaccel2 mode command blocks and block fetch table */
+static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
+{
+ hpsa_free_ioaccel2_sg_chain_blocks(h);
+
+ if (h->ioaccel2_cmd_pool) {
pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
- h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
- kfree(h->ioaccel1_blockFetchTable);
- return 1;
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ h->ioaccel2_cmd_pool,
+ h->ioaccel2_cmd_pool_dhandle);
+ h->ioaccel2_cmd_pool = NULL;
+ h->ioaccel2_cmd_pool_dhandle = 0;
+ }
+ kfree(h->ioaccel2_blockFetchTable);
+ h->ioaccel2_blockFetchTable = NULL;
}
-static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
+/* Allocate ioaccel2 mode command blocks and block fetch table */
+static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
+ int rc;
+
/* Allocate ioaccel2 mode command blocks and block fetch table */
h->ioaccel_maxsg =
@@ -7430,7 +8570,13 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
sizeof(u32)), GFP_KERNEL);
if ((h->ioaccel2_cmd_pool == NULL) ||
- (h->ioaccel2_blockFetchTable == NULL))
+ (h->ioaccel2_blockFetchTable == NULL)) {
+ rc = -ENOMEM;
+ goto clean_up;
+ }
+
+ rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
+ if (rc)
goto clean_up;
memset(h->ioaccel2_cmd_pool, 0,
@@ -7438,41 +8584,50 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
return 0;
clean_up:
- if (h->ioaccel2_cmd_pool)
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
- h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
- kfree(h->ioaccel2_blockFetchTable);
- return 1;
+ hpsa_free_ioaccel2_cmd_and_bft(h);
+ return rc;
}
-static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
+static void hpsa_free_performant_mode(struct ctlr_info *h)
+{
+ kfree(h->blockFetchTable);
+ h->blockFetchTable = NULL;
+ hpsa_free_reply_queues(h);
+ hpsa_free_ioaccel1_cmd_and_bft(h);
+ hpsa_free_ioaccel2_cmd_and_bft(h);
+}
+
+/* return -ENODEV on error, 0 on success (or no action)
+ * allocates numerous items that must be freed later
+ */
+static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
unsigned long transMethod = CFGTBL_Trans_Performant |
CFGTBL_Trans_use_short_tags;
- int i;
+ int i, rc;
if (hpsa_simple_mode)
- return;
+ return 0;
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & PERFORMANT_MODE))
- return;
+ return 0;
/* Check for I/O accelerator mode support */
if (trans_support & CFGTBL_Trans_io_accel1) {
transMethod |= CFGTBL_Trans_io_accel1 |
CFGTBL_Trans_enable_directed_msix;
- if (hpsa_alloc_ioaccel_cmd_and_bft(h))
- goto clean_up;
- } else {
- if (trans_support & CFGTBL_Trans_io_accel2) {
- transMethod |= CFGTBL_Trans_io_accel2 |
+ rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
+ if (rc)
+ return rc;
+ } else if (trans_support & CFGTBL_Trans_io_accel2) {
+ transMethod |= CFGTBL_Trans_io_accel2 |
CFGTBL_Trans_enable_directed_msix;
- if (ioaccel2_alloc_cmds_and_bft(h))
- goto clean_up;
- }
+ rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
+ if (rc)
+ return rc;
}
h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
@@ -7484,8 +8639,10 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
h->reply_queue_size,
&(h->reply_queue[i].busaddr));
- if (!h->reply_queue[i].head)
- goto clean_up;
+ if (!h->reply_queue[i].head) {
+ rc = -ENOMEM;
+ goto clean1; /* rq, ioaccel */
+ }
h->reply_queue[i].size = h->max_commands;
h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
h->reply_queue[i].current_entry = 0;
@@ -7494,15 +8651,24 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
/* Need a block fetch table for performant mode */
h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
sizeof(u32)), GFP_KERNEL);
- if (!h->blockFetchTable)
- goto clean_up;
+ if (!h->blockFetchTable) {
+ rc = -ENOMEM;
+ goto clean1; /* rq, ioaccel */
+ }
- hpsa_enter_performant_mode(h, trans_support);
- return;
+ rc = hpsa_enter_performant_mode(h, trans_support);
+ if (rc)
+ goto clean2; /* bft, rq, ioaccel */
+ return 0;
-clean_up:
- hpsa_free_reply_queues(h);
+clean2: /* bft, rq, ioaccel */
kfree(h->blockFetchTable);
+ h->blockFetchTable = NULL;
+clean1: /* rq, ioaccel */
+ hpsa_free_reply_queues(h);
+ hpsa_free_ioaccel1_cmd_and_bft(h);
+ hpsa_free_ioaccel2_cmd_and_bft(h);
+ return rc;
}
static int is_accelerated_cmd(struct CommandList *c)
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 657713050349..6ee4da6b1153 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -47,6 +47,7 @@ struct hpsa_scsi_dev_t {
unsigned char raid_level; /* from inquiry page 0xC1 */
unsigned char volume_offline; /* discovered via TUR or VPD */
u16 queue_depth; /* max queue_depth for this device */
+ atomic_t reset_cmds_out; /* Count of commands to be affected by a reset */
atomic_t ioaccel_cmds_out; /* Only used for physical devices
* counts commands sent to physical
* device via "ioaccel" path.
@@ -54,6 +55,8 @@ struct hpsa_scsi_dev_t {
u32 ioaccel_handle;
int offload_config; /* I/O accel RAID offload configured */
int offload_enabled; /* I/O accel RAID offload enabled */
+ int offload_to_be_enabled;
+ int hba_ioaccel_enabled;
int offload_to_mirror; /* Send next I/O accelerator RAID
* offload request to mirror drive
*/
@@ -68,6 +71,13 @@ struct hpsa_scsi_dev_t {
* devices in order to honor physical device queue depth limits.
*/
struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
+ int nphysical_disks;
+ int supports_aborts;
+#define HPSA_DO_NOT_EXPOSE 0x0
+#define HPSA_SG_ATTACH 0x1
+#define HPSA_ULD_ATTACH 0x2
+#define HPSA_SCSI_ADD (HPSA_SG_ATTACH | HPSA_ULD_ATTACH)
+ u8 expose_state;
};
struct reply_queue_buffer {
@@ -133,7 +143,6 @@ struct ctlr_info {
struct CfgTable __iomem *cfgtable;
int interrupts_enabled;
int max_commands;
- int last_allocation;
atomic_t commands_outstanding;
# define PERF_MODE_INT 0
# define DOORBELL_INT 1
@@ -154,6 +163,7 @@ struct ctlr_info {
u8 max_cmd_sg_entries;
int chainsize;
struct SGDescriptor **cmd_sg_list;
+ struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;
/* pointers to command and error info pool */
struct CommandList *cmd_pool;
@@ -211,6 +221,7 @@ struct ctlr_info {
int remove_in_progress;
/* Address of h->q[x] is passed to intr handler to know which queue */
u8 q[MAX_REPLY_QUEUES];
+ char intrname[MAX_REPLY_QUEUES][16]; /* "hpsa0-msix00" names */
u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED (1 << 0)
#define HPSATMF_PHYS_LUN_RESET (1 << 1)
@@ -222,6 +233,7 @@ struct ctlr_info {
#define HPSATMF_PHYS_QRY_TASK (1 << 7)
#define HPSATMF_PHYS_QRY_TSET (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC (1 << 9)
+#define HPSATMF_IOACCEL_ENABLED (1 << 15)
#define HPSATMF_MASK_SUPPORTED (1 << 16)
#define HPSATMF_LOG_LUN_RESET (1 << 17)
#define HPSATMF_LOG_NEX_RESET (1 << 18)
@@ -251,8 +263,13 @@ struct ctlr_info {
struct list_head offline_device_list;
int acciopath_status;
int raid_offload_debug;
+ int needs_abort_tags_swizzled;
struct workqueue_struct *resubmit_wq;
struct workqueue_struct *rescan_ctlr_wq;
+ atomic_t abort_cmds_available;
+ wait_queue_head_t abort_cmd_wait_queue;
+ wait_queue_head_t event_sync_wait_queue;
+ struct mutex reset_mutex;
};
struct offline_device_entry {
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 3a621c74b76f..c601622cc98e 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -42,8 +42,22 @@
#define CMD_UNSOLICITED_ABORT 0x000A
#define CMD_TIMEOUT 0x000B
#define CMD_UNABORTABLE 0x000C
+#define CMD_TMF_STATUS 0x000D
#define CMD_IOACCEL_DISABLED 0x000E
+#define CMD_CTLR_LOCKUP 0xffff
+/* Note: CMD_CTLR_LOCKUP is not a value defined by the CISS spec;
+ * it is a driver-defined value that commands can be marked with
+ * when the driver has detected a controller lockup.
+ */
+/* TMF function status values */
+#define CISS_TMF_COMPLETE 0x00
+#define CISS_TMF_INVALID_FRAME 0x02
+#define CISS_TMF_NOT_SUPPORTED 0x04
+#define CISS_TMF_FAILED 0x05
+#define CISS_TMF_SUCCESS 0x08
+#define CISS_TMF_WRONG_LUN 0x09
+#define CISS_TMF_OVERLAPPED_TAG 0x0a
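For orientation, the CISS_TMF_* codes above split naturally into success and failure classes when a TMF completion is decoded; a hypothetical sketch, not taken from hpsa itself:

/* Hypothetical decode of the TMF function status; illustrative only. */
static int ciss_tmf_to_result(u8 tmf_status)
{
	switch (tmf_status) {
	case CISS_TMF_COMPLETE:		/* function ran to completion */
	case CISS_TMF_SUCCESS:		/* function succeeded */
		return 0;
	case CISS_TMF_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	default:	/* invalid frame, failed, wrong LUN, overlapped tag */
		return -EIO;
	}
}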
/* Unit Attentions ASC's as defined for the MSA2012sa */
#define POWER_OR_RESET 0x29
@@ -240,6 +254,7 @@ struct ReportLUNdata {
struct ext_report_lun_entry {
u8 lunid[8];
+#define MASKED_DEVICE(x) ((x)[3] & 0xC0)
#define GET_BMIC_BUS(lunid) ((lunid)[7] & 0x3F)
#define GET_BMIC_LEVEL_TWO_TARGET(lunid) ((lunid)[6])
#define GET_BMIC_DRIVE_NUMBER(lunid) (((GET_BMIC_BUS((lunid)) - 1) << 8) + \
@@ -247,6 +262,8 @@ struct ext_report_lun_entry {
u8 wwid[8];
u8 device_type;
u8 device_flags;
+#define NON_DISK_PHYS_DEV(x) ((x)[17] & 0x01)
+#define PHYS_IOACCEL(x) ((x)[17] & 0x08)
u8 lun_count; /* multi-lun device, how many luns */
u8 redundant_paths;
u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
@@ -379,6 +396,7 @@ struct ErrorInfo {
#define CMD_SCSI 0x03
#define CMD_IOACCEL1 0x04
#define CMD_IOACCEL2 0x05
+#define IOACCEL2_TMF 0x06
#define DIRECT_LOOKUP_SHIFT 4
#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
@@ -421,7 +439,10 @@ struct CommandList {
* not used.
*/
struct hpsa_scsi_dev_t *phys_disk;
- atomic_t refcount; /* Must be last to avoid memset in cmd_alloc */
+
+ int abort_pending;
+ struct hpsa_scsi_dev_t *reset_pending;
+ atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
} __aligned(COMMANDLIST_ALIGNMENT);
/* Max S/G elements in I/O accelerator command */
@@ -515,6 +536,12 @@ struct io_accel2_scsi_response {
#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL 0x28
#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED 0x40
#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED 0x0E
+#define IOACCEL2_STATUS_SR_IO_ERROR 0x01
+#define IOACCEL2_STATUS_SR_IO_ABORTED 0x02
+#define IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE 0x03
+#define IOACCEL2_STATUS_SR_INVALID_DEVICE 0x04
+#define IOACCEL2_STATUS_SR_UNDERRUN 0x51
+#define IOACCEL2_STATUS_SR_OVERRUN 0x75
u8 data_present; /* low 2 bits */
#define IOACCEL2_NO_DATAPRESENT 0x000
#define IOACCEL2_RESPONSE_DATAPRESENT 0x001
@@ -567,6 +594,7 @@ struct io_accel2_cmd {
#define IOACCEL2_DIR_NO_DATA 0x00
#define IOACCEL2_DIR_DATA_IN 0x01
#define IOACCEL2_DIR_DATA_OUT 0x02
+#define IOACCEL2_TMF_ABORT 0x01
/*
* SCSI Task Management Request format for Accelerator Mode 2
*/
@@ -575,13 +603,13 @@ struct hpsa_tmf_struct {
u8 reply_queue; /* Reply Queue ID */
u8 tmf; /* Task Management Function */
u8 reserved1; /* byte 3 Reserved */
- u32 it_nexus; /* SCSI I-T Nexus */
+ __le32 it_nexus; /* SCSI I-T Nexus */
u8 lun_id[8]; /* LUN ID for TMF request */
__le64 tag; /* cciss tag associated w/ request */
__le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */
__le64 error_ptr; /* Error Pointer */
__le32 error_len; /* Error Length */
-};
+} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
/* Configuration Table Structure */
struct HostWrite {
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 89a8266560d0..4e1a632ccf16 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1109,7 +1109,6 @@ static struct scsi_host_template imm_template = {
.bios_param = imm_biosparam,
.this_id = 7,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
.can_queue = 1,
.slave_alloc = imm_adjust_queue,
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index e5dae7b54d9a..6a926bae76b2 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2833,7 +2833,6 @@ static struct scsi_host_template initio_template = {
.can_queue = MAX_TARGETS * i91u_MAXQUEUE,
.this_id = 1,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
};
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 47412cf4eaac..73790a1d0969 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -272,7 +272,7 @@
#define IPR_RUNTIME_RESET 0x40000000
#define IPR_IPL_INIT_MIN_STAGE_TIME 5
-#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15
+#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 30
#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
#define IPR_IPL_INIT_STAGE_MASK 0xff000000
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 7542f11d3fcd..02cb76fd4420 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -206,10 +206,6 @@ module_param(ips, charp, 0);
#define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING
#define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " "
-#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
-#warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
-#endif
-
#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
PCI_DMA_BIDIRECTIONAL : \
@@ -6788,6 +6784,11 @@ ips_remove_device(struct pci_dev *pci_dev)
static int __init
ips_module_init(void)
{
+#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
+ printk(KERN_ERR "ips: This driver has only been tested on the x86/ia64/x86_64 platforms\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+#endif
+
if (pci_register_driver(&ips_pci_driver) < 0)
return -ENODEV;
ips_driver_template.module = THIS_MODULE;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index cd41b63a2f10..0dfcabe3ca7c 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -160,7 +160,6 @@ static struct scsi_host_template isci_sht = {
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.can_queue = ISCI_CAN_QUEUE_VAL,
- .cmd_per_lun = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 9b81a34d7449..a5a56fa31e70 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -230,6 +230,8 @@ struct lpfc_stats {
uint32_t elsRcvRRQ;
uint32_t elsRcvRTV;
uint32_t elsRcvECHO;
+ uint32_t elsRcvLCB;
+ uint32_t elsRcvRDP;
uint32_t elsXmitFLOGI;
uint32_t elsXmitFDISC;
uint32_t elsXmitPLOGI;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 587e3e962f2b..b0e6fe46448d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -498,3 +498,5 @@ bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
struct lpfc_name *, uint64_t *, struct lpfc_name *,
struct lpfc_name *, uint64_t *, uint32_t *);
+int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
+void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 6977027979be..361f5b3d9d93 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -79,7 +79,6 @@ struct lpfc_nodelist {
struct lpfc_name nlp_portname;
struct lpfc_name nlp_nodename;
uint32_t nlp_flag; /* entry flags */
- uint32_t nlp_add_flag; /* additional flags */
uint32_t nlp_DID; /* FC D_ID of entry */
uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
uint16_t nlp_type;
@@ -147,6 +146,7 @@ struct lpfc_node_rrq {
#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */
#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */
#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */
+#define NLP_IN_DEV_LOSS 0x00800000 /* devloss in progress */
#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful
ACC */
#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from
@@ -158,8 +158,6 @@ struct lpfc_node_rrq {
#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
-/* Defines for nlp_add_flag (uint32) */
-#define NLP_IN_DEV_LOSS 0x00000001 /* Dev Loss processing in progress */
/* ndlp usage management macros */
#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 851e8efe364e..36bf58ba750a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1509,12 +1509,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
{
struct lpfc_vport *vport = ndlp->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *new_ndlp;
struct lpfc_rport_data *rdata;
struct fc_rport *rport;
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
- uint32_t rc, keepDID = 0;
+ uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
+ uint16_t keep_nlp_state;
int put_node;
int put_rport;
unsigned long *active_rrqs_xri_bitmap = NULL;
@@ -1603,11 +1605,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
ndlp->active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
- if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
- new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_lock_irq(shost->host_lock);
+ keep_nlp_flag = new_ndlp->nlp_flag;
+ new_ndlp->nlp_flag = ndlp->nlp_flag;
+ ndlp->nlp_flag = keep_nlp_flag;
+ spin_unlock_irq(shost->host_lock);
- /* Set state will put new_ndlp on to node list if not already done */
+ /* Set nlp_states accordingly */
+ keep_nlp_state = new_ndlp->nlp_state;
lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
/* Move this back to NPR state */
@@ -1624,8 +1629,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
if (rport) {
rdata = rport->dd_data;
if (rdata->pnode == ndlp) {
- lpfc_nlp_put(ndlp);
+ /* break the link before dropping the ref */
ndlp->rport = NULL;
+ lpfc_nlp_put(ndlp);
rdata->pnode = lpfc_nlp_get(new_ndlp);
new_ndlp->rport = rport;
}
@@ -1648,7 +1654,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
memcpy(ndlp->active_rrqs_xri_bitmap,
active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
- lpfc_drop_node(vport, ndlp);
+
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ lpfc_drop_node(vport, ndlp);
}
else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -1665,20 +1673,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
- /* Since we are swapping the ndlp passed in with the new one
- * and the did has already been swapped, copy over state.
- * The new WWNs are already in new_ndlp since thats what
- * we looked it up by in the begining of this routine.
- */
- new_ndlp->nlp_state = ndlp->nlp_state;
-
- /* Since we are switching over to the new_ndlp, the old
- * ndlp should be put in the NPR state, unless we have
- * already started re-discovery on it.
+ /* Since we are switching over to the new_ndlp,
+ * reset the old ndlp state
*/
if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
(ndlp->nlp_state == NLP_STE_MAPPED_NODE))
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ keep_nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
/* Fix up the rport accordingly */
rport = ndlp->rport;
@@ -3667,15 +3668,6 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* At this point, the driver is done so release the IOCB
*/
lpfc_els_free_iocb(phba, cmdiocb);
-
- /*
- * Remove the ndlp reference if it's a fabric node that has
- * sent us an unsolicted LOGO.
- */
- if (ndlp->nlp_type & NLP_FABRIC)
- lpfc_nlp_put(ndlp);
-
- return;
}
/**
@@ -4020,7 +4012,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
ndlp->nlp_rpi, vport->fc_flag);
if (ndlp->nlp_flag & NLP_LOGO_ACC) {
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+ if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+ ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
+ ndlp->nlp_flag &= ~NLP_LOGO_ACC;
spin_unlock_irq(shost->host_lock);
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
} else {
@@ -4587,16 +4581,16 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
- (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
- (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
- (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
+ (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
+ (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
+ (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
sentplogi++;
vport->num_disc_nodes++;
if (vport->num_disc_nodes >=
- vport->cfg_discovery_threads) {
+ vport->cfg_discovery_threads) {
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_NLP_MORE;
spin_unlock_irq(shost->host_lock);
@@ -4615,6 +4609,660 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
return sentplogi;
}
+void
+lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
+ uint32_t word0)
+{
+ desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
+ desc->payload.els_req = word0;
+ desc->length = cpu_to_be32(sizeof(desc->payload));
+}
+
+void
+lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
+ uint8_t *page_a0, uint8_t *page_a2)
+{
+ uint16_t wavelength;
+ uint16_t temperature;
+ uint16_t rx_power;
+ uint16_t tx_bias;
+ uint16_t tx_power;
+ uint16_t vcc;
+ uint16_t flag = 0;
+ struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
+ struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
+
+ desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
+
+ trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
+ &page_a0[SSF_TRANSCEIVER_CODE_B4];
+ trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
+ &page_a0[SSF_TRANSCEIVER_CODE_B5];
+
+ if ((trasn_code_byte4->fc_sw_laser) ||
+ (trasn_code_byte5->fc_sw_laser_sl) ||
+	    (trasn_code_byte5->fc_sw_laser_sn)) { /* check if it's short WL */
+ flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
+ } else if (trasn_code_byte4->fc_lw_laser) {
+ wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
+ page_a0[SSF_WAVELENGTH_B0];
+ if (wavelength == SFP_WAVELENGTH_LC1310)
+ flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
+ if (wavelength == SFP_WAVELENGTH_LL1550)
+ flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
+ }
+	/* check if it's SFP+ */
+ flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
+ SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
+ << SFP_FLAG_CT_SHIFT;
+
+	/* check if it's OPTICAL */
+ flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
+ SFP_FLAG_IS_OPTICAL_PORT : 0)
+ << SFP_FLAG_IS_OPTICAL_SHIFT;
+
+ temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
+ page_a2[SFF_TEMPERATURE_B0]);
+ vcc = (page_a2[SFF_VCC_B1] << 8 |
+ page_a2[SFF_VCC_B0]);
+ tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
+ page_a2[SFF_TXPOWER_B0]);
+ tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
+ page_a2[SFF_TX_BIAS_CURRENT_B0]);
+ rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
+ page_a2[SFF_RXPOWER_B0]);
+ desc->sfp_info.temperature = cpu_to_be16(temperature);
+ desc->sfp_info.rx_power = cpu_to_be16(rx_power);
+ desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
+ desc->sfp_info.tx_power = cpu_to_be16(tx_power);
+ desc->sfp_info.vcc = cpu_to_be16(vcc);
+
+ desc->sfp_info.flags = cpu_to_be16(flag);
+ desc->length = cpu_to_be32(sizeof(desc->sfp_info));
+}
+
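The byte-pair reads in lpfc_rdp_res_sfp_desc() above follow SFF-8472's layout, where each A2 diagnostic word is stored MSB first at two consecutive offsets. A generic accessor, shown here only as an illustration:

static inline uint16_t sff_a2_word(const uint8_t *page_a2, int msb_off)
{
	/* SFF-8472 stores the high byte at the lower offset */
	return (page_a2[msb_off] << 8) | page_a2[msb_off + 1];
}

With it, temperature would read as sff_a2_word(page_a2, SFF_TEMPERATURE_B1), and likewise for vcc, tx_bias, tx_power and rx_power.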
+void
+lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
+ READ_LNK_VAR *stat)
+{
+ uint32_t type;
+
+ desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
+
+ type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
+
+ desc->info.port_type = cpu_to_be32(type);
+
+ desc->info.link_status.link_failure_cnt =
+ cpu_to_be32(stat->linkFailureCnt);
+ desc->info.link_status.loss_of_synch_cnt =
+ cpu_to_be32(stat->lossSyncCnt);
+ desc->info.link_status.loss_of_signal_cnt =
+ cpu_to_be32(stat->lossSignalCnt);
+ desc->info.link_status.primitive_seq_proto_err =
+ cpu_to_be32(stat->primSeqErrCnt);
+ desc->info.link_status.invalid_trans_word =
+ cpu_to_be32(stat->invalidXmitWord);
+ desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
+
+ desc->length = cpu_to_be32(sizeof(desc->info));
+}
+
+void
+lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
+{
+ uint16_t rdp_cap = 0;
+ uint16_t rdp_speed;
+
+ desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
+
+ switch (phba->sli4_hba.link_state.speed) {
+ case LPFC_FC_LA_SPEED_1G:
+ rdp_speed = RDP_PS_1GB;
+ break;
+ case LPFC_FC_LA_SPEED_2G:
+ rdp_speed = RDP_PS_2GB;
+ break;
+ case LPFC_FC_LA_SPEED_4G:
+ rdp_speed = RDP_PS_4GB;
+ break;
+ case LPFC_FC_LA_SPEED_8G:
+ rdp_speed = RDP_PS_8GB;
+ break;
+ case LPFC_FC_LA_SPEED_10G:
+ rdp_speed = RDP_PS_10GB;
+ break;
+ case LPFC_FC_LA_SPEED_16G:
+ rdp_speed = RDP_PS_16GB;
+ break;
+ case LPFC_FC_LA_SPEED_32G:
+ rdp_speed = RDP_PS_32GB;
+ break;
+ default:
+ rdp_speed = RDP_PS_UNKNOWN;
+ break;
+ }
+
+ desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
+
+ if (phba->lmt & LMT_16Gb)
+ rdp_cap |= RDP_PS_16GB;
+ if (phba->lmt & LMT_10Gb)
+ rdp_cap |= RDP_PS_10GB;
+ if (phba->lmt & LMT_8Gb)
+ rdp_cap |= RDP_PS_8GB;
+ if (phba->lmt & LMT_4Gb)
+ rdp_cap |= RDP_PS_4GB;
+ if (phba->lmt & LMT_2Gb)
+ rdp_cap |= RDP_PS_2GB;
+ if (phba->lmt & LMT_1Gb)
+ rdp_cap |= RDP_PS_1GB;
+
+ if (rdp_cap == 0)
+ rdp_cap = RDP_CAP_UNKNOWN;
+
+ desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
+ desc->length = cpu_to_be32(sizeof(desc->info));
+}
+
+void
+lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
+ struct lpfc_hba *phba)
+{
+ desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
+
+ memcpy(desc->port_names.wwnn, phba->wwnn,
+ sizeof(desc->port_names.wwnn));
+
+ memcpy(desc->port_names.wwpn, &phba->wwpn,
+ sizeof(desc->port_names.wwpn));
+
+ desc->length = cpu_to_be32(sizeof(desc->port_names));
+}
+
+void
+lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
+ struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
+ if (vport->fc_flag & FC_FABRIC) {
+ memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
+ sizeof(desc->port_names.wwnn));
+
+ memcpy(desc->port_names.wwpn, &vport->fabric_portname,
+ sizeof(desc->port_names.wwpn));
+ } else { /* Point to Point */
+ memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
+ sizeof(desc->port_names.wwnn));
+
+		memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
+				sizeof(desc->port_names.wwpn));
+ }
+
+ desc->length = cpu_to_be32(sizeof(desc->port_names));
+}
+
+void
+lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
+ int status)
+{
+ struct lpfc_nodelist *ndlp = rdp_context->ndlp;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_iocbq *elsiocb;
+ IOCB_t *icmd;
+ uint8_t *pcmd;
+ struct ls_rjt *stat;
+ struct fc_rdp_res_frame *rdp_res;
+ uint32_t cmdsize;
+ int rc;
+
+ if (status != SUCCESS)
+ goto error;
+ cmdsize = sizeof(struct fc_rdp_res_frame);
+
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
+ lpfc_max_els_tries, rdp_context->ndlp,
+ rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
+ lpfc_nlp_put(ndlp);
+ if (!elsiocb)
+ goto free_rdp_context;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rdp_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2171 Xmit RDP response tag x%x xri x%x, "
+ "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ rdp_res = (struct fc_rdp_res_frame *)
+ (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+
+ /* For RDP payload */
+ lpfc_rdp_res_link_service(&rdp_res->link_service_desc, ELS_CMD_RDP);
+
+ lpfc_rdp_res_sfp_desc(&rdp_res->sfp_desc,
+ rdp_context->page_a0, rdp_context->page_a2);
+ lpfc_rdp_res_speed(&rdp_res->portspeed_desc, phba);
+ lpfc_rdp_res_link_error(&rdp_res->link_error_desc,
+ &rdp_context->link_stat);
+ lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba);
+ lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc,
+ vport, ndlp);
+ rdp_res->length = cpu_to_be32(RDP_DESC_PAYLOAD_SIZE);
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+ phba->fc_stat.elsXmitACC++;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+
+ kfree(rdp_context);
+
+ return;
+error:
+ cmdsize = 2 * sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
+ ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
+ lpfc_nlp_put(ndlp);
+ if (!elsiocb)
+ goto free_rdp_context;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rdp_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
+ stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
+ stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+
+ phba->fc_stat.elsXmitLSRJT++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+ if (rc == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+free_rdp_context:
+ kfree(rdp_context);
+}
+
+int
+lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
+{
+ LPFC_MBOXQ_t *mbox = NULL;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
+ "7105 failed to allocate mailbox memory");
+ return 1;
+ }
+
+ if (lpfc_sli4_dump_page_a0(phba, mbox))
+ goto prep_mbox_fail;
+ mbox->vport = rdp_context->ndlp->vport;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
+ mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto issue_mbox_fail;
+
+ return 0;
+
+prep_mbox_fail:
+issue_mbox_fail:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+}
+
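lpfc_get_rdp_info() above only issues the first mailbox; per the kernel-doc that follows, the completion handlers chain the remaining steps before the response goes out:

	RDP received
	  -> MBX_DUMP_MEMORY, page A0  (completes in lpfc_mbx_cmpl_rdp_page_a0)
	  -> MBX_DUMP_MEMORY, page A2
	  -> MBX_READ_LNK_STAT
	  -> lpfc_els_rdp_cmpl()       (assembles and transmits the ACC)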
+/**
+ * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
+ * IOCB. First, the payload of the unsolicited RDP is checked.
+ * Then it will (1) send MBX_DUMP_MEMORY, embedded DMP_LMSD sub command
+ * TYPE-3 for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
+ * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call
+ * lpfc_els_rdp_cmpl to gather all the data and send the RDP response.
+ *
+ * Return code
+ *   0 - Sent the acc response
+ *   1 - Sent the reject response.
+ */
+static int
+lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_dmabuf *pcmd;
+ uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
+ struct fc_rdp_req_frame *rdp_req;
+ struct lpfc_rdp_context *rdp_context;
+ IOCB_t *cmd = NULL;
+ struct ls_rjt stat;
+
+	if (phba->sli_rev < LPFC_SLI_REV4 ||
+	    bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+						LPFC_SLI_INTF_IF_TYPE_2 ||
+	    (phba->hba_flag & HBA_FCOE_MODE)) {
+		rjt_err = LSRJT_UNABLE_TPC;
+		rjt_expl = LSEXP_REQ_UNSUPPORTED;
+		goto error;
+	}
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2422 ELS RDP Request "
+ "dec len %d tag x%x port_id %d len %d\n",
+ be32_to_cpu(rdp_req->rdp_des_length),
+ be32_to_cpu(rdp_req->nport_id_desc.tag),
+ be32_to_cpu(rdp_req->nport_id_desc.nport_id),
+ be32_to_cpu(rdp_req->nport_id_desc.length));
+
+ if (sizeof(struct fc_rdp_nport_desc) !=
+ be32_to_cpu(rdp_req->rdp_des_length))
+ goto rjt_logerr;
+ if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
+ goto rjt_logerr;
+ if (RDP_NPORT_ID_SIZE !=
+ be32_to_cpu(rdp_req->nport_id_desc.length))
+ goto rjt_logerr;
+ rdp_context = kmalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
+ if (!rdp_context) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ goto error;
+ }
+
+ memset(rdp_context, 0, sizeof(struct lpfc_rdp_context));
+ cmd = &cmdiocb->iocb;
+ rdp_context->ndlp = lpfc_nlp_get(ndlp);
+ rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
+ rdp_context->rx_id = cmd->ulpContext;
+ rdp_context->cmpl = lpfc_els_rdp_cmpl;
+ if (lpfc_get_rdp_info(phba, rdp_context)) {
+ lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
+ "2423 Unable to send mailbox");
+ kfree(rdp_context);
+ rjt_err = LSRJT_UNABLE_TPC;
+ lpfc_nlp_put(ndlp);
+ goto error;
+ }
+
+ return 0;
+
+rjt_logerr:
+ rjt_err = LSRJT_LOGICAL_ERR;
+
+error:
+ memset(&stat, 0, sizeof(stat));
+ stat.un.b.lsRjtRsnCode = rjt_err;
+ stat.un.b.lsRjtRsnCodeExp = rjt_expl;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 1;
+}
+
+
+static void
+lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ IOCB_t *icmd;
+ uint8_t *pcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_nodelist *ndlp;
+ struct ls_rjt *stat;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_lcb_context *lcb_context;
+ struct fc_lcb_res_frame *lcb_res;
+ uint32_t cmdsize, shdr_status, shdr_add_status;
+ int rc;
+
+ mb = &pmb->u.mb;
+ lcb_context = (struct lpfc_lcb_context *)pmb->context1;
+ ndlp = lcb_context->ndlp;
+ pmb->context1 = NULL;
+ pmb->context2 = NULL;
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.beacon_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
+ "0194 SET_BEACON_CONFIG mailbox "
+ "completed with status x%x add_status x%x,"
+ " mbx status x%x\n",
+ shdr_status, shdr_add_status, mb->mbxStatus);
+
+ if (mb->mbxStatus && !(shdr_status &&
+ shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ goto error;
+ }
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+ cmdsize = sizeof(struct fc_lcb_res_frame);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+
+ /* Decrement the ndlp reference count from previous mbox command */
+ lpfc_nlp_put(ndlp);
+
+ if (!elsiocb)
+ goto free_lcb_context;
+
+ lcb_res = (struct fc_lcb_res_frame *)
+ (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = lcb_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+
+ pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ *((uint32_t *)(pcmd)) = ELS_CMD_ACC;
+ lcb_res->lcb_sub_command = lcb_context->sub_command;
+ lcb_res->lcb_type = lcb_context->type;
+ lcb_res->lcb_frequency = lcb_context->frequency;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitACC++;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+
+ kfree(lcb_context);
+ return;
+
+error:
+ cmdsize = sizeof(struct fc_lcb_res_frame);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LS_RJT);
+ lpfc_nlp_put(ndlp);
+ if (!elsiocb)
+ goto free_lcb_context;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = lcb_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+ pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+
+ *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
+ stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
+ stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitLSRJT++;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+free_lcb_context:
+ kfree(lcb_context);
+}
+
+static int
+lpfc_sli4_set_beacon(struct lpfc_vport *vport,
+ struct lpfc_lcb_context *lcb_context,
+ uint32_t beacon_state)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox = NULL;
+ uint32_t len;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return 1;
+
+ len = sizeof(struct lpfc_mbx_set_beacon_config) -
+ sizeof(struct lpfc_sli4_cfg_mhdr);
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
+ LPFC_SLI4_MBX_EMBED);
+ mbox->context1 = (void *)lcb_context;
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_els_lcb_rsp;
+ bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
+ phba->sli4_hba.physical_port);
+ bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
+ beacon_state);
+ bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1);
+ bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0);
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * lpfc_els_rcv_lcb - Process an unsolicited LCB
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes an unsolicited LCB (Link Cable Beacon) IOCB.
+ * First, the payload of the unsolicited LCB is checked.
+ * Then, based on the subcommand, the beacon is turned either on or off.
+ *
+ * Return code
+ * 0 - Sent the acc response
+ * 1 - Sent the reject response.
+ **/
+static int
+lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_dmabuf *pcmd;
+ IOCB_t *icmd;
+ uint8_t *lp;
+ struct fc_lcb_request_frame *beacon;
+ struct lpfc_lcb_context *lcb_context;
+ uint8_t state, rjt_err;
+ struct ls_rjt stat;
+
+ icmd = &cmdiocb->iocb;
+ pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ lp = (uint8_t *)pcmd->virt;
+ beacon = (struct fc_lcb_request_frame *)pcmd->virt;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
+ "type x%x frequency %x duration x%x\n",
+ lp[0], lp[1], lp[2],
+ beacon->lcb_command,
+ beacon->lcb_sub_command,
+ beacon->lcb_type,
+ beacon->lcb_frequency,
+ be16_to_cpu(beacon->lcb_duration));
+
+ if (phba->sli_rev < LPFC_SLI_REV4 ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)) {
+ rjt_err = LSRJT_CMD_UNSUPPORTED;
+ goto rjt;
+ }
+
+ if (phba->hba_flag & HBA_FCOE_MODE) {
+ rjt_err = LSRJT_CMD_UNSUPPORTED;
+ goto rjt;
+ }
+ if (beacon->lcb_frequency == 0) {
+ rjt_err = LSRJT_CMD_UNSUPPORTED;
+ goto rjt;
+ }
+ if ((beacon->lcb_type != LPFC_LCB_GREEN) &&
+ (beacon->lcb_type != LPFC_LCB_AMBER)) {
+ rjt_err = LSRJT_CMD_UNSUPPORTED;
+ goto rjt;
+ }
+ if ((beacon->lcb_sub_command != LPFC_LCB_ON) &&
+ (beacon->lcb_sub_command != LPFC_LCB_OFF)) {
+ rjt_err = LSRJT_CMD_UNSUPPORTED;
+ goto rjt;
+ }
+ if ((beacon->lcb_sub_command == LPFC_LCB_ON) &&
+ (beacon->lcb_type != LPFC_LCB_GREEN) &&
+ (beacon->lcb_type != LPFC_LCB_AMBER)) {
+ rjt_err = LSRJT_CMD_UNSUPPORTED;
+ goto rjt;
+ }
+ if (be16_to_cpu(beacon->lcb_duration) != 0) {
+ rjt_err = LSRJT_CMD_UNSUPPORTED;
+ goto rjt;
+ }
+
+	lcb_context = kmalloc(sizeof(struct lpfc_lcb_context), GFP_KERNEL);
+	if (!lcb_context) {
+		rjt_err = LSRJT_UNABLE_TPC;
+		goto rjt;
+	}
+
+	state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
+ lcb_context->sub_command = beacon->lcb_sub_command;
+ lcb_context->type = beacon->lcb_type;
+ lcb_context->frequency = beacon->lcb_frequency;
+ lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+ lcb_context->rx_id = cmdiocb->iocb.ulpContext;
+ lcb_context->ndlp = lpfc_nlp_get(ndlp);
+	if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
+		lpfc_printf_vlog(ndlp->vport, KERN_ERR,
+				 LOG_ELS, "0193 failed to send mailbox");
+		kfree(lcb_context);
+		lpfc_nlp_put(ndlp);
+		rjt_err = LSRJT_UNABLE_TPC;
+		goto rjt;
+	}
+ return 0;
+rjt:
+ memset(&stat, 0, sizeof(stat));
+ stat.un.b.lsRjtRsnCode = rjt_err;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 1;
+}
+
+
/**
* lpfc_els_flush_rscn - Clean up any rscn activities with a vport
* @vport: pointer to a host virtual N_Port data structure.
@@ -6706,8 +7354,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* Do not process any unsolicited ELS commands
* if the ndlp is in DEV_LOSS
*/
- if (ndlp->nlp_add_flag & NLP_IN_DEV_LOSS)
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
+ spin_unlock_irq(shost->host_lock);
goto dropit;
+ }
+ spin_unlock_irq(shost->host_lock);
elsiocb->context1 = lpfc_nlp_get(ndlp);
elsiocb->vport = vport;
@@ -6751,7 +7404,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
- shost = lpfc_shost_from_vport(vport);
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6821,6 +7473,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
break;
+ case ELS_CMD_LCB:
+ phba->fc_stat.elsRcvLCB++;
+ lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
+ break;
+ case ELS_CMD_RDP:
+ phba->fc_stat.elsRcvRDP++;
+ lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
+ break;
case ELS_CMD_RSCN:
phba->fc_stat.elsRcvRSCN++;
lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
@@ -7586,7 +8246,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_do_scr_ns_plogi(phba, vport);
goto out;
fdisc_failed:
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
/* Cancel discovery timer */
lpfc_can_disctmo(vport);
lpfc_nlp_put(ndlp);
@@ -7739,8 +8400,10 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus == IOSTAT_SUCCESS) {
spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
vport->fc_flag &= ~FC_FABRIC;
spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
}
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2500f15d437f..ce96d5bf8ae7 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -106,6 +106,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
struct lpfc_rport_data *rdata;
struct lpfc_nodelist * ndlp;
struct lpfc_vport *vport;
+ struct Scsi_Host *shost;
struct lpfc_hba *phba;
struct lpfc_work_evt *evtp;
int put_node;
@@ -146,48 +147,32 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
- if (ndlp->nlp_type & NLP_FABRIC) {
-
- /* If the WWPN of the rport and ndlp don't match, ignore it */
- if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
- "6789 rport name %lx != node port name %lx",
- (unsigned long)rport->port_name,
- (unsigned long)wwn_to_u64(
- ndlp->nlp_portname.u.wwn));
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
- return;
- }
-
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
- return;
- }
+ if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "6789 rport name %llx != node port name %llx",
+ rport->port_name,
+ wwn_to_u64(ndlp->nlp_portname.u.wwn));
evtp = &ndlp->dev_loss_evt;
- if (!list_empty(&evtp->evt_listp))
+ if (!list_empty(&evtp->evt_listp)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "6790 rport name %llx dev_loss_evt pending",
+ rport->port_name);
return;
+ }
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
- ndlp->nlp_add_flag |= NLP_IN_DEV_LOSS;
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
+ spin_unlock_irq(shost->host_lock);
- spin_lock_irq(&phba->hbalock);
/* We need to hold the node by incrementing the reference
* count until this queued work is done
*/
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
+ spin_lock_irq(&phba->hbalock);
if (evtp->evt_arg1) {
evtp->evt = LPFC_EVT_DEV_LOSS;
list_add_tail(&evtp->evt_listp, &phba->work_list);
@@ -215,22 +200,24 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
struct fc_rport *rport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
+ struct Scsi_Host *shost;
uint8_t *name;
int put_node;
- int put_rport;
int warn_on = 0;
int fcf_inuse = 0;
rport = ndlp->rport;
+ vport = ndlp->vport;
+ shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+ spin_unlock_irq(shost->host_lock);
- if (!rport) {
- ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
+ if (!rport)
return fcf_inuse;
- }
- rdata = rport->dd_data;
name = (uint8_t *) &ndlp->nlp_portname;
- vport = ndlp->vport;
phba = vport->phba;
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -244,6 +231,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
"3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+	/*
+	 * If lpfc_nlp_remove is reached with a dangling rport, it drops
+	 * the reference. To make sure that does not happen, clear the
+	 * rport pointer in the ndlp before calling lpfc_nlp_put.
+	 */
+ rdata = rport->dd_data;
+
/* Don't defer this if we are in the process of deleting the vport
* or unloading the driver. The unload will cleanup the node
* appropriately we just need to cleanup the ndlp rport info here.
@@ -256,14 +250,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
rdata->pnode = NULL;
ndlp->rport = NULL;
- ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
if (put_node)
lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
+ put_device(&rport->dev);
+
return fcf_inuse;
}
@@ -275,28 +267,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID);
- ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
return fcf_inuse;
}
- if (ndlp->nlp_type & NLP_FABRIC) {
- /* We will clean up these Nodes in linkup */
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
+ put_node = rdata->pnode != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ put_device(&rport->dev);
+
+ if (ndlp->nlp_type & NLP_FABRIC)
return fcf_inuse;
- }
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
- /* flush the target */
- ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
@@ -321,16 +306,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
}
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
-
if (!(vport->load_flag & FC_UNLOADING) &&
!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
@@ -1802,7 +1777,7 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
dma_addr_t phys_addr;
struct lpfc_mbx_sge sge;
struct lpfc_mbx_read_fcf_tbl *read_fcf;
- uint32_t shdr_status, shdr_add_status;
+ uint32_t shdr_status, shdr_add_status, if_type;
union lpfc_sli4_cfg_shdr *shdr;
struct fcf_record *new_fcf_record;
@@ -1823,9 +1798,11 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
lpfc_sli_pcimem_bcopy(shdr, shdr,
sizeof(union lpfc_sli4_cfg_shdr));
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status) {
- if (shdr_status == STATUS_FCF_TABLE_EMPTY)
+ if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
+ if_type == LPFC_SLI_INTF_IF_TYPE_2)
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2726 READ_FCF_RECORD Indicates empty "
"FCF table.\n");
@@ -3868,11 +3845,11 @@ out:
if (vport->port_state < LPFC_VPORT_READY) {
/* Link up discovery requires Fabric registration. */
- lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
+ lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0);
/* Issue SCR just before NameServer GID_FT Query */
lpfc_issue_els_scr(vport, SCR_DID, 0);
@@ -3918,9 +3895,17 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* registered port, drop the reference that we took the last time we
* registered the port.
*/
- if (ndlp->rport && ndlp->rport->dd_data &&
- ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
- lpfc_nlp_put(ndlp);
+	rport = ndlp->rport;
+	if (rport) {
+		rdata = rport->dd_data;
+		/* break the link before dropping the ref */
+		ndlp->rport = NULL;
+		if (rdata) {
+			if (rdata->pnode == ndlp)
+				lpfc_nlp_put(ndlp);
+			rdata->pnode = NULL;
+		}
+		/* drop reference for earlier registration */
+		put_device(&rport->dev);
+	}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
"rport add: did:x%x flg:x%x type x%x",
@@ -4296,9 +4281,9 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
- } else {
- lpfc_nlp_put(ndlp);
}
+
+ lpfc_nlp_put(ndlp);
return;
}
@@ -4510,7 +4495,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
- int rc;
+ int rc, acc_plogi = 1;
uint16_t rpi;
if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
@@ -4543,14 +4528,20 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
mbox->context1 = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl =
lpfc_sli4_unreg_rpi_cmpl_clr;
+ /*
+ * accept PLOGIs after unreg_rpi_cmpl
+ */
+ acc_plogi = 0;
} else
mbox->mbox_cmpl =
lpfc_sli_def_mbox_cmpl;
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
+ acc_plogi = 1;
+ }
}
lpfc_no_rpi(phba, ndlp);
@@ -4558,8 +4549,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_rpi = 0;
ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ if (acc_plogi)
+ ndlp->nlp_flag &= ~NLP_LOGO_ACC;
return 1;
}
+ ndlp->nlp_flag &= ~NLP_LOGO_ACC;
return 0;
}
@@ -4761,6 +4755,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata;
+ struct fc_rport *rport;
LPFC_MBOXQ_t *mbox;
int rc;
@@ -4798,14 +4793,24 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_cleanup_node(vport, ndlp);
/*
- * We can get here with a non-NULL ndlp->rport because when we
- * unregister a rport we don't break the rport/node linkage. So if we
- * do, make sure we don't leaving any dangling pointers behind.
+ * ndlp->rport must be set to NULL before it reaches here
+	 * ndlp->rport must already be NULL by the time we get here,
+	 * i.e. the rport/node link is broken and lpfc_nlp_put is done
+	 * for the registered rport before its reference is dropped.
if (ndlp->rport) {
- rdata = ndlp->rport->dd_data;
+ /*
+		/*
+		 * The extra lpfc_nlp_put has already dropped the ndlp
+		 * reference for the registered rport, so only the rport
+		 * itself still needs to be cleaned up here.
+		 */
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				"0940 removed node x%p DID x%x "
+				"rport not null %p\n",
+ rport = ndlp->rport;
+ rdata = rport->dd_data;
rdata->pnode = NULL;
ndlp->rport = NULL;
+ put_device(&rport->dev);
}
}
@@ -4833,9 +4838,19 @@ lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (matchdid.un.b.id == ndlpdid.un.b.id) {
if ((mydid.un.b.domain == matchdid.un.b.domain) &&
(mydid.un.b.area == matchdid.un.b.area)) {
+			/* This code is supposed to match the ID
+			 * for a private loop device that is
+			 * connected to an fl_port. But we need
+			 * to check that the port did not just
+			 * go from pt2pt to fabric, or we could
+			 * end up matching ndlp->nlp_DID 000001
+			 * to fabric DID 0x20101.
+			 */
if ((ndlpdid.un.b.domain == 0) &&
(ndlpdid.un.b.area == 0)) {
- if (ndlpdid.un.b.id)
+ if (ndlpdid.un.b.id &&
+ vport->phba->fc_topology ==
+ LPFC_TOPOLOGY_LOOP)
return 1;
}
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 37beb9dc1311..892c5257d87c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -543,6 +543,7 @@ struct fc_vft_header {
#define ELS_CMD_TEST 0x11000000
#define ELS_CMD_RRQ 0x12000000
#define ELS_CMD_REC 0x13000000
+#define ELS_CMD_RDP 0x18000000
#define ELS_CMD_PRLI 0x20100014
#define ELS_CMD_PRLO 0x21100014
#define ELS_CMD_PRLO_ACC 0x02100014
@@ -558,6 +559,7 @@ struct fc_vft_header {
#define ELS_CMD_SCR 0x62000000
#define ELS_CMD_RNID 0x78000000
#define ELS_CMD_LIRR 0x7A000000
+#define ELS_CMD_LCB 0x81000000
#else /* __LITTLE_ENDIAN_BITFIELD */
#define ELS_CMD_MASK 0xffff
#define ELS_RSP_MASK 0xff
@@ -580,6 +582,7 @@ struct fc_vft_header {
#define ELS_CMD_TEST 0x11
#define ELS_CMD_RRQ 0x12
#define ELS_CMD_REC 0x13
+#define ELS_CMD_RDP 0x18
#define ELS_CMD_PRLI 0x14001020
#define ELS_CMD_PRLO 0x14001021
#define ELS_CMD_PRLO_ACC 0x14001002
@@ -595,6 +598,7 @@ struct fc_vft_header {
#define ELS_CMD_SCR 0x62
#define ELS_CMD_RNID 0x78
#define ELS_CMD_LIRR 0x7A
+#define ELS_CMD_LCB 0x81
#endif
/*
@@ -1010,6 +1014,198 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */
} un;
} ELS_PKT;
+/*
+ * Link Cable Beacon (LCB) ELS Frame
+ */
+
+struct fc_lcb_request_frame {
+ uint32_t lcb_command; /* ELS command opcode (0x81) */
+ uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
+#define LPFC_LCB_ON 0x1
+#define LPFC_LCB_OFF 0x2
+ uint8_t reserved[3];
+
+ uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
+#define LPFC_LCB_GREEN 0x1
+#define LPFC_LCB_AMBER 0x2
+ uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
+ uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
+};
+
+/*
+ * Link Cable Beacon (LCB) ELS Response Frame
+ */
+struct fc_lcb_res_frame {
+ uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */
+ uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
+ uint8_t reserved[3];
+ uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
+ uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
+ uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
+};
+
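As a concrete example, the only request lpfc_els_rcv_lcb() in lpfc_els.c accepts without rejecting is an on/off subcommand with a green or amber type, a non-zero frequency, and a zero duration; turning the green beacon on would look like (illustrative wire payload, big endian):

	word 0: 0x81000000	/* ELS_CMD_LCB */
	word 1: lcb_sub_command = LPFC_LCB_ON    (0x1)
	word 2: lcb_type        = LPFC_LCB_GREEN (0x1),
	        lcb_frequency   = 1, lcb_duration = 0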
+/*
+ * Read Diagnostic Parameters (RDP) ELS frame.
+ */
+#define SFF_PG0_IDENT_SFP 0x3
+
+#define SFP_FLAG_PT_OPTICAL 0x0
+#define SFP_FLAG_PT_SWLASER 0x01
+#define SFP_FLAG_PT_LWLASER_LC1310 0x02
+#define SFP_FLAG_PT_LWLASER_LL1550 0x03
+#define SFP_FLAG_PT_MASK 0x0F
+#define SFP_FLAG_PT_SHIFT 0
+
+#define SFP_FLAG_IS_OPTICAL_PORT 0x01
+#define SFP_FLAG_IS_OPTICAL_MASK 0x010
+#define SFP_FLAG_IS_OPTICAL_SHIFT 4
+
+#define SFP_FLAG_IS_DESC_VALID 0x01
+#define SFP_FLAG_IS_DESC_VALID_MASK 0x020
+#define SFP_FLAG_IS_DESC_VALID_SHIFT 5
+
+#define SFP_FLAG_CT_UNKNOWN 0x0
+#define SFP_FLAG_CT_SFP_PLUS 0x01
+#define SFP_FLAG_CT_MASK 0x3C
+#define SFP_FLAG_CT_SHIFT 6
+
+struct fc_rdp_port_name_info {
+ uint8_t wwnn[8];
+ uint8_t wwpn[8];
+};
+
+
+/*
+ * Link Error Status Block Structure (FC-FS-3) for RDP
+ * This is similar to the RPS ELS.
+ */
+struct fc_link_status {
+ uint32_t link_failure_cnt;
+ uint32_t loss_of_synch_cnt;
+ uint32_t loss_of_signal_cnt;
+ uint32_t primitive_seq_proto_err;
+ uint32_t invalid_trans_word;
+ uint32_t invalid_crc_cnt;
+
+};
+
+#define RDP_PORT_NAMES_DESC_TAG 0x00010003
+struct fc_rdp_port_name_desc {
+ uint32_t tag; /* 0001 0003h */
+ uint32_t length; /* set to size of payload struct */
+ struct fc_rdp_port_name_info port_names;
+};
+
+
+struct fc_rdp_link_error_status_payload_info {
+ struct fc_link_status link_status; /* 24 bytes */
+ uint32_t port_type; /* bits 31-30 only */
+};
+
+#define RDP_LINK_ERROR_STATUS_DESC_TAG 0x00010002
+struct fc_rdp_link_error_status_desc {
+ uint32_t tag; /* 0001 0002h */
+ uint32_t length; /* set to size of payload struct */
+ struct fc_rdp_link_error_status_payload_info info;
+};
+
+#define VN_PT_PHY_UNKNOWN 0x00
+#define VN_PT_PHY_PF_PORT 0x01
+#define VN_PT_PHY_ETH_MAC 0x10
+#define VN_PT_PHY_SHIFT 30
+
+#define RDP_PS_1GB 0x8000
+#define RDP_PS_2GB 0x4000
+#define RDP_PS_4GB 0x2000
+#define RDP_PS_10GB 0x1000
+#define RDP_PS_8GB 0x0800
+#define RDP_PS_16GB 0x0400
+#define RDP_PS_32GB 0x0200
+
+#define RDP_CAP_UNKNOWN 0x0001
+#define RDP_PS_UNKNOWN 0x0002
+#define RDP_PS_NOT_ESTABLISHED 0x0001
+
+struct fc_rdp_port_speed {
+ uint16_t capabilities;
+ uint16_t speed;
+};
+
+struct fc_rdp_port_speed_info {
+ struct fc_rdp_port_speed port_speed;
+};
+
+#define RDP_PORT_SPEED_DESC_TAG 0x00010001
+struct fc_rdp_port_speed_desc {
+ uint32_t tag; /* 00010001h */
+ uint32_t length; /* set to size of payload struct */
+ struct fc_rdp_port_speed_info info;
+};
+
+#define RDP_NPORT_ID_SIZE 4
+#define RDP_N_PORT_DESC_TAG 0x00000003
+struct fc_rdp_nport_desc {
+ uint32_t tag; /* 0000 0003h, big endian */
+ uint32_t length; /* size of RDP_N_PORT_ID struct */
+ uint32_t nport_id : 12;
+ uint32_t reserved : 8;
+};
+
+
+struct fc_rdp_link_service_info {
+ uint32_t els_req; /* Request payload word 0 value.*/
+};
+
+#define RDP_LINK_SERVICE_DESC_TAG 0x00000001
+struct fc_rdp_link_service_desc {
+ uint32_t tag; /* Descriptor tag 1 */
+ uint32_t length; /* set to size of payload struct. */
+ struct fc_rdp_link_service_info payload;
+					/* must be ELS req Word 0 (0x18) */
+};
+
+struct fc_rdp_sfp_info {
+ uint16_t temperature;
+ uint16_t vcc;
+ uint16_t tx_bias;
+ uint16_t tx_power;
+ uint16_t rx_power;
+ uint16_t flags;
+};
+
+#define RDP_SFP_DESC_TAG 0x00010000
+struct fc_rdp_sfp_desc {
+ uint32_t tag;
+ uint32_t length; /* set to size of sfp_info struct */
+ struct fc_rdp_sfp_info sfp_info;
+};
+
+struct fc_rdp_req_frame {
+ uint32_t rdp_command; /* ELS command opcode (0x18)*/
+ uint32_t rdp_des_length; /* RDP Payload Word 1 */
+ struct fc_rdp_nport_desc nport_id_desc; /* RDP Payload Word 2 - 4 */
+};
+
+
+struct fc_rdp_res_frame {
+ uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */
+ uint32_t length; /* FC Word 1 */
+ struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */
+ struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */
+ struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10-12 */
+ struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */
+ struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */
+ struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */
+};
+
+
+#define RDP_DESC_PAYLOAD_SIZE (sizeof(struct fc_rdp_link_service_desc) \
+ + sizeof(struct fc_rdp_sfp_desc) \
+ + sizeof(struct fc_rdp_port_speed_desc) \
+ + sizeof(struct fc_rdp_link_error_status_desc) \
+ + (sizeof(struct fc_rdp_port_name_desc) * 2))
+
+
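Since the descriptor structs above are wire formats built purely from 32-bit-aligned fields, the full response frame should come out to exactly its two header words plus RDP_DESC_PAYLOAD_SIZE. A hypothetical build-time check (not in the patch, and assuming the compiler inserts no padding):

	BUILD_BUG_ON(sizeof(struct fc_rdp_res_frame) !=
		     2 * sizeof(uint32_t) + RDP_DESC_PAYLOAD_SIZE);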
/******** FDMI ********/
/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
@@ -1587,6 +1783,11 @@ typedef struct { /* FireFly BIU registers */
#define TEMPERATURE_OFFSET 0xB0 /* Slim offset for critical temperature event */
/*
+ * return code Fail
+ */
+#define FAILURE 1
+
+/*
* Begin Structure Definitions for Mailbox Commands
*/
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1813c45946f4..33ec4fa39ccb 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -291,7 +291,7 @@ struct sli4_bls_rsp {
struct lpfc_eqe {
uint32_t word0;
#define lpfc_eqe_resource_id_SHIFT 16
-#define lpfc_eqe_resource_id_MASK 0x000000FF
+#define lpfc_eqe_resource_id_MASK 0x0000FFFF
#define lpfc_eqe_resource_id_WORD word0
#define lpfc_eqe_minor_code_SHIFT 4
#define lpfc_eqe_minor_code_MASK 0x00000FFF
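The widened mask above matters because lpfc's bf_get() extracts roughly ((word >> SHIFT) & MASK), so with the old 0x000000FF mask any completion-queue ID above 255 reported in bits 16-31 of EQE word0 would have been silently truncated:

	/* roughly what bf_get(lpfc_eqe_resource_id, eqe) expands to */
	cqid = (eqe->word0 >> lpfc_eqe_resource_id_SHIFT) &
		lpfc_eqe_resource_id_MASK;	/* now up to 0xFFFF */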
@@ -914,6 +914,8 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG 0x3E
#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG 0x43
+#define LPFC_MBOX_OPCODE_SET_BEACON_CONFIG 0x45
+#define LPFC_MBOX_OPCODE_GET_BEACON_CONFIG 0x46
#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
@@ -1479,6 +1481,26 @@ struct lpfc_mbx_query_fw_config {
} rsp;
};
+struct lpfc_mbx_set_beacon_config {
+ struct mbox_header header;
+ uint32_t word4;
+#define lpfc_mbx_set_beacon_port_num_SHIFT 0
+#define lpfc_mbx_set_beacon_port_num_MASK 0x0000003F
+#define lpfc_mbx_set_beacon_port_num_WORD word4
+#define lpfc_mbx_set_beacon_port_type_SHIFT 6
+#define lpfc_mbx_set_beacon_port_type_MASK 0x00000003
+#define lpfc_mbx_set_beacon_port_type_WORD word4
+#define lpfc_mbx_set_beacon_state_SHIFT 8
+#define lpfc_mbx_set_beacon_state_MASK 0x000000FF
+#define lpfc_mbx_set_beacon_state_WORD word4
+#define lpfc_mbx_set_beacon_duration_SHIFT 16
+#define lpfc_mbx_set_beacon_duration_MASK 0x000000FF
+#define lpfc_mbx_set_beacon_duration_WORD word4
+#define lpfc_mbx_set_beacon_status_duration_SHIFT 24
+#define lpfc_mbx_set_beacon_status_duration_MASK 0x000000FF
+#define lpfc_mbx_set_beacon_status_duration_WORD word4
+};
+
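The _SHIFT/_MASK/_WORD triplets above feed lpfc's generic bf_set()/bf_get() accessors, as used by lpfc_sli4_set_beacon() earlier in the series; packing a beacon state into word4 expands roughly to:

	word4 = (word4 & ~(lpfc_mbx_set_beacon_state_MASK <<
			   lpfc_mbx_set_beacon_state_SHIFT)) |
		((state & lpfc_mbx_set_beacon_state_MASK) <<
		 lpfc_mbx_set_beacon_state_SHIFT);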
struct lpfc_id_range {
uint32_t word5;
#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
@@ -1921,6 +1943,12 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define STATUS_FCF_IN_USE 0x3a
#define STATUS_FCF_TABLE_EMPTY 0x43
+/*
+ * Additional status field for embedded SLI_CONFIG mailbox
+ * command.
+ */
+#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
+
struct lpfc_mbx_sli4_config {
struct mbox_header header;
};
@@ -2433,6 +2461,205 @@ struct lpfc_mbx_supp_pages {
#define LPFC_SLI4_PARAMETERS 2
};
+struct lpfc_mbx_memory_dump_type3 {
+ uint32_t word1;
+#define lpfc_mbx_memory_dump_type3_type_SHIFT 0
+#define lpfc_mbx_memory_dump_type3_type_MASK 0x0000000f
+#define lpfc_mbx_memory_dump_type3_type_WORD word1
+#define lpfc_mbx_memory_dump_type3_link_SHIFT 24
+#define lpfc_mbx_memory_dump_type3_link_MASK 0x000000ff
+#define lpfc_mbx_memory_dump_type3_link_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_memory_dump_type3_page_no_SHIFT 0
+#define lpfc_mbx_memory_dump_type3_page_no_MASK 0x0000ffff
+#define lpfc_mbx_memory_dump_type3_page_no_WORD word2
+#define lpfc_mbx_memory_dump_type3_offset_SHIFT 16
+#define lpfc_mbx_memory_dump_type3_offset_MASK 0x0000ffff
+#define lpfc_mbx_memory_dump_type3_offset_WORD word2
+ uint32_t word3;
+#define lpfc_mbx_memory_dump_type3_length_SHIFT 0
+#define lpfc_mbx_memory_dump_type3_length_MASK 0x00ffffff
+#define lpfc_mbx_memory_dump_type3_length_WORD word3
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t return_len;
+};
+
+#define DMP_PAGE_A0 0xa0
+#define DMP_PAGE_A2 0xa2
+#define DMP_SFF_PAGE_A0_SIZE 256
+#define DMP_SFF_PAGE_A2_SIZE 256
+
+#define SFP_WAVELENGTH_LC1310 1310
+#define SFP_WAVELENGTH_LL1550 1550
+
+
+/*
+ * SFF-8472 TABLE 3.4
+ */
+#define SFF_PG0_CONNECTOR_UNKNOWN 0x00 /* Unknown */
+#define SFF_PG0_CONNECTOR_SC 0x01 /* SC */
+#define SFF_PG0_CONNECTOR_FC_COPPER1 0x02 /* FC style 1 copper connector */
+#define SFF_PG0_CONNECTOR_FC_COPPER2 0x03 /* FC style 2 copper connector */
+#define SFF_PG0_CONNECTOR_BNC 0x04 /* BNC / TNC */
+#define SFF_PG0_CONNECTOR__FC_COAX 0x05 /* FC coaxial headers */
+#define SFF_PG0_CONNECTOR_FIBERJACK 0x06 /* FiberJack */
+#define SFF_PG0_CONNECTOR_LC 0x07 /* LC */
+#define SFF_PG0_CONNECTOR_MT 0x08 /* MT - RJ */
+#define SFF_PG0_CONNECTOR_MU 0x09 /* MU */
+#define SFF_PG0_CONNECTOR_SF 0x0A /* SG */
+#define SFF_PG0_CONNECTOR_OPTICAL_PIGTAIL 0x0B /* Optical pigtail */
+#define SFF_PG0_CONNECTOR_OPTICAL_PARALLEL 0x0C /* MPO Parallel Optic */
+#define SFF_PG0_CONNECTOR_HSSDC_II 0x20 /* HSSDC II */
+#define SFF_PG0_CONNECTOR_COPPER_PIGTAIL 0x21 /* Copper pigtail */
+#define SFF_PG0_CONNECTOR_RJ45 0x22 /* RJ45 */
+
+/* SFF-8472 Table 3.1 Diagnostics: Data Fields Address/Page A0 */
+
+#define SSF_IDENTIFIER 0
+#define SSF_EXT_IDENTIFIER 1
+#define SSF_CONNECTOR 2
+#define SSF_TRANSCEIVER_CODE_B0 3
+#define SSF_TRANSCEIVER_CODE_B1 4
+#define SSF_TRANSCEIVER_CODE_B2 5
+#define SSF_TRANSCEIVER_CODE_B3 6
+#define SSF_TRANSCEIVER_CODE_B4 7
+#define SSF_TRANSCEIVER_CODE_B5 8
+#define SSF_TRANSCEIVER_CODE_B6 9
+#define SSF_TRANSCEIVER_CODE_B7 10
+#define SSF_ENCODING 11
+#define SSF_BR_NOMINAL 12
+#define SSF_RATE_IDENTIFIER 13
+#define SSF_LENGTH_9UM_KM 14
+#define SSF_LENGTH_9UM 15
+#define SSF_LENGTH_50UM_OM2 16
+#define SSF_LENGTH_62UM_OM1 17
+#define SFF_LENGTH_COPPER 18
+#define SSF_LENGTH_50UM_OM3 19
+#define SSF_VENDOR_NAME 20
+#define SSF_VENDOR_OUI 36
+#define SSF_VENDOR_PN 40
+#define SSF_VENDOR_REV 56
+#define SSF_WAVELENGTH_B1 60
+#define SSF_WAVELENGTH_B0 61
+#define SSF_CC_BASE 63
+#define SSF_OPTIONS_B1 64
+#define SSF_OPTIONS_B0 65
+#define SSF_BR_MAX 66
+#define SSF_BR_MIN 67
+#define SSF_VENDOR_SN 68
+#define SSF_DATE_CODE 84
+#define SSF_MONITORING_TYPEDIAGNOSTIC 92
+#define SSF_ENHANCED_OPTIONS 93
+#define SFF_8472_COMPLIANCE 94
+#define SSF_CC_EXT 95
+#define SSF_A0_VENDOR_SPECIFIC 96
+
+/* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */
+
+#define SSF_AW_THRESHOLDS 0
+#define SSF_EXT_CAL_CONSTANTS 56
+#define SSF_CC_DMI 95
+#define SFF_TEMPERATURE_B1 96
+#define SFF_TEMPERATURE_B0 97
+#define SFF_VCC_B1 98
+#define SFF_VCC_B0 99
+#define SFF_TX_BIAS_CURRENT_B1 100
+#define SFF_TX_BIAS_CURRENT_B0 101
+#define SFF_TXPOWER_B1 102
+#define SFF_TXPOWER_B0 103
+#define SFF_RXPOWER_B1 104
+#define SFF_RXPOWER_B0 105
+#define SSF_STATUS_CONTROL 110
+#define SSF_ALARM_FLAGS_B1 112
+#define SSF_ALARM_FLAGS_B0 113
+#define SSF_WARNING_FLAGS_B1 116
+#define SSF_WARNING_FLAGS_B0 117
+#define SSF_EXT_TATUS_CONTROL_B1 118
+#define SSF_EXT_TATUS_CONTROL_B0 119
+#define SSF_A2_VENDOR_SPECIFIC 120
+#define SSF_USER_EEPROM 128
+#define SSF_VENDOR_CONTROL 148
+
+
+/*
+ * Transceiver codes Fibre Channel SFF-8472
+ * Table 3.5.
+ */
+
+struct sff_trasnceiver_codes_byte0 {
+ uint8_t inifiband:4;
+ uint8_t teng_ethernet:4;
+};
+
+struct sff_trasnceiver_codes_byte1 {
+ uint8_t sonet:6;
+ uint8_t escon:2;
+};
+
+struct sff_trasnceiver_codes_byte2 {
+ uint8_t soNet:8;
+};
+
+struct sff_trasnceiver_codes_byte3 {
+ uint8_t ethernet:8;
+};
+
+struct sff_trasnceiver_codes_byte4 {
+ uint8_t fc_el_lo:1;
+ uint8_t fc_lw_laser:1;
+ uint8_t fc_sw_laser:1;
+ uint8_t fc_md_distance:1;
+ uint8_t fc_lg_distance:1;
+ uint8_t fc_int_distance:1;
+ uint8_t fc_short_distance:1;
+ uint8_t fc_vld_distance:1;
+};
+
+struct sff_trasnceiver_codes_byte5 {
+ uint8_t reserved1:1;
+ uint8_t reserved2:1;
+ uint8_t fc_sfp_active:1; /* Active cable */
+ uint8_t fc_sfp_passive:1; /* Passive cable */
+ uint8_t fc_lw_laser:1; /* Longwave laser */
+ uint8_t fc_sw_laser_sl:1;
+ uint8_t fc_sw_laser_sn:1;
+ uint8_t fc_el_hi:1; /* Electrical enclosure high bit */
+};
+
+struct sff_trasnceiver_codes_byte6 {
+ uint8_t fc_tm_sm:1; /* Single Mode */
+ uint8_t reserved:1;
+ uint8_t fc_tm_m6:1; /* Multimode, 62.5um (M6) */
+ uint8_t fc_tm_tv:1; /* Video Coax (TV) */
+ uint8_t fc_tm_mi:1; /* Miniature Coax (MI) */
+ uint8_t fc_tm_tp:1; /* Twisted Pair (TP) */
+ uint8_t fc_tm_tw:1; /* Twin Axial Pair */
+};
+
+struct sff_trasnceiver_codes_byte7 {
+ uint8_t fc_sp_100MB:1; /* 100 MB/sec */
+ uint8_t reserve:1;
+ uint8_t fc_sp_200mb:1; /* 200 MB/sec */
+ uint8_t fc_sp_3200MB:1; /* 3200 MB/sec */
+ uint8_t fc_sp_400MB:1; /* 400 MB/sec */
+ uint8_t fc_sp_1600MB:1; /* 1600 MB/sec */
+ uint8_t fc_sp_800MB:1; /* 800 MB/sec */
+ uint8_t fc_sp_1200MB:1; /* 1200 MB/sec */
+};
+
+/* User writable non-volatile memory, SFF-8472 Table 3.20 */
+struct user_eeprom {
+ uint8_t vendor_name[16];
+ uint8_t vendor_oui[3];
+ uint8_t vendor_pn[16];	/* 16 (not 816): fields must total the 120-byte user area */
+ uint8_t vendor_rev[4];
+ uint8_t vendor_sn[16];
+ uint8_t datecode[6];
+ uint8_t lot_code[2];
+ uint8_t reserved191[57];
+};
+
struct lpfc_mbx_pc_sli4_params {
uint32_t word1;
#define qs_SHIFT 0
@@ -3021,6 +3248,7 @@ struct lpfc_mqe {
struct lpfc_mbx_request_features req_ftrs;
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
struct lpfc_mbx_query_fw_config query_fw_cfg;
+ struct lpfc_mbx_set_beacon_config beacon_config;
struct lpfc_mbx_supp_pages supp_pages;
struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
@@ -3031,6 +3259,7 @@ struct lpfc_mqe {
struct lpfc_mbx_get_prof_cfg get_prof_cfg;
struct lpfc_mbx_wr_object wr_object;
struct lpfc_mbx_get_port_name get_port_name;
+ struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
struct lpfc_mbx_nop nop;
} un;
};
@@ -3041,8 +3270,8 @@ struct lpfc_mcqe {
#define lpfc_mcqe_status_MASK 0x0000FFFF
#define lpfc_mcqe_status_WORD word0
#define lpfc_mcqe_ext_status_SHIFT 16
-#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
-#define lpfc_mcqe_ext_status_WORD word0
+#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
+#define lpfc_mcqe_ext_status_WORD word0
uint32_t mcqe_tag0;
uint32_t mcqe_tag1;
uint32_t trailer;
@@ -3176,6 +3405,7 @@ struct lpfc_acqe_fc_la {
#define LPFC_FC_LA_SPEED_8G 0x8
#define LPFC_FC_LA_SPEED_10G 0xA
#define LPFC_FC_LA_SPEED_16G 0x10
+#define LPFC_FC_LA_SPEED_32G 0x20
#define lpfc_acqe_fc_la_topology_SHIFT 16
#define lpfc_acqe_fc_la_topology_MASK 0x000000FF
#define lpfc_acqe_fc_la_topology_WORD word0
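The _SHIFT/_MASK/_WORD triplets defined throughout this header are consumed by lpfc's generic bf_set()/bf_get() token-pasting macros. A minimal sketch of how they work, paraphrased rather than quoted from lpfc_hw4.h:

/* Generic bitfield accessors built from the name##_SHIFT/_MASK/_WORD
 * convention; sketch only, paraphrased from lpfc_hw4.h.
 */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Example: select SFF page 0xA2 in a type-3 memory dump request:
 * bf_set(lpfc_mbx_memory_dump_type3_page_no,
 *        &mqe->un.mem_dump_type3, DMP_PAGE_A2);
 */

This is why every field above carries all three defines: the macros reconstruct the accessor from the field name alone.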
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e8c8c1ecc1f5..f962118da8ed 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3303,6 +3303,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_lun = vport->cfg_max_luns;
shost->this_id = -1;
shost->max_cmd_len = 16;
+ shost->nr_hw_queues = phba->cfg_fcp_io_channel;
if (phba->sli_rev == LPFC_SLI_REV4) {
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
@@ -4483,7 +4484,13 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_destroy_vport_work_array(phba, vports);
}
- if (active_vlink_present) {
+ /*
+ * Don't re-instantiate if vport is marked for deletion.
+ * If we got here first, vport_delete will wait
+ * for discovery to complete.
+ */
+ if (!(vport->load_flag & FC_UNLOADING) &&
+ active_vlink_present) {
/*
* If there are other active VLinks present,
* re-instantiate the Vlink using FDISC.
@@ -7500,6 +7507,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
+ phba->sli4_hba.physical_port =
+ mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
@@ -8367,7 +8376,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba)
/* vector-0 is associated to slow-path handler */
rc = request_irq(phba->msix_entries[0].vector,
- &lpfc_sli_sp_intr_handler, IRQF_SHARED,
+ &lpfc_sli_sp_intr_handler, 0,
LPFC_SP_DRIVER_HANDLER_NAME, phba);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -8378,7 +8387,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba)
/* vector-1 is associated to fast-path handler */
rc = request_irq(phba->msix_entries[1].vector,
- &lpfc_sli_fp_intr_handler, IRQF_SHARED,
+ &lpfc_sli_fp_intr_handler, 0,
LPFC_FP_DRIVER_HANDLER_NAME, phba);
if (rc) {
@@ -8487,7 +8496,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
}
rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
- IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ 0, LPFC_DRIVER_NAME, phba);
if (rc) {
pci_disable_msi(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -8944,13 +8953,13 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
if (phba->cfg_fof && (index == (vectors - 1)))
rc = request_irq(
phba->sli4_hba.msix_entries[index].vector,
- &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
+ &lpfc_sli4_fof_intr_handler, 0,
(char *)&phba->sli4_hba.handler_name[index],
&phba->sli4_hba.fcp_eq_hdl[index]);
else
rc = request_irq(
phba->sli4_hba.msix_entries[index].vector,
- &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
+ &lpfc_sli4_hba_intr_handler, 0,
(char *)&phba->sli4_hba.handler_name[index],
&phba->sli4_hba.fcp_eq_hdl[index]);
if (rc) {
@@ -8972,7 +8981,8 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
phba->cfg_fcp_io_channel = vectors;
}
- lpfc_sli4_set_affinity(phba, vectors);
+ if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
+ lpfc_sli4_set_affinity(phba, vectors);
return rc;
cfg_fail_out:
@@ -9050,7 +9060,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
}
rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
- IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ 0, LPFC_DRIVER_NAME, phba);
if (rc) {
pci_disable_msi(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
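The request_irq() hunks above replace IRQF_SHARED with 0: MSI and MSI-X vectors are message-based and private to the function, so there is no line to share. A minimal sketch of the resulting pattern (my_dev, my_isr and the field names are hypothetical):

#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_dev {
	struct msix_entry msix_entries[16];
	char irq_name[16][24];
};

static irqreturn_t my_isr(int irq, void *data)
{
	return IRQ_HANDLED;	/* real handler would ack and schedule work */
}

static int my_request_vectors(struct my_dev *dev, int nvec)
{
	int i, rc;

	for (i = 0; i < nvec; i++) {
		/* flags == 0: the vector is exclusively ours */
		rc = request_irq(dev->msix_entries[i].vector, my_isr, 0,
				 dev->irq_name[i], dev);
		if (rc)
			goto undo;
	}
	return 0;
undo:
	while (--i >= 0)
		free_irq(dev->msix_entries[i].vector, dev);
	return rc;
}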
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 816f596cda60..eb627724417e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2255,6 +2255,158 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
return 0;
}
+void
+lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ MAILBOX_t *mb;
+ int rc = FAILURE;
+ struct lpfc_rdp_context *rdp_context =
+ (struct lpfc_rdp_context *)(mboxq->context2);
+
+ mb = &mboxq->u.mb;
+ if (mb->mbxStatus)
+ goto mbx_failed;
+
+ memcpy(&rdp_context->link_stat, &mb->un.varRdLnk, sizeof(READ_LNK_VAR));
+
+ rc = SUCCESS;
+
+mbx_failed:
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ rdp_context->cmpl(phba, rdp_context, rc);
+}
+
+void
+lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1;
+ struct lpfc_rdp_context *rdp_context =
+ (struct lpfc_rdp_context *)(mbox->context2);
+
+ if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
+ goto error;
+
+ lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
+ DMP_SFF_PAGE_A2_SIZE);
+
+ /* We don't need the DMA buffer for link stats. */
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+
+ memset(mbox, 0, sizeof(*mbox));
+ lpfc_read_lnk_stat(phba, mbox);
+ mbox->vport = rdp_context->ndlp->vport;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
+ mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
+ goto error;
+
+ return;
+
+error:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ rdp_context->cmpl(phba, rdp_context, FAILURE);
+}
+
+void
+lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ int rc;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (mbox->context1);
+ struct lpfc_rdp_context *rdp_context =
+ (struct lpfc_rdp_context *)(mbox->context2);
+
+ if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
+ goto error;
+
+ lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0,
+ DMP_SFF_PAGE_A0_SIZE);
+
+ memset(mbox, 0, sizeof(*mbox));
+
+ memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE);
+ INIT_LIST_HEAD(&mp->list);
+
+ /* save address for completion */
+ mbox->context1 = mp;
+ mbox->vport = rdp_context->ndlp->vport;
+
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
+ bf_set(lpfc_mbx_memory_dump_type3_type,
+ &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
+ bf_set(lpfc_mbx_memory_dump_type3_link,
+ &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
+ bf_set(lpfc_mbx_memory_dump_type3_page_no,
+ &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2);
+ bf_set(lpfc_mbx_memory_dump_type3_length,
+ &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
+ mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
+ mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
+ mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto error;
+
+ return;
+
+error:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ rdp_context->cmpl(phba, rdp_context, FAILURE);
+}
+
+
+/*
+ * lpfc_sli4_dump_page_a0 - Dump SLI4 SFP diagnostic page 0xA0.
+ * @phba: pointer to the hba structure.
+ * @mbox: pointer to the lpfc mbox command to initialize.
+ *
+ * This function creates a SLI4 DUMP_MEMORY mailbox command to read
+ * SFF-8472 type 3 page 0xA0.
+ */
+int
+lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+ struct lpfc_dmabuf *mp = NULL;
+
+ memset(mbox, 0, sizeof(*mbox));
+
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp)
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp || !mp->virt) {
+ kfree(mp);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "3569 dump type 3 page 0xA0 allocation failed\n");
+ return 1;
+ }
+
+ memset(mp->virt, 0, LPFC_BPL_SIZE);
+ INIT_LIST_HEAD(&mp->list);
+
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
+ /* save address for completion */
+ mbox->context1 = mp;
+
+ bf_set(lpfc_mbx_memory_dump_type3_type,
+ &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
+ bf_set(lpfc_mbx_memory_dump_type3_link,
+ &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
+ bf_set(lpfc_mbx_memory_dump_type3_page_no,
+ &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
+ bf_set(lpfc_mbx_memory_dump_type3_length,
+ &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
+ mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
+ mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
+
+ return 0;
+}
+
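The three completions above implement RDP data collection as a chain: dump page 0xA0, then reuse the same mailbox for page 0xA2, then READ_LNK_STAT, with every failure path funneling into rdp_context->cmpl(). Stripped of the mailbox plumbing, the control flow looks roughly like this (ctx, issue_async and the step names are hypothetical stand-ins):

#include <linux/errno.h>

struct ctx {
	void (*cmpl)(struct ctx *c, int status);	/* caller's callback */
};

/* issue_async() posts a command and arranges for done() to run later;
 * assumed to return nonzero on submission failure.
 */
static int issue_async(void (*done)(struct ctx *, int), struct ctx *c);

static void step_link_stat_done(struct ctx *c, int status)
{
	c->cmpl(c, status);			/* last step: report result */
}

static void step_a2_done(struct ctx *c, int status)
{
	if (status || issue_async(step_link_stat_done, c))
		c->cmpl(c, -EIO);		/* any failure ends the chain */
}

static void step_a0_done(struct ctx *c, int status)
{
	if (status || issue_async(step_a2_done, c))
		c->cmpl(c, -EIO);
}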
/**
* lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
* @phba: pointer to the hba structure containing the FCF index and RQ ID.
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 4cb9882af157..af3b38aba65e 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -661,7 +661,13 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_destroy_vport_work_array(phba, vports);
}
- if (active_vlink_present) {
+ /*
+ * Don't re-instantiate if vport is marked for deletion.
+ * If we got here first, vport_delete will wait
+ * for discovery to complete.
+ */
+ if (!(vport->load_flag & FC_UNLOADING) &&
+ active_vlink_present) {
/*
* If there are other active VLinks present,
* re-instantiate the Vlink using FDISC.
@@ -1868,7 +1874,7 @@ lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= NLP_LOGO_ACC;
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
spin_unlock_irq(shost->host_lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c140f99772ca..e5eb40d2c512 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3257,7 +3257,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
*/
nseg = scsi_dma_map(scsi_cmnd);
- if (unlikely(!nseg))
+ if (unlikely(nseg <= 0))
return 1;
sgl += 1;
/* clear the last flag in the fcp_rsp map entry */
@@ -3846,6 +3846,49 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/**
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
+ * @phba: Pointer to HBA context object.
+ * @lpfc_cmd: Pointer to the SCSI buffer being issued.
+ *
+ * This routine distributes SCSI commands across the SLI4 FCP WQ indexes
+ * in round-robin fashion. It is called by __lpfc_sli_issue_iocb_s4() with
+ * the hbalock held.
+ * If scsi-mq is enabled, use the block layer's default mapping of software
+ * queues to hardware queues; that mapping is encoded in the request tag.
+ *
+ * Return: index into the SLI4 fast-path FCP queue array.
+ **/
+int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+ struct lpfc_vector_map_info *cpup;
+ int chann, cpu;
+ uint32_t tag;
+ uint16_t hwq;
+
+ if (shost_use_blk_mq(cmnd->device->host)) {
+ tag = blk_mq_unique_tag(cmnd->request);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+
+ return hwq;
+ }
+
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+ && phba->cfg_fcp_io_channel > 1) {
+ cpu = smp_processor_id();
+ if (cpu < phba->sli4_hba.num_present_cpu) {
+ cpup = phba->sli4_hba.cpu_map;
+ cpup += cpu;
+ return cpup->channel_id;
+ }
+ }
+ chann = atomic_add_return(1, &phba->fcp_qidx);
+ chann = (chann % phba->cfg_fcp_io_channel);
+ return chann;
+}
+
+
+/**
* lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
* @phba: The Hba for which this call is being executed.
* @pIocbIn: The command IOCBQ for the scsi cmnd.
@@ -4537,7 +4580,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
if (lpfc_cmd == NULL) {
lpfc_rampdown_queue_depth(phba);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_MISC,
"0707 driver's buffer pool is empty, "
"IO busied\n");
goto out_host_busy;
@@ -4968,13 +5011,16 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
iocbq, iocbqrsp, lpfc_cmd->timeout);
if ((status != IOCB_SUCCESS) ||
(iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0727 TMF %s to TGT %d LUN %llu failed (%d, %d) "
- "iocb_flag x%x\n",
- lpfc_taskmgmt_name(task_mgmt_cmd),
- tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
- iocbqrsp->iocb.un.ulpWord[4],
- iocbq->iocb_flag);
+ if (status != IOCB_SUCCESS ||
+ iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0727 TMF %s to TGT %d LUN %llu "
+ "failed (%d, %d) iocb_flag x%x\n",
+ lpfc_taskmgmt_name(task_mgmt_cmd),
+ tgt_id, lun_id,
+ iocbqrsp->iocb.ulpStatus,
+ iocbqrsp->iocb.un.ulpWord[4],
+ iocbq->iocb_flag);
/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
if (status == IOCB_SUCCESS) {
if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
@@ -4988,7 +5034,6 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
} else {
ret = FAILED;
}
- lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
} else
ret = SUCCESS;
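When scsi-mq is active, lpfc_sli4_scmd_to_wqidx_distr() above trusts the block layer's software-to-hardware queue mapping rather than the driver's own CPU map; the hardware queue index travels inside the request's unique tag. A sketch of just that extraction:

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>

/* Recover the blk-mq hardware queue index for a SCSI command.
 * blk_mq_unique_tag() packs the hw queue number into the upper
 * 16 bits of the tag; blk_mq_unique_tag_to_hwq() unpacks it.
 */
static u16 scmd_to_hwq(struct scsi_cmnd *cmnd)
{
	u32 tag = blk_mq_unique_tag(cmnd->request);

	return blk_mq_unique_tag_to_hwq(tag);
}

Using the same index for the FCP WQ keeps submissions and completions on the queue (and typically the CPU) the block layer already chose.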
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 474e30cdee6e..18b9260ccfac 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -184,3 +184,6 @@ struct lpfc_scsi_buf {
#define FIND_FIRST_OAS_LUN 0
#define NO_MORE_OAS_LUN -1
#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
+
+int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 56f73682d4bd..4feb9312a447 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2249,7 +2249,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->vpi, ndlp->nlp_rpi,
ndlp->nlp_DID,
ndlp->nlp_usg_map, ndlp);
-
+ ndlp->nlp_flag &= ~NLP_LOGO_ACC;
lpfc_nlp_put(ndlp);
}
}
@@ -8138,36 +8138,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
}
/**
- * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
- * @phba: Pointer to HBA context object.
- *
- * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
- * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
- * held.
- *
- * Return: index into SLI4 fast-path FCP queue index.
- **/
-static inline int
-lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
-{
- struct lpfc_vector_map_info *cpup;
- int chann, cpu;
-
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
- && phba->cfg_fcp_io_channel > 1) {
- cpu = smp_processor_id();
- if (cpu < phba->sli4_hba.num_present_cpu) {
- cpup = phba->sli4_hba.cpu_map;
- cpup += cpu;
- return cpup->channel_id;
- }
- }
- chann = atomic_add_return(1, &phba->fcp_qidx);
- chann = (chann % phba->cfg_fcp_io_channel);
- return chann;
-}
-
-/**
* lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
* @phba: Pointer to HBA context object.
* @piocb: Pointer to command iocb.
@@ -8792,32 +8762,44 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
return 0;
}
+/**
+ * lpfc_sli_calc_ring - Calculates which ring to use
+ * @phba: Pointer to HBA context object.
+ * @ring_number: Initial ring
+ * @piocb: Pointer to command iocb.
+ *
+ * For SLI4, FCP IO can be deferred to one of many WQs, based on
+ * fcp_wqidx, so we need to calculate the corresponding ring.
+ * Since ABORTs must go on the same WQ as the command they are
+ * aborting, we use the command's fcp_wqidx.
+ */
int
lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb)
{
- uint32_t idx;
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return ring_number;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+ if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+ if (!(phba->cfg_fof) ||
+ (!(piocb->iocb_flag & LPFC_IO_FOF))) {
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return LPFC_HBA_ERROR;
/*
- * fcp_wqidx should already be setup based on what
- * completion queue we want to use.
+ * for abort iocb fcp_wqidx should already
+ * be setup based on what work queue we used.
*/
- if (!(phba->cfg_fof) ||
- (!(piocb->iocb_flag & LPFC_IO_FOF))) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return LPFC_HBA_ERROR;
- idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
- piocb->fcp_wqidx = idx;
- ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
- } else {
- if (unlikely(!phba->sli4_hba.oas_wq))
- return LPFC_HBA_ERROR;
- idx = 0;
- piocb->fcp_wqidx = idx;
- ring_number = LPFC_FCP_OAS_RING;
- }
+ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
+ piocb->fcp_wqidx =
+ lpfc_sli4_scmd_to_wqidx_distr(phba,
+ piocb->context1);
+ ring_number = MAX_SLI3_CONFIGURED_RINGS +
+ piocb->fcp_wqidx;
+ } else {
+ if (unlikely(!phba->sli4_hba.oas_wq))
+ return LPFC_HBA_ERROR;
+ piocb->fcp_wqidx = 0;
+ ring_number = LPFC_FCP_OAS_RING;
}
}
return ring_number;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 6eca3b8124d3..d1a5b057c6f3 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -602,6 +602,7 @@ struct lpfc_sli4_hba {
struct lpfc_iov iov;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+ uint32_t physical_port;
/* CPU to vector mapping information */
struct lpfc_vector_map_info *cpu_map;
@@ -651,6 +652,26 @@ struct lpfc_rsrc_blks {
uint16_t rsrc_used;
};
+struct lpfc_rdp_context {
+ struct lpfc_nodelist *ndlp;
+ uint16_t ox_id;
+ uint16_t rx_id;
+ READ_LNK_VAR link_stat;
+ uint8_t page_a0[DMP_SFF_PAGE_A0_SIZE];
+ uint8_t page_a2[DMP_SFF_PAGE_A2_SIZE];
+ void (*cmpl)(struct lpfc_hba *, struct lpfc_rdp_context*, int);
+};
+
+struct lpfc_lcb_context {
+ uint8_t sub_command;
+ uint8_t type;
+ uint8_t frequency;
+ uint16_t ox_id;
+ uint16_t rx_id;
+ struct lpfc_nodelist *ndlp;
+};
+
+
/*
* SLI4 specific function prototypes
*/
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c37bb9f91c3b..6258d3d7722a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "10.5.0.0."
+#define LPFC_DRIVER_VERSION "10.7.0.0."
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a87ee33f4f2a..769012663a8f 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -567,8 +567,8 @@ int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
struct lpfc_nodelist *ndlp = NULL;
- struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
long timeout;
bool ns_ndlp_referenced = false;
@@ -645,8 +645,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
/* Remove FC host and then SCSI host with the vport */
- fc_remove_host(lpfc_shost_from_vport(vport));
- scsi_remove_host(lpfc_shost_from_vport(vport));
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
@@ -772,7 +772,8 @@ skip_logo:
* Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
* does the scsi_host_put() to release the vport.
*/
- if (lpfc_mbx_unreg_vpi(vport))
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
+ lpfc_mbx_unreg_vpi(vport))
scsi_host_put(shost);
} else
scsi_host_put(shost);
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 0adb2e015597..141226631429 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -403,7 +403,6 @@ static struct scsi_host_template mac53c94_template = {
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
};
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 14e5c7cea929..20c37541963f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,7 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.806.08.00-rc1"
+#define MEGASAS_VERSION "06.807.10.00-rc1"
+#define MEGASAS_RELDATE "March 6, 2015"
/*
* Device IDs
@@ -153,6 +154,9 @@
#define MFI_FRAME_DIR_BOTH 0x0018
#define MFI_FRAME_IEEE 0x0020
+/* Driver internal */
+#define DRV_DCMD_POLLED_MODE 0x1
+
/*
* Definition for cmd_status
*/
@@ -408,7 +412,7 @@ enum MR_PD_STATE {
* defines the physical drive address structure
*/
struct MR_PD_ADDRESS {
- u16 deviceId;
+ __le16 deviceId;
u16 enclDeviceId;
union {
@@ -433,8 +437,8 @@ struct MR_PD_ADDRESS {
* defines the physical drive list structure
*/
struct MR_PD_LIST {
- u32 size;
- u32 count;
+ __le32 size;
+ __le32 count;
struct MR_PD_ADDRESS addr[1];
} __packed;
@@ -451,28 +455,28 @@ union MR_LD_REF {
struct {
u8 targetId;
u8 reserved;
- u16 seqNum;
+ __le16 seqNum;
};
- u32 ref;
+ __le32 ref;
} __packed;
/*
* defines the logical drive list structure
*/
struct MR_LD_LIST {
- u32 ldCount;
- u32 reserved;
+ __le32 ldCount;
+ __le32 reserved;
struct {
union MR_LD_REF ref;
u8 state;
u8 reserved[3];
- u64 size;
+ __le64 size;
} ldList[MAX_LOGICAL_DRIVES_EXT];
} __packed;
struct MR_LD_TARGETID_LIST {
- u32 size;
- u32 count;
+ __le32 size;
+ __le32 count;
u8 pad[3];
u8 targetId[MAX_LOGICAL_DRIVES_EXT];
};
@@ -553,7 +557,7 @@ struct megasas_ctrl_prop {
} OnOffProperties;
u8 autoSnapVDSpace;
u8 viewSpace;
- u16 spinDownTime;
+ __le16 spinDownTime;
u8 reserved[24];
} __packed;
@@ -567,10 +571,10 @@ struct megasas_ctrl_info {
*/
struct {
- u16 vendor_id;
- u16 device_id;
- u16 sub_vendor_id;
- u16 sub_device_id;
+ __le16 vendor_id;
+ __le16 device_id;
+ __le16 sub_vendor_id;
+ __le16 sub_device_id;
u8 reserved[24];
} __attribute__ ((packed)) pci;
@@ -611,8 +615,8 @@ struct megasas_ctrl_info {
/*
* List of components residing in flash. All strings are null terminated
*/
- u32 image_check_word;
- u32 image_component_count;
+ __le32 image_check_word;
+ __le32 image_component_count;
struct {
@@ -629,7 +633,7 @@ struct megasas_ctrl_info {
* empty if a flash operation has not occurred. All strings are null
* terminated
*/
- u32 pending_image_component_count;
+ __le32 pending_image_component_count;
struct {
@@ -662,39 +666,39 @@ struct megasas_ctrl_info {
} __attribute__ ((packed)) hw_present;
- u32 current_fw_time;
+ __le32 current_fw_time;
/*
* Maximum data transfer sizes
*/
- u16 max_concurrent_cmds;
- u16 max_sge_count;
- u32 max_request_size;
+ __le16 max_concurrent_cmds;
+ __le16 max_sge_count;
+ __le32 max_request_size;
/*
* Logical and physical device counts
*/
- u16 ld_present_count;
- u16 ld_degraded_count;
- u16 ld_offline_count;
+ __le16 ld_present_count;
+ __le16 ld_degraded_count;
+ __le16 ld_offline_count;
- u16 pd_present_count;
- u16 pd_disk_present_count;
- u16 pd_disk_pred_failure_count;
- u16 pd_disk_failed_count;
+ __le16 pd_present_count;
+ __le16 pd_disk_present_count;
+ __le16 pd_disk_pred_failure_count;
+ __le16 pd_disk_failed_count;
/*
* Memory size information
*/
- u16 nvram_size;
- u16 memory_size;
- u16 flash_size;
+ __le16 nvram_size;
+ __le16 memory_size;
+ __le16 flash_size;
/*
* Error counters
*/
- u16 mem_correctable_error_count;
- u16 mem_uncorrectable_error_count;
+ __le16 mem_correctable_error_count;
+ __le16 mem_uncorrectable_error_count;
/*
* Cluster information
@@ -705,7 +709,7 @@ struct megasas_ctrl_info {
/*
* Additional max data transfer sizes
*/
- u16 max_strips_per_io;
+ __le16 max_strips_per_io;
/*
* Controller capabilities structures
@@ -805,7 +809,7 @@ struct megasas_ctrl_info {
* deviceInterface.portAddr, and the rest shall be
* populated in deviceInterfacePortAddr2.
*/
- u64 deviceInterfacePortAddr2[8]; /*6a0h */
+ __le64 deviceInterfacePortAddr2[8]; /*6a0h */
u8 reserved3[128]; /*6e0h */
struct { /*760h */
@@ -842,26 +846,26 @@ struct megasas_ctrl_info {
u16 reserved[6];
} pdsForRaidLevels;
- u16 maxPds; /*780h */
- u16 maxDedHSPs; /*782h */
- u16 maxGlobalHSPs; /*784h */
- u16 ddfSize; /*786h */
+ __le16 maxPds; /*780h */
+ __le16 maxDedHSPs; /*782h */
+ __le16 maxGlobalHSPs; /*784h */
+ __le16 ddfSize; /*786h */
u8 maxLdsPerArray; /*788h */
u8 partitionsInDDF; /*789h */
u8 lockKeyBinding; /*78ah */
u8 maxPITsPerLd; /*78bh */
u8 maxViewsPerLd; /*78ch */
u8 maxTargetId; /*78dh */
- u16 maxBvlVdSize; /*78eh */
+ __le16 maxBvlVdSize; /*78eh */
- u16 maxConfigurableSSCSize; /*790h */
- u16 currentSSCsize; /*792h */
+ __le16 maxConfigurableSSCSize; /*790h */
+ __le16 currentSSCsize; /*792h */
char expanderFwVersion[12]; /*794h */
- u16 PFKTrialTimeRemaining; /*7A0h */
+ __le16 PFKTrialTimeRemaining; /*7A0h */
- u16 cacheMemorySize; /*7A2h */
+ __le16 cacheMemorySize; /*7A2h */
struct { /*7A4h */
#if defined(__BIG_ENDIAN_BITFIELD)
@@ -931,7 +935,7 @@ struct megasas_ctrl_info {
u8 temperatureROC; /*7C9h */
u8 temperatureCtrl; /*7CAh */
u8 reserved4; /*7CBh */
- u16 maxConfigurablePds; /*7CCh */
+ __le16 maxConfigurablePds; /*7CCh */
u8 reserved5[2]; /*0x7CDh */
@@ -1042,11 +1046,6 @@ struct megasas_ctrl_info {
#define VD_EXT_DEBUG 0
-enum MR_MFI_MPT_PTHR_FLAGS {
- MFI_MPT_DETACHED = 0,
- MFI_LIST_ADDED = 1,
- MFI_MPT_ATTACHED = 2,
-};
enum MR_SCSI_CMD_TYPE {
READ_WRITE_LDIO = 0,
@@ -1084,6 +1083,7 @@ enum MR_SCSI_CMD_TYPE {
#define MEGASAS_SKINNY_INT_CMDS 5
#define MEGASAS_FUSION_INTERNAL_CMDS 5
#define MEGASAS_FUSION_IOCTL_CMDS 3
+#define MEGASAS_MFI_IOCTL_CMDS 27
#define MEGASAS_MAX_MSIX_QUEUES 128
/*
@@ -1172,22 +1172,22 @@ struct megasas_register_set {
struct megasas_sge32 {
- u32 phys_addr;
- u32 length;
+ __le32 phys_addr;
+ __le32 length;
} __attribute__ ((packed));
struct megasas_sge64 {
- u64 phys_addr;
- u32 length;
+ __le64 phys_addr;
+ __le32 length;
} __attribute__ ((packed));
struct megasas_sge_skinny {
- u64 phys_addr;
- u32 length;
- u32 flag;
+ __le64 phys_addr;
+ __le32 length;
+ __le32 flag;
} __packed;
union megasas_sgl {
@@ -1210,12 +1210,12 @@ struct megasas_header {
u8 cdb_len; /*06h */
u8 sge_count; /*07h */
- u32 context; /*08h */
- u32 pad_0; /*0Ch */
+ __le32 context; /*08h */
+ __le32 pad_0; /*0Ch */
- u16 flags; /*10h */
- u16 timeout; /*12h */
- u32 data_xferlen; /*14h */
+ __le16 flags; /*10h */
+ __le16 timeout; /*12h */
+ __le32 data_xferlen; /*14h */
} __attribute__ ((packed));
@@ -1248,7 +1248,7 @@ typedef union _MFI_CAPABILITIES {
u32 reserved:25;
#endif
} mfi_capabilities;
- u32 reg;
+ __le32 reg;
} MFI_CAPABILITIES;
struct megasas_init_frame {
@@ -1260,33 +1260,35 @@ struct megasas_init_frame {
u8 reserved_1; /*03h */
MFI_CAPABILITIES driver_operations; /*04h*/
- u32 context; /*08h */
- u32 pad_0; /*0Ch */
-
- u16 flags; /*10h */
- u16 reserved_3; /*12h */
- u32 data_xfer_len; /*14h */
+ __le32 context; /*08h */
+ __le32 pad_0; /*0Ch */
- u32 queue_info_new_phys_addr_lo; /*18h */
- u32 queue_info_new_phys_addr_hi; /*1Ch */
- u32 queue_info_old_phys_addr_lo; /*20h */
- u32 queue_info_old_phys_addr_hi; /*24h */
+ __le16 flags; /*10h */
+ __le16 reserved_3; /*12h */
+ __le32 data_xfer_len; /*14h */
- u32 reserved_4[6]; /*28h */
+ __le32 queue_info_new_phys_addr_lo; /*18h */
+ __le32 queue_info_new_phys_addr_hi; /*1Ch */
+ __le32 queue_info_old_phys_addr_lo; /*20h */
+ __le32 queue_info_old_phys_addr_hi; /*24h */
+ __le32 reserved_4[2]; /*28h */
+ __le32 system_info_lo; /*30h */
+ __le32 system_info_hi; /*34h */
+ __le32 reserved_5[2]; /*38h */
} __attribute__ ((packed));
struct megasas_init_queue_info {
- u32 init_flags; /*00h */
- u32 reply_queue_entries; /*04h */
+ __le32 init_flags; /*00h */
+ __le32 reply_queue_entries; /*04h */
- u32 reply_queue_start_phys_addr_lo; /*08h */
- u32 reply_queue_start_phys_addr_hi; /*0Ch */
- u32 producer_index_phys_addr_lo; /*10h */
- u32 producer_index_phys_addr_hi; /*14h */
- u32 consumer_index_phys_addr_lo; /*18h */
- u32 consumer_index_phys_addr_hi; /*1Ch */
+ __le32 reply_queue_start_phys_addr_lo; /*08h */
+ __le32 reply_queue_start_phys_addr_hi; /*0Ch */
+ __le32 producer_index_phys_addr_lo; /*10h */
+ __le32 producer_index_phys_addr_hi; /*14h */
+ __le32 consumer_index_phys_addr_lo; /*18h */
+ __le32 consumer_index_phys_addr_hi; /*1Ch */
} __attribute__ ((packed));
@@ -1302,18 +1304,18 @@ struct megasas_io_frame {
u8 reserved_0; /*06h */
u8 sge_count; /*07h */
- u32 context; /*08h */
- u32 pad_0; /*0Ch */
+ __le32 context; /*08h */
+ __le32 pad_0; /*0Ch */
- u16 flags; /*10h */
- u16 timeout; /*12h */
- u32 lba_count; /*14h */
+ __le16 flags; /*10h */
+ __le16 timeout; /*12h */
+ __le32 lba_count; /*14h */
- u32 sense_buf_phys_addr_lo; /*18h */
- u32 sense_buf_phys_addr_hi; /*1Ch */
+ __le32 sense_buf_phys_addr_lo; /*18h */
+ __le32 sense_buf_phys_addr_hi; /*1Ch */
- u32 start_lba_lo; /*20h */
- u32 start_lba_hi; /*24h */
+ __le32 start_lba_lo; /*20h */
+ __le32 start_lba_hi; /*24h */
union megasas_sgl sgl; /*28h */
@@ -1331,15 +1333,15 @@ struct megasas_pthru_frame {
u8 cdb_len; /*06h */
u8 sge_count; /*07h */
- u32 context; /*08h */
- u32 pad_0; /*0Ch */
+ __le32 context; /*08h */
+ __le32 pad_0; /*0Ch */
- u16 flags; /*10h */
- u16 timeout; /*12h */
- u32 data_xfer_len; /*14h */
+ __le16 flags; /*10h */
+ __le16 timeout; /*12h */
+ __le32 data_xfer_len; /*14h */
- u32 sense_buf_phys_addr_lo; /*18h */
- u32 sense_buf_phys_addr_hi; /*1Ch */
+ __le32 sense_buf_phys_addr_lo; /*18h */
+ __le32 sense_buf_phys_addr_hi; /*1Ch */
u8 cdb[16]; /*20h */
union megasas_sgl sgl; /*30h */
@@ -1354,19 +1356,19 @@ struct megasas_dcmd_frame {
u8 reserved_1[4]; /*03h */
u8 sge_count; /*07h */
- u32 context; /*08h */
- u32 pad_0; /*0Ch */
+ __le32 context; /*08h */
+ __le32 pad_0; /*0Ch */
- u16 flags; /*10h */
- u16 timeout; /*12h */
+ __le16 flags; /*10h */
+ __le16 timeout; /*12h */
- u32 data_xfer_len; /*14h */
- u32 opcode; /*18h */
+ __le32 data_xfer_len; /*14h */
+ __le32 opcode; /*18h */
union { /*1Ch */
u8 b[12];
- u16 s[6];
- u32 w[3];
+ __le16 s[6];
+ __le32 w[3];
} mbox;
union megasas_sgl sgl; /*28h */
@@ -1380,22 +1382,22 @@ struct megasas_abort_frame {
u8 cmd_status; /*02h */
u8 reserved_1; /*03h */
- u32 reserved_2; /*04h */
+ __le32 reserved_2; /*04h */
- u32 context; /*08h */
- u32 pad_0; /*0Ch */
+ __le32 context; /*08h */
+ __le32 pad_0; /*0Ch */
- u16 flags; /*10h */
- u16 reserved_3; /*12h */
- u32 reserved_4; /*14h */
+ __le16 flags; /*10h */
+ __le16 reserved_3; /*12h */
+ __le32 reserved_4; /*14h */
- u32 abort_context; /*18h */
- u32 pad_1; /*1Ch */
+ __le32 abort_context; /*18h */
+ __le32 pad_1; /*1Ch */
- u32 abort_mfi_phys_addr_lo; /*20h */
- u32 abort_mfi_phys_addr_hi; /*24h */
+ __le32 abort_mfi_phys_addr_lo; /*20h */
+ __le32 abort_mfi_phys_addr_hi; /*24h */
- u32 reserved_5[6]; /*28h */
+ __le32 reserved_5[6]; /*28h */
} __attribute__ ((packed));
@@ -1409,14 +1411,14 @@ struct megasas_smp_frame {
u8 reserved_2[3]; /*04h */
u8 sge_count; /*07h */
- u32 context; /*08h */
- u32 pad_0; /*0Ch */
+ __le32 context; /*08h */
+ __le32 pad_0; /*0Ch */
- u16 flags; /*10h */
- u16 timeout; /*12h */
+ __le16 flags; /*10h */
+ __le16 timeout; /*12h */
- u32 data_xfer_len; /*14h */
- u64 sas_addr; /*18h */
+ __le32 data_xfer_len; /*14h */
+ __le64 sas_addr; /*18h */
union {
struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */
@@ -1436,16 +1438,16 @@ struct megasas_stp_frame {
u8 reserved_3[2]; /*05h */
u8 sge_count; /*07h */
- u32 context; /*08h */
- u32 pad_0; /*0Ch */
+ __le32 context; /*08h */
+ __le32 pad_0; /*0Ch */
- u16 flags; /*10h */
- u16 timeout; /*12h */
+ __le16 flags; /*10h */
+ __le16 timeout; /*12h */
- u32 data_xfer_len; /*14h */
+ __le32 data_xfer_len; /*14h */
- u16 fis[10]; /*18h */
- u32 stp_flags;
+ __le16 fis[10]; /*18h */
+ __le32 stp_flags;
union {
struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */
@@ -1489,18 +1491,18 @@ union megasas_evt_class_locale {
} __attribute__ ((packed));
struct megasas_evt_log_info {
- u32 newest_seq_num;
- u32 oldest_seq_num;
- u32 clear_seq_num;
- u32 shutdown_seq_num;
- u32 boot_seq_num;
+ __le32 newest_seq_num;
+ __le32 oldest_seq_num;
+ __le32 clear_seq_num;
+ __le32 shutdown_seq_num;
+ __le32 boot_seq_num;
} __attribute__ ((packed));
struct megasas_progress {
- u16 progress;
- u16 elapsed_seconds;
+ __le16 progress;
+ __le16 elapsed_seconds;
} __attribute__ ((packed));
@@ -1521,9 +1523,9 @@ struct megasas_evtarg_pd {
struct megasas_evt_detail {
- u32 seq_num;
- u32 time_stamp;
- u32 code;
+ __le32 seq_num;
+ __le32 time_stamp;
+ __le32 code;
union megasas_evt_class_locale cl;
u8 arg_type;
u8 reserved1[15];
@@ -1542,18 +1544,18 @@ struct megasas_evt_detail {
struct {
struct megasas_evtarg_ld ld;
- u64 count;
+ __le64 count;
} __attribute__ ((packed)) ld_count;
struct {
- u64 lba;
+ __le64 lba;
struct megasas_evtarg_ld ld;
} __attribute__ ((packed)) ld_lba;
struct {
struct megasas_evtarg_ld ld;
- u32 prevOwner;
- u32 newOwner;
+ __le32 prevOwner;
+ __le32 newOwner;
} __attribute__ ((packed)) ld_owner;
struct {
@@ -1610,7 +1612,7 @@ struct megasas_evt_detail {
struct {
u16 vendorId;
- u16 deviceId;
+ __le16 deviceId;
u16 subVendorId;
u16 subDeviceId;
} __attribute__ ((packed)) pci;
@@ -1630,9 +1632,9 @@ struct megasas_evt_detail {
} __attribute__ ((packed)) ecc;
u8 b[96];
- u16 s[48];
- u32 w[24];
- u64 d[12];
+ __le16 s[48];
+ __le32 w[24];
+ __le64 d[12];
} args;
char description[128];
@@ -1649,12 +1651,22 @@ struct megasas_irq_context {
u32 MSIxIndex;
};
+struct MR_DRV_SYSTEM_INFO {
+ u8 infoVersion;
+ u8 systemIdLength;
+ u16 reserved0;
+ u8 systemId[64];
+ u8 reserved[1980];
+};
+
struct megasas_instance {
- u32 *producer;
+ __le32 *producer;
dma_addr_t producer_h;
- u32 *consumer;
+ __le32 *consumer;
dma_addr_t consumer_h;
+ struct MR_DRV_SYSTEM_INFO *system_info_buf;
+ dma_addr_t system_info_h;
struct MR_LD_VF_AFFILIATION *vf_affiliation;
dma_addr_t vf_affiliation_h;
struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
@@ -1662,7 +1674,7 @@ struct megasas_instance {
struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
dma_addr_t hb_host_mem_h;
- u32 *reply_queue;
+ __le32 *reply_queue;
dma_addr_t reply_queue_h;
u32 *crash_dump_buf;
@@ -1681,7 +1693,7 @@ struct megasas_instance {
spinlock_t crashdump_lock;
struct megasas_register_set __iomem *reg_set;
- u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
+ u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
u8 ld_ids[MEGASAS_MAX_LD_IDS];
@@ -1769,6 +1781,7 @@ struct megasas_instance {
u16 throttlequeuedepth;
u8 mask_interrupts;
u8 is_imr;
+ bool dev_handle;
};
struct MR_LD_VF_MAP {
u32 size;
@@ -1864,9 +1877,13 @@ struct megasas_instance_template {
#define MEGASAS_IS_LOGICAL(scp) \
(scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
-#define MEGASAS_DEV_INDEX(inst, scp) \
- ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
- scp->device->id
+#define MEGASAS_DEV_INDEX(scp) \
+ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
+ scp->device->id)
+
+#define MEGASAS_PD_INDEX(scp) \
+ ((scp->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + \
+ scp->device->id)
struct megasas_cmd {
@@ -1877,17 +1894,14 @@ struct megasas_cmd {
u32 index;
u8 sync_cmd;
- u8 cmd_status;
+ u8 cmd_status_drv;
u8 abort_aen;
u8 retry_for_fw_reset;
struct list_head list;
struct scsi_cmnd *scmd;
-
- void *mpt_pthr_cmd_blocked;
- atomic_t mfi_mpt_pthr;
- u8 is_wait_event;
+ u8 flags;
struct megasas_instance *instance;
union {
@@ -1963,10 +1977,10 @@ u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
-u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
+__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
-u16 get_updated_dev_handle(struct megasas_instance *instance,
+__le16 get_updated_dev_handle(struct megasas_instance *instance,
struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
struct LD_LOAD_BALANCE_INFO *lbInfo);
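Most of the megaraid_sas.h churn above is the same one-directional change: every field that crosses the PCI bus to firmware becomes __le16/__le32/__le64, so that sparse can flag any access missing a byte swap. A minimal sketch of the convention (example_frame is hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_frame {
	__le16 flags;		/* firmware-visible: always little-endian */
	__le32 data_xfer_len;
};

static void fill_frame(struct example_frame *f, u16 flags, u32 len)
{
	/* writes go through cpu_to_leN() ... */
	f->flags = cpu_to_le16(flags);
	f->data_xfer_len = cpu_to_le32(len);
}

static u32 frame_len(const struct example_frame *f)
{
	/* ... and reads through leN_to_cpu() */
	return le32_to_cpu(f->data_xfer_len);
}

On little-endian hosts the conversions compile to nothing; on big-endian hosts they become byte swaps, which is exactly what driving this firmware from, say, s390 requires.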
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 4c3fc0eb8b30..71b884dae27c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -94,8 +94,8 @@ MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Defau
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
-MODULE_AUTHOR("megaraidlinux@lsi.com");
-MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
+MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
+MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
@@ -215,7 +215,6 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
cmd = list_entry((&instance->cmd_pool)->next,
struct megasas_cmd, list);
list_del_init(&cmd->list);
- atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_DETACHED);
} else {
printk(KERN_ERR "megasas: Command pool empty!\n");
}
@@ -225,52 +224,41 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
}
/**
- * __megasas_return_cmd - Return a cmd to free command pool
+ * megasas_return_cmd - Return a cmd to free command pool
* @instance: Adapter soft state
* @cmd: Command packet to be returned to free command pool
*/
inline void
-__megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
- /*
- * Don't go ahead and free the MFI frame, if corresponding
- * MPT frame is not freed(valid for only fusion adapters).
- * In case of MFI adapters, anyways for any allocated MFI
- * frame will have cmd->mfi_mpt_mpthr set to MFI_MPT_DETACHED
+ unsigned long flags;
+ u32 blk_tags;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ /* This flag is used only for fusion adapters:
+ * a polled-mode DCMD is still waiting for its interrupt,
+ * so don't return the frame here.
+ */
- if (atomic_read(&cmd->mfi_mpt_pthr) != MFI_MPT_DETACHED)
+ if (cmd->flags & DRV_DCMD_POLLED_MODE)
return;
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+
+ if (fusion) {
+ blk_tags = instance->max_scsi_cmds + cmd->index;
+ cmd_fusion = fusion->cmd_list[blk_tags];
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ }
cmd->scmd = NULL;
cmd->frame_count = 0;
- cmd->is_wait_event = 0;
- cmd->mpt_pthr_cmd_blocked = NULL;
-
- if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
- (reset_devices))
+ cmd->flags = 0;
+ if (!fusion && reset_devices)
cmd->frame->hdr.cmd = MFI_CMD_INVALID;
-
- atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
list_add(&cmd->list, (&instance->cmd_pool)->next);
-}
-
-/**
- * megasas_return_cmd - Return a cmd to free command pool
- * @instance: Adapter soft state
- * @cmd: Command packet to be returned to free command pool
- */
-inline void
-megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
-{
- unsigned long flags;
- spin_lock_irqsave(&instance->mfi_pool_lock, flags);
- __megasas_return_cmd(instance, cmd);
spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
-}
+}
/**
* The following functions are defined for xscale
@@ -814,8 +802,8 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
{
u32 retry = 0 ;
u32 HostDiag;
- u32 *seq_offset = &reg_set->seq_offset;
- u32 *hostdiag_offset = &reg_set->host_diag;
+ u32 __iomem *seq_offset = &reg_set->seq_offset;
+ u32 __iomem *hostdiag_offset = &reg_set->host_diag;
if (instance->instancet == &megasas_instance_template_skinny) {
seq_offset = &reg_set->fusion_seq_offset;
@@ -910,7 +898,7 @@ extern struct megasas_instance_template megasas_instance_template_fusion;
* @instance: Adapter soft state
* @cmd: Command packet to be issued
*
- * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
*/
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -952,20 +940,20 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, int timeout)
{
int ret = 0;
- cmd->cmd_status = ENODATA;
+ cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
- cmd->is_wait_event = 1;
instance->instancet->issue_dcmd(instance, cmd);
if (timeout) {
ret = wait_event_timeout(instance->int_cmd_wait_q,
- cmd->cmd_status != ENODATA, timeout * HZ);
+ cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
if (!ret)
return 1;
} else
wait_event(instance->int_cmd_wait_q,
- cmd->cmd_status != ENODATA);
+ cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
- return 0;
+ return (cmd->cmd_status_drv == MFI_STAT_OK) ?
+ 0 : 1;
}
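megasas_issue_blocked_cmd() above now preloads cmd_status_drv with MFI_STAT_INVALID_STATUS as a "still pending" sentinel, which lets the caller distinguish a timeout (sentinel untouched) from a completion with an error status. The wait pattern, reduced to a sketch (my_cmd, fire_cmd and STAT_PENDING are hypothetical):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define STAT_PENDING	0xFF	/* plays the MFI_STAT_INVALID_STATUS role */

struct my_cmd {
	u8 status;		/* overwritten by the completion interrupt */
};

static void fire_cmd(struct my_cmd *cmd);	/* async submit, assumed */

static int wait_for_cmd(struct my_cmd *cmd, wait_queue_head_t *wq,
			unsigned long secs)
{
	cmd->status = STAT_PENDING;
	fire_cmd(cmd);
	if (secs) {
		if (!wait_event_timeout(*wq, cmd->status != STAT_PENDING,
					secs * HZ))
			return -ETIMEDOUT;	/* sentinel never overwritten */
	} else {
		wait_event(*wq, cmd->status != STAT_PENDING);
	}
	return cmd->status ? -EIO : 0;		/* 0 plays the MFI_STAT_OK role */
}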
/**
@@ -998,7 +986,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
* Prepare and issue the abort frame
*/
abort_fr->cmd = MFI_CMD_ABORT;
- abort_fr->cmd_status = 0xFF;
+ abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
abort_fr->flags = cpu_to_le16(0);
abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
abort_fr->abort_mfi_phys_addr_lo =
@@ -1007,13 +995,13 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
cmd->sync_cmd = 1;
- cmd->cmd_status = ENODATA;
+ cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
instance->instancet->issue_dcmd(instance, cmd);
if (timeout) {
ret = wait_event_timeout(instance->abort_cmd_wait_q,
- cmd->cmd_status != ENODATA, timeout * HZ);
+ cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
if (!ret) {
dev_err(&instance->pdev->dev, "Command timed out "
"from %s\n", __func__);
@@ -1021,7 +1009,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
}
} else
wait_event(instance->abort_cmd_wait_q,
- cmd->cmd_status != ENODATA);
+ cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
cmd->sync_cmd = 0;
@@ -1196,7 +1184,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
struct megasas_pthru_frame *pthru;
is_logical = MEGASAS_IS_LOGICAL(scp);
- device_id = MEGASAS_DEV_INDEX(instance, scp);
+ device_id = MEGASAS_DEV_INDEX(scp);
pthru = (struct megasas_pthru_frame *)cmd->frame;
if (scp->sc_data_direction == PCI_DMA_TODEVICE)
@@ -1232,7 +1220,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
*/
if (scp->device->type == TYPE_TAPE) {
if ((scp->request->timeout / HZ) > 0xFFFF)
- pthru->timeout = 0xFFFF;
+ pthru->timeout = cpu_to_le16(0xFFFF);
else
pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
}
@@ -1294,7 +1282,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
u16 flags = 0;
struct megasas_io_frame *ldio;
- device_id = MEGASAS_DEV_INDEX(instance, scp);
+ device_id = MEGASAS_DEV_INDEX(scp);
ldio = (struct megasas_io_frame *)cmd->frame;
if (scp->sc_data_direction == PCI_DMA_TODEVICE)
@@ -1698,7 +1686,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
* @instance: Adapter soft state
*
*/
-void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
+static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
{
int i;
struct megasas_cmd *cmd_mfi;
@@ -1922,22 +1910,24 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
- dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
+ dcmd->data_xfer_len =
+ cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
if (initial)
dcmd->sgl.sge32[0].phys_addr =
- instance->vf_affiliation_111_h;
+ cpu_to_le32(instance->vf_affiliation_111_h);
else
- dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+ dcmd->sgl.sge32[0].phys_addr =
+ cpu_to_le32(new_affiliation_111_h);
- dcmd->sgl.sge32[0].length =
- sizeof(struct MR_LD_VF_AFFILIATION_111);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
"scsi%d\n", instance->host->host_no);
@@ -1976,11 +1966,7 @@ out:
new_affiliation_111_h);
}
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return retval;
}
@@ -2037,22 +2023,24 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION);
- dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+ dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
if (initial)
- dcmd->sgl.sge32[0].phys_addr = instance->vf_affiliation_h;
+ dcmd->sgl.sge32[0].phys_addr =
+ cpu_to_le32(instance->vf_affiliation_h);
else
- dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+ dcmd->sgl.sge32[0].phys_addr =
+ cpu_to_le32(new_affiliation_h);
- dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION);
+ dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
"scsi%d\n", instance->host->host_no);
@@ -2147,11 +2135,7 @@ out:
(MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
new_affiliation, new_affiliation_h);
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return retval;
}
@@ -2204,39 +2188,33 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
- dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM);
- dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC;
- dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h;
- dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
instance->host->host_no);
- if (!megasas_issue_polled(instance, cmd)) {
- retval = 0;
- } else {
- printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
- "_MEM_ALLOC DCMD timed out for scsi%d\n",
- instance->host->host_no);
- retval = 1;
- goto out;
- }
-
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ retval = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_ROUTINE_WAIT_TIME_VF);
+ else
+ retval = megasas_issue_polled(instance, cmd);
- if (dcmd->cmd_status) {
- printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
- "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n",
- dcmd->cmd_status,
- instance->host->host_no);
+ if (retval) {
+ dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD %s for scsi%d\n",
+ (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
+ "timed out" : "failed", instance->host->host_no);
retval = 1;
- goto out;
}
out:
@@ -2332,7 +2310,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
"reset queue\n",
reset_cmd);
- reset_cmd->cmd_status = ENODATA;
+ reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
instance->instancet->fire_cmd(instance,
reset_cmd->frame_phys_addr,
0, instance->reg_set);
@@ -2612,11 +2590,7 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
instance->aen_cmd = NULL;
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
if ((instance->unload == 0) &&
((instance->issuepend_done == 1))) {
@@ -2786,7 +2760,7 @@ struct device_attribute *megaraid_host_attrs[] = {
static struct scsi_host_template megasas_template = {
.module = THIS_MODULE,
- .name = "LSI SAS based MegaRAID driver",
+ .name = "Avago SAS based MegaRAID driver",
.proc_name = "megaraid_sas",
.slave_configure = megasas_slave_configure,
.slave_alloc = megasas_slave_alloc,
@@ -2815,11 +2789,7 @@ static void
megasas_complete_int_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd)
{
- cmd->cmd_status = cmd->frame->io.cmd_status;
-
- if (cmd->cmd_status == ENODATA) {
- cmd->cmd_status = 0;
- }
+ cmd->cmd_status_drv = cmd->frame->io.cmd_status;
wake_up(&instance->int_cmd_wait_q);
}
@@ -2838,7 +2808,7 @@ megasas_complete_abort(struct megasas_instance *instance,
{
if (cmd->sync_cmd) {
cmd->sync_cmd = 0;
- cmd->cmd_status = 0;
+ cmd->cmd_status_drv = 0;
wake_up(&instance->abort_cmd_wait_q);
}
@@ -2978,8 +2948,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
"failed, status = 0x%x.\n",
cmd->frame->hdr.cmd_status);
else {
- megasas_return_mfi_mpt_pthr(instance,
- cmd, cmd->mpt_pthr_cmd_blocked);
+ megasas_return_cmd(instance, cmd);
spin_unlock_irqrestore(
instance->host->host_lock,
flags);
@@ -2987,8 +2956,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
}
} else
instance->map_id++;
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
+ megasas_return_cmd(instance, cmd);
/*
* Set fast path IO to ZERO.
@@ -3086,7 +3054,7 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
printk(KERN_NOTICE "megasas: %p synchronous cmd"
"on the internal reset queue,"
"issue it again.\n", cmd);
- cmd->cmd_status = ENODATA;
+ cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
instance->instancet->fire_cmd(instance,
cmd->frame_phys_addr ,
0, instance->reg_set);
@@ -3766,7 +3734,6 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
cmd = instance->cmd_list[i];
memset(cmd, 0, sizeof(struct megasas_cmd));
cmd->index = i;
- atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
cmd->scmd = NULL;
cmd->instance = instance;
@@ -3827,7 +3794,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
dcmd->mbox.b[1] = 0;
dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
@@ -3874,11 +3841,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
ci, ci_h);
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -3927,7 +3890,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
if (instance->supportmax256vd)
dcmd->mbox.b[0] = 1;
dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
@@ -3965,11 +3928,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
ci,
ci_h);
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -4020,7 +3979,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd->mbox.b[2] = 1;
dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
@@ -4050,11 +4009,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
ci, ci_h);
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -4091,12 +4046,11 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
}
- dev_info(&instance->pdev->dev, "Firmware supports %d VD %d PD\n",
- instance->fw_supported_vd_count,
- instance->fw_supported_pd_count);
- dev_info(&instance->pdev->dev, "Driver supports %d VD %d PD\n",
- instance->drv_supported_vd_count,
- instance->drv_supported_pd_count);
+
+ dev_info(&instance->pdev->dev,
+ "firmware type\t: %s\n",
+ instance->supportmax256vd ? "Extended VD(240 VD) firmware" :
+ "Legacy(64 VD) firmware");
old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
(sizeof(struct MR_LD_SPAN_MAP) *
@@ -4158,7 +4112,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
@@ -4181,16 +4135,17 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
megasas_update_ext_vd_details(instance);
+ instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
+ dev_info(&instance->pdev->dev,
+ "controller type\t: %s(%dMB)\n",
+ instance->is_imr ? "iMR" : "MR",
+ le16_to_cpu(ctrl_info->memory_size));
}
pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
ci, ci_h);
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -4229,7 +4184,7 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->mbox.b[0] = crash_buf_state;
dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
dcmd->timeout = 0;
@@ -4245,11 +4200,7 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
else
ret = megasas_issue_polled(instance, cmd);
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -4262,7 +4213,7 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
static int
megasas_issue_init_mfi(struct megasas_instance *instance)
{
- u32 context;
+ __le32 context;
struct megasas_cmd *cmd;
@@ -4300,7 +4251,7 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
init_frame->cmd = MFI_CMD_INIT;
- init_frame->cmd_status = 0xFF;
+ init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
init_frame->queue_info_new_phys_addr_lo =
cpu_to_le32(lower_32_bits(initq_info_h));
init_frame->queue_info_new_phys_addr_hi =
@@ -4354,6 +4305,21 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
0x10;
/*
+ * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
+ * are reserved for IOCTL + driver's internal DCMDs.
+ */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+ instance->max_scsi_cmds = (instance->max_fw_cmds -
+ MEGASAS_SKINNY_INT_CMDS);
+ sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
+ } else {
+ instance->max_scsi_cmds = (instance->max_fw_cmds -
+ MEGASAS_INT_CMDS);
+ sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
+ }
+
+ /*
* Create a pool of commands
*/
if (megasas_alloc_cmds(instance))
@@ -4414,6 +4380,107 @@ fail_alloc_cmds:
return 1;
}
+/**
+ * megasas_setup_irqs_ioapic - register legacy interrupts.
+ * @instance: Adapter soft state
+ *
+ * Do not enable interrupts; only set up the ISRs.
+ *
+ * Return 0 on success.
+ */
+static int
+megasas_setup_irqs_ioapic(struct megasas_instance *instance)
+{
+ struct pci_dev *pdev;
+
+ pdev = instance->pdev;
+ instance->irq_context[0].instance = instance;
+ instance->irq_context[0].MSIxIndex = 0;
+ if (request_irq(pdev->irq, instance->instancet->service_isr,
+ IRQF_SHARED, "megasas", &instance->irq_context[0])) {
+ dev_err(&instance->pdev->dev,
+ "Failed to register IRQ from %s %d\n",
+ __func__, __LINE__);
+ return -1;
+ }
+ return 0;
+}
+
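Because the legacy INTx line registered above may be shared with other devices, passing IRQF_SHARED to request_irq() obliges the handler to verify that its own device raised the interrupt and to return IRQ_NONE otherwise. A minimal sketch of that contract, assuming clear_intr() reports whether this adapter asserted the line (the handler name is hypothetical, not the driver's actual ISR):

    static irqreturn_t example_shared_isr(int irq, void *devp)
    {
    	struct megasas_irq_context *ctx = devp;
    	struct megasas_instance *inst = ctx->instance;
    
    	/* Ask the hardware whether this device asserted the line. */
    	if (!inst->instancet->clear_intr(inst->reg_set))
    		return IRQ_NONE;	/* not ours: let other handlers run */
    
    	/* ... service completions for ctx->MSIxIndex ... */
    	return IRQ_HANDLED;
    }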
+/**
+ * megasas_setup_irqs_msix - register MSI-x interrupts.
+ * @instance: Adapter soft state
+ * @is_probe: Driver probe check
+ *
+ * Do not enable interrupts; only set up the ISRs.
+ *
+ * Return 0 on success.
+ */
+static int
+megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
+{
+ int i, j, cpu;
+ struct pci_dev *pdev;
+
+ pdev = instance->pdev;
+
+ /* Try MSI-x */
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < instance->msix_vectors; i++) {
+ instance->irq_context[i].instance = instance;
+ instance->irq_context[i].MSIxIndex = i;
+ if (request_irq(instance->msixentry[i].vector,
+ instance->instancet->service_isr, 0, "megasas",
+ &instance->irq_context[i])) {
+ dev_err(&instance->pdev->dev,
+ "Failed to register IRQ for vector %d.\n", i);
+ for (j = 0; j < i; j++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
+ free_irq(instance->msixentry[j].vector,
+ &instance->irq_context[j]);
+ }
+ /* Retry irq register for IO_APIC*/
+ instance->msix_vectors = 0;
+ if (is_probe)
+ return megasas_setup_irqs_ioapic(instance);
+ else
+ return -1;
+ }
+ if (smp_affinity_enable) {
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev,
+ "Failed to set affinity hint"
+ " for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ }
+ return 0;
+}
+
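The affinity-hint loop above spreads the vectors round-robin across cpu_online_mask. Note that cpumask_next() does not wrap around, which is safe only as long as msix_vectors never exceeds the online CPU count, as the driver arranges earlier during init. A condensed sketch of the pattern, for illustration:

    /* Spread MSI-x vectors round-robin over the online CPUs. */
    unsigned int cpu = cpumask_first(cpu_online_mask);
    int i;
    
    for (i = 0; i < instance->msix_vectors; i++) {
    	/* Hint (do not force) the kernel to steer vector i to 'cpu'. */
    	if (irq_set_affinity_hint(instance->msixentry[i].vector,
    				  get_cpu_mask(cpu)))
    		dev_err(&instance->pdev->dev,
    			"affinity hint failed for cpu %u\n", cpu);
    	cpu = cpumask_next(cpu, cpu_online_mask);
    }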
+/*
+ * megasas_destroy_irqs - unregister interrupts.
+ * @instance: Adapter soft state
+ * Return: void
+ */
+static void
+megasas_destroy_irqs(struct megasas_instance *instance)
+{
+ int i;
+
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+}
+
/**
* megasas_init_fw - Initializes the FW
* @instance: Adapter soft state
@@ -4499,7 +4566,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
* It is used for all MPT based Adapters.
*/
instance->reply_post_host_index_addr[0] =
- (u32 *)((u8 *)instance->reg_set +
+ (u32 __iomem *)((u8 __iomem *)instance->reg_set +
MPI2_REPLY_POST_HOST_INDEX_OFFSET);
/* Check if MSI-X is supported while in ready state */
@@ -4531,7 +4598,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
*/
for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
instance->reply_post_host_index_addr[loop] =
- (u32 *)((u8 *)instance->reg_set +
+ (u32 __iomem *)
+ ((u8 __iomem *)instance->reg_set +
MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
+ (loop * 0x10));
}
@@ -4551,14 +4619,19 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->msix_vectors = i;
else
instance->msix_vectors = 0;
-
- dev_info(&instance->pdev->dev, "[scsi%d]: FW supports"
- "<%d> MSIX vector,Online CPUs: <%d>,"
- "Current MSIX <%d>\n", instance->host->host_no,
- fw_msix_count, (unsigned int)num_online_cpus(),
- instance->msix_vectors);
}
+ dev_info(&instance->pdev->dev,
+ "firmware supports msix\t: (%d)", fw_msix_count);
+ dev_info(&instance->pdev->dev,
+ "current msix/online cpus\t: (%d/%d)\n",
+ instance->msix_vectors, (unsigned int)num_online_cpus());
+
+ if (instance->msix_vectors ?
+ megasas_setup_irqs_msix(instance, 1) :
+ megasas_setup_irqs_ioapic(instance))
+ goto fail_setup_irqs;
+
instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
GFP_KERNEL);
if (instance->ctrl_info == NULL)
@@ -4574,6 +4647,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
+ (unsigned long)instance);
+
+ instance->instancet->enable_intr(instance);
+
printk(KERN_ERR "megasas: INIT adapter done\n");
/** for passthrough
@@ -4584,7 +4662,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
if (megasas_get_pd_list(instance) < 0) {
printk(KERN_ERR "megasas: failed to get PD list\n");
- goto fail_init_adapter;
+ goto fail_get_pd_list;
}
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
@@ -4610,17 +4688,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
- /*Check whether controller is iMR or MR */
- if (ctrl_info->memory_size) {
- instance->is_imr = 0;
- dev_info(&instance->pdev->dev, "Controller type: MR,"
- "Memory size is: %dMB\n",
- le16_to_cpu(ctrl_info->memory_size));
- } else {
- instance->is_imr = 1;
- dev_info(&instance->pdev->dev,
- "Controller type: iMR\n");
- }
instance->disableOnlineCtrlReset =
ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
instance->mpio = ctrl_info->adapterOperations2.mpio;
@@ -4628,9 +4695,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
struct fusion_context *fusion = instance->ctrl_context;
-
- dev_info(&instance->pdev->dev, "FW supports: "
- "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
if (MR_ValidateMapInfo(instance))
fusion->fast_path_io = 1;
else
@@ -4657,13 +4721,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->crash_dump_drv_support =
(instance->crash_dump_fw_support &&
instance->crash_dump_buf);
- if (instance->crash_dump_drv_support) {
- dev_info(&instance->pdev->dev, "Firmware Crash dump "
- "feature is supported\n");
+ if (instance->crash_dump_drv_support)
megasas_set_crash_dump_params(instance,
MR_CRASH_BUF_TURN_OFF);
- } else {
+ else {
if (instance->crash_dump_buf)
pci_free_consistent(instance->pdev,
CRASH_DMA_BUF_SIZE,
@@ -4674,37 +4736,28 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->secure_jbod_support =
ctrl_info->adapterOperations3.supportSecurityonJBOD;
- if (instance->secure_jbod_support)
- dev_info(&instance->pdev->dev, "Firmware supports Secure JBOD\n");
+
+ dev_info(&instance->pdev->dev,
+ "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
+ le16_to_cpu(ctrl_info->pci.vendor_id),
+ le16_to_cpu(ctrl_info->pci.device_id),
+ le16_to_cpu(ctrl_info->pci.sub_vendor_id),
+ le16_to_cpu(ctrl_info->pci.sub_device_id));
+ dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
+ instance->UnevenSpanSupport ? "yes" : "no");
+ dev_info(&instance->pdev->dev, "disable ocr : %s\n",
+ instance->disableOnlineCtrlReset ? "yes" : "no");
+ dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
+ instance->crash_dump_drv_support ? "yes" : "no");
+ dev_info(&instance->pdev->dev, "secure jbod : %s\n",
+ instance->secure_jbod_support ? "yes" : "no");
+
instance->max_sectors_per_req = instance->max_num_sge *
PAGE_SIZE / 512;
if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
instance->max_sectors_per_req = tmp_sectors;
- /*
- * 1. For fusion adapters, 3 commands for IOCTL and 5 commands
- * for driver's internal DCMDs.
- * 2. For MFI skinny adapters, 5 commands for IOCTL + driver's
- * internal DCMDs.
- * 3. For rest of MFI adapters, 27 commands reserved for IOCTLs
- * and 5 commands for drivers's internal DCMD.
- */
- if (instance->ctrl_context) {
- instance->max_scsi_cmds = instance->max_fw_cmds -
- (MEGASAS_FUSION_INTERNAL_CMDS +
- MEGASAS_FUSION_IOCTL_CMDS);
- sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
- } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
- instance->max_scsi_cmds = instance->max_fw_cmds -
- MEGASAS_SKINNY_INT_CMDS;
- sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
- } else {
- instance->max_scsi_cmds = instance->max_fw_cmds -
- MEGASAS_INT_CMDS;
- sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
- }
-
/* Check for valid throttlequeuedepth module parameter */
if (throttlequeuedepth &&
throttlequeuedepth <= instance->max_scsi_cmds)
@@ -4713,12 +4766,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->throttlequeuedepth =
MEGASAS_THROTTLE_QUEUE_DEPTH;
- /*
- * Setup tasklet for cmd completion
- */
-
- tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
- (unsigned long)instance);
/* Launch SR-IOV heartbeat timer */
if (instance->requestorId) {
@@ -4733,7 +4780,14 @@ static int megasas_init_fw(struct megasas_instance *instance)
return 0;
+fail_get_pd_list:
+ instance->instancet->disable_intr(instance);
fail_init_adapter:
+ megasas_destroy_irqs(instance);
+fail_setup_irqs:
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+ instance->msix_vectors = 0;
fail_ready_state:
kfree(instance->ctrl_info);
instance->ctrl_info = NULL;
@@ -4822,21 +4876,17 @@ megasas_get_seq_num(struct megasas_instance *instance,
/*
* Copy the data back into callers buffer
*/
- eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
- eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
- eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
- eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
- eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+ eli->newest_seq_num = el_info->newest_seq_num;
+ eli->oldest_seq_num = el_info->oldest_seq_num;
+ eli->clear_seq_num = el_info->clear_seq_num;
+ eli->shutdown_seq_num = el_info->shutdown_seq_num;
+ eli->boot_seq_num = el_info->boot_seq_num;
}
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return 0;
}
@@ -4877,8 +4927,8 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
if (instance->aen_cmd) {
- prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
- prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale);
+ prev_aen.word =
+ le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
/*
* A class whose enum value is smaller is inclusive of all
@@ -4990,7 +5040,7 @@ static int megasas_start_aen(struct megasas_instance *instance)
class_locale.members.class = MR_EVT_CLASS_DEBUG;
return megasas_register_aen(instance,
- eli.newest_seq_num + 1,
+ le32_to_cpu(eli.newest_seq_num) + 1,
class_locale.word);
}
@@ -5001,6 +5051,7 @@ static int megasas_start_aen(struct megasas_instance *instance)
static int megasas_io_attach(struct megasas_instance *instance)
{
struct Scsi_Host *host = instance->host;
+ u32 error;
/*
* Export parameters required by SCSI mid-layer
@@ -5050,12 +5101,21 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->hostt->eh_device_reset_handler = NULL;
host->hostt->eh_bus_reset_handler = NULL;
}
+ error = scsi_init_shared_tag_map(host, host->can_queue);
+ if (error) {
+ dev_err(&instance->pdev->dev,
+ "Failed to shared tag from %s %d\n",
+ __func__, __LINE__);
+ return -ENODEV;
+ }
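scsi_init_shared_tag_map() sizes the block layer's tag space to can_queue, which is what lets the fusion path elsewhere in this series replace its spinlock-protected free list with a direct lookup: each scmd arrives with a unique request->tag in [0, can_queue), so it can index a preallocated command array. A sketch of the resulting O(1) lookup, under the assumption that cmd_list holds at least can_queue entries:

    /* blk-tag indexed command lookup: no lock, no allocation. */
    struct megasas_cmd_fusion *
    example_get_cmd_by_tag(struct fusion_context *fusion, struct scsi_cmnd *scmd)
    {
    	u32 tag = scmd->request->tag;	/* unique per in-flight command */
    
    	return fusion->cmd_list[tag];
    }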
/*
* Notify the mid-layer about the new controller
*/
if (scsi_add_host(host, &instance->pdev->dev)) {
- printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
+ dev_err(&instance->pdev->dev,
+ "Failed to add host from %s %d\n",
+ __func__, __LINE__);
return -ENODEV;
}
@@ -5106,7 +5166,7 @@ fail_set_dma_mask:
static int megasas_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int rval, pos, i, j, cpu;
+ int rval, pos;
struct Scsi_Host *host;
struct megasas_instance *instance;
u16 control = 0;
@@ -5129,16 +5189,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
}
/*
- * Announce PCI information
- */
- printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
- pdev->vendor, pdev->device, pdev->subsystem_vendor,
- pdev->subsystem_device);
-
- printk("bus %d:slot %d:func %d\n",
- pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
-
- /*
* PCI prepping: enable device set bus mastering and dma mask
*/
rval = pci_enable_device_mem(pdev);
@@ -5183,8 +5233,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
fusion = instance->ctrl_context;
memset(fusion, 0,
((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
- INIT_LIST_HEAD(&fusion->cmd_pool);
- spin_lock_init(&fusion->mpt_pool_lock);
}
break;
default: /* For all other supported controllers */
@@ -5207,6 +5255,13 @@ static int megasas_probe_one(struct pci_dev *pdev,
break;
}
+ instance->system_info_buf = pci_zalloc_consistent(pdev,
+ sizeof(struct MR_DRV_SYSTEM_INFO),
+ &instance->system_info_h);
+
+ if (!instance->system_info_buf)
+ dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
+
/* Crash dump feature related initialisation*/
instance->drv_buf_index = 0;
instance->drv_buf_alloc = 0;
@@ -5315,55 +5370,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
}
}
-retry_irq_register:
- /*
- * Register IRQ
- */
- if (instance->msix_vectors) {
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < instance->msix_vectors; i++) {
- instance->irq_context[i].instance = instance;
- instance->irq_context[i].MSIxIndex = i;
- if (request_irq(instance->msixentry[i].vector,
- instance->instancet->service_isr, 0,
- "megasas",
- &instance->irq_context[i])) {
- printk(KERN_DEBUG "megasas: Failed to "
- "register IRQ for vector %d.\n", i);
- for (j = 0; j < i; j++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[j].vector, NULL);
- free_irq(
- instance->msixentry[j].vector,
- &instance->irq_context[j]);
- }
- /* Retry irq register for IO_APIC */
- instance->msix_vectors = 0;
- goto retry_irq_register;
- }
- if (smp_affinity_enable) {
- if (irq_set_affinity_hint(instance->msixentry[i].vector,
- get_cpu_mask(cpu)))
- dev_err(&instance->pdev->dev,
- "Error setting affinity hint "
- "for cpu %d\n", cpu);
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
- }
- } else {
- instance->irq_context[0].instance = instance;
- instance->irq_context[0].MSIxIndex = 0;
- if (request_irq(pdev->irq, instance->instancet->service_isr,
- IRQF_SHARED, "megasas",
- &instance->irq_context[0])) {
- printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
- goto fail_irq;
- }
- }
-
- instance->instancet->enable_intr(instance);
-
/*
* Store instance in PCI softstate
*/
@@ -5410,17 +5416,8 @@ retry_irq_register:
megasas_mgmt_info.max_index--;
instance->instancet->disable_intr(instance);
- if (instance->msix_vectors)
- for (i = 0; i < instance->msix_vectors; i++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
- free_irq(instance->msixentry[i].vector,
- &instance->irq_context[i]);
- }
- else
- free_irq(instance->pdev->irq, &instance->irq_context[0]);
-fail_irq:
+ megasas_destroy_irqs(instance);
+
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
@@ -5428,9 +5425,9 @@ fail_irq:
megasas_release_fusion(instance);
else
megasas_release_mfi(instance);
- fail_init_mfi:
if (instance->msix_vectors)
pci_disable_msix(instance->pdev);
+fail_init_mfi:
fail_alloc_dma_buf:
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
@@ -5487,11 +5484,7 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dev_err(&instance->pdev->dev, "Command timedout"
" from %s\n", __func__);
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return;
}
@@ -5538,11 +5531,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dev_err(&instance->pdev->dev, "Command timedout"
"from %s\n", __func__);
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return;
}
@@ -5558,7 +5547,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *host;
struct megasas_instance *instance;
- int i;
instance = pci_get_drvdata(pdev);
host = instance->host;
@@ -5583,16 +5571,8 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
pci_set_drvdata(instance->pdev, instance);
instance->instancet->disable_intr(instance);
- if (instance->msix_vectors)
- for (i = 0; i < instance->msix_vectors; i++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
- free_irq(instance->msixentry[i].vector,
- &instance->irq_context[i]);
- }
- else
- free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ megasas_destroy_irqs(instance);
+
if (instance->msix_vectors)
pci_disable_msix(instance->pdev);
@@ -5611,7 +5591,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
static int
megasas_resume(struct pci_dev *pdev)
{
- int rval, i, j, cpu;
+ int rval;
struct Scsi_Host *host;
struct megasas_instance *instance;
@@ -5681,50 +5661,10 @@ megasas_resume(struct pci_dev *pdev)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- /*
- * Register IRQ
- */
- if (instance->msix_vectors) {
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0 ; i < instance->msix_vectors; i++) {
- instance->irq_context[i].instance = instance;
- instance->irq_context[i].MSIxIndex = i;
- if (request_irq(instance->msixentry[i].vector,
- instance->instancet->service_isr, 0,
- "megasas",
- &instance->irq_context[i])) {
- printk(KERN_DEBUG "megasas: Failed to "
- "register IRQ for vector %d.\n", i);
- for (j = 0; j < i; j++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[j].vector, NULL);
- free_irq(
- instance->msixentry[j].vector,
- &instance->irq_context[j]);
- }
- goto fail_irq;
- }
-
- if (smp_affinity_enable) {
- if (irq_set_affinity_hint(instance->msixentry[i].vector,
- get_cpu_mask(cpu)))
- dev_err(&instance->pdev->dev, "Error "
- "setting affinity hint for cpu "
- "%d\n", cpu);
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
- }
- } else {
- instance->irq_context[0].instance = instance;
- instance->irq_context[0].MSIxIndex = 0;
- if (request_irq(pdev->irq, instance->instancet->service_isr,
- IRQF_SHARED, "megasas",
- &instance->irq_context[0])) {
- printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
- goto fail_irq;
- }
- }
+ if (instance->msix_vectors ?
+ megasas_setup_irqs_msix(instance, 0) :
+ megasas_setup_irqs_ioapic(instance))
+ goto fail_init_mfi;
/* Re-launch SR-IOV heartbeat timer */
if (instance->requestorId) {
@@ -5733,8 +5673,10 @@ megasas_resume(struct pci_dev *pdev)
&instance->sriov_heartbeat_timer,
megasas_sriov_heartbeat_handler,
MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
- else
+ else {
instance->skip_heartbeat_timer_del = 1;
+ goto fail_init_mfi;
+ }
}
instance->instancet->enable_intr(instance);
@@ -5748,7 +5690,6 @@ megasas_resume(struct pci_dev *pdev)
return 0;
-fail_irq:
fail_init_mfi:
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
@@ -5829,16 +5770,8 @@ static void megasas_detach_one(struct pci_dev *pdev)
instance->instancet->disable_intr(instance);
- if (instance->msix_vectors)
- for (i = 0; i < instance->msix_vectors; i++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
- free_irq(instance->msixentry[i].vector,
- &instance->irq_context[i]);
- }
- else
- free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ megasas_destroy_irqs(instance);
+
if (instance->msix_vectors)
pci_disable_msix(instance->pdev);
@@ -5899,6 +5832,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
instance->crash_dump_buf, instance->crash_dump_h);
+ if (instance->system_info_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+ instance->system_info_buf, instance->system_info_h);
+
scsi_host_put(host);
pci_disable_device(pdev);
@@ -5912,23 +5849,14 @@ static void megasas_detach_one(struct pci_dev *pdev)
*/
static void megasas_shutdown(struct pci_dev *pdev)
{
- int i;
struct megasas_instance *instance = pci_get_drvdata(pdev);
instance->unload = 1;
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
instance->instancet->disable_intr(instance);
- if (instance->msix_vectors)
- for (i = 0; i < instance->msix_vectors; i++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
- free_irq(instance->msixentry[i].vector,
- &instance->irq_context[i]);
- }
- else
- free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ megasas_destroy_irqs(instance);
+
if (instance->msix_vectors)
pci_disable_msix(instance->pdev);
}
@@ -6211,11 +6139,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
kbuff_arr[i] = NULL;
}
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return error;
}
@@ -6502,6 +6426,15 @@ static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
static ssize_t
+megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
+{
+ return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
+ MEGASAS_RELDATE);
+}
+
+static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL);
+
+static ssize_t
megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
{
return sprintf(buf, "%u\n", support_poll_for_event);
@@ -6841,6 +6774,11 @@ static int __init megasas_init(void)
goto err_dcf_attr_ver;
rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_release_date);
+ if (rval)
+ goto err_dcf_rel_date;
+
+ rval = driver_create_file(&megasas_pci_driver.driver,
&driver_attr_support_poll_for_event);
if (rval)
goto err_dcf_support_poll_for_event;
@@ -6863,6 +6801,9 @@ err_dcf_dbg_lvl:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_release_date);
+err_dcf_rel_date:
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
pci_unregister_driver(&megasas_pci_driver);
@@ -6882,6 +6823,8 @@ static void __exit megasas_exit(void)
&driver_attr_support_poll_for_event);
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_support_device_change);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_release_date);
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
pci_unregister_driver(&megasas_pci_driver);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e8b7a69428b6..be57b18675a4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -150,7 +150,7 @@ u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}
-u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
+__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.devHndlInfo[pd].curDevHdl;
}
@@ -743,7 +743,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
u8 retval = TRUE;
u8 do_invader = 0;
u64 *pdBlock = &io_info->pdBlock;
- u16 *pDevHandle = &io_info->devHandle;
+ __le16 *pDevHandle = &io_info->devHandle;
u32 logArm, rowMod, armQ, arm;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
@@ -777,7 +777,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
if (pd != MR_PD_INVALID)
*pDevHandle = MR_PdDevHandleGet(pd, map);
else {
- *pDevHandle = MR_PD_INVALID;
+ *pDevHandle = cpu_to_le16(MR_PD_INVALID);
if ((raid->level >= 5) &&
(!do_invader || (do_invader &&
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
@@ -825,7 +825,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u8 retval = TRUE;
u8 do_invader = 0;
u64 *pdBlock = &io_info->pdBlock;
- u16 *pDevHandle = &io_info->devHandle;
+ __le16 *pDevHandle = &io_info->devHandle;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
@@ -872,7 +872,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
/* Get dev handle from Pd. */
*pDevHandle = MR_PdDevHandleGet(pd, map);
else {
- *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
+ /* set dev handle as invalid. */
+ *pDevHandle = cpu_to_le16(MR_PD_INVALID);
if ((raid->level >= 5) &&
(!do_invader || (do_invader &&
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
@@ -1117,7 +1118,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
ref_in_start_stripe, io_info,
pRAID_Context, map);
/* If IO on an invalid Pd, then FP is not possible.*/
- if (io_info->devHandle == MR_PD_INVALID)
+ if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID))
io_info->fpOkForIo = FALSE;
return retval;
} else if (isRead) {
@@ -1349,11 +1350,11 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
return io_info->pd_after_lb;
}
-u16 get_updated_dev_handle(struct megasas_instance *instance,
+__le16 get_updated_dev_handle(struct megasas_instance *instance,
struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
{
u8 arm_pd;
- u16 devHandle;
+ __le16 devHandle;
struct fusion_context *fusion;
struct MR_DRV_RAID_MAP_ALL *drv_map;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index dba4de04de3c..46a0f8f4f677 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -53,10 +53,12 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dbg.h>
+#include <linux/dmi.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
+
extern void megasas_free_cmds(struct megasas_instance *instance);
extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
*instance);
@@ -156,28 +158,15 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
* megasas_get_cmd_fusion - Get a command from the free pool
* @instance: Adapter soft state
*
- * Returns a free command from the pool
+ * Returns the MPT frame indexed by blk_tag
*/
-struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
- *instance)
+inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
+ *instance, u32 blk_tag)
{
- unsigned long flags;
- struct fusion_context *fusion =
- (struct fusion_context *)instance->ctrl_context;
- struct megasas_cmd_fusion *cmd = NULL;
-
- spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
-
- if (!list_empty(&fusion->cmd_pool)) {
- cmd = list_entry((&fusion->cmd_pool)->next,
- struct megasas_cmd_fusion, list);
- list_del_init(&cmd->list);
- } else {
- printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
- }
+ struct fusion_context *fusion;
- spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
- return cmd;
+ fusion = instance->ctrl_context;
+ return fusion->cmd_list[blk_tag];
}
/**
@@ -188,47 +177,35 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd)
{
- unsigned long flags;
- struct fusion_context *fusion =
- (struct fusion_context *)instance->ctrl_context;
-
- spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
-
cmd->scmd = NULL;
- cmd->sync_cmd_idx = (u32)ULONG_MAX;
memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
- list_add(&cmd->list, (&fusion->cmd_pool)->next);
-
- spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
}
/**
- * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool
- * @instance: Adapter soft state
- * @cmd_mfi: MFI Command packet to be returned to free command pool
- * @cmd_mpt: MPT Command packet to be returned to free command pool
+ * megasas_fire_cmd_fusion - Sends command to the FW
*/
-inline void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
- struct megasas_cmd *cmd_mfi,
- struct megasas_cmd_fusion *cmd_fusion)
+static void
+megasas_fire_cmd_fusion(struct megasas_instance *instance,
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
+#if defined(writeq) && defined(CONFIG_64BIT)
+ u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
+ le32_to_cpu(req_desc->u.low));
+
+ writeq(req_data, &instance->reg_set->inbound_low_queue_port);
+#else
unsigned long flags;
- /*
- * TO DO: optimize this code and use only one lock instead of two
- * locks being used currently- mpt_pool_lock is acquired
- * inside mfi_pool_lock
- */
- spin_lock_irqsave(&instance->mfi_pool_lock, flags);
- megasas_return_cmd_fusion(instance, cmd_fusion);
- if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED)
- dev_err(&instance->pdev->dev, "Possible bug from %s %d\n",
- __func__, __LINE__);
- atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED);
- __megasas_return_cmd(instance, cmd_mfi);
- spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel(le32_to_cpu(req_desc->u.low),
+ &instance->reg_set->inbound_low_queue_port);
+ writel(le32_to_cpu(req_desc->u.high),
+ &instance->reg_set->inbound_high_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+#endif
}
+
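megasas_fire_cmd_fusion() must present the 64-bit request descriptor to the controller atomically: on 64-bit builds a single writeq() suffices, while 32-bit builds emit the two writel()s under hba_lock so another CPU cannot interleave its low/high halves. A hedged sketch of how a caller hands it a descriptor (field names follow MEGASAS_REQUEST_DESCRIPTOR_UNION; the SMID/flag encoding shown is illustrative, not a verbatim call site):

    union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
    
    /* Identify the frame by SMID and mark it as a SCSI IO request. */
    req_desc.SCSIIO.SMID = cpu_to_le16(smid);
    req_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
    			       MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT;
    
    /* One logical 64-bit doorbell write; locking is handled inside. */
    megasas_fire_cmd_fusion(instance, &req_desc);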
/**
* megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
* @instance: Adapter soft state
@@ -326,7 +303,6 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
kfree(fusion->cmd_list);
fusion->cmd_list = NULL;
- INIT_LIST_HEAD(&fusion->cmd_pool);
}
/**
@@ -464,7 +440,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
reply_desc = fusion->reply_frames_desc;
for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
- reply_desc->Words = ULLONG_MAX;
+ reply_desc->Words = cpu_to_le64(ULLONG_MAX);
io_frames_sz = fusion->io_frames_alloc_sz;
@@ -535,7 +511,9 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
cmd->index = i + 1;
cmd->scmd = NULL;
- cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */
+ cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
+ (i - instance->max_scsi_cmds) :
+ (u32)ULONG_MAX; /* Set to Invalid */
cmd->instance = instance;
cmd->io_request =
(struct MPI2_RAID_SCSI_IO_REQUEST *)
@@ -543,8 +521,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
memset(cmd->io_request, 0,
sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
cmd->io_request_phys_addr = io_req_base_phys + offset;
-
- list_add_tail(&cmd->list, &fusion->cmd_pool);
}
/*
@@ -605,14 +581,11 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
msleep(20);
}
- if (frame_hdr->cmd_status == 0xff) {
- if (fusion)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
+ if (frame_hdr->cmd_status == 0xff)
return -ETIME;
- }
- return 0;
+ return (frame_hdr->cmd_status == MFI_STAT_OK) ?
+ 0 : 1;
}
/**
@@ -633,6 +606,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
int i;
struct megasas_header *frame_hdr;
+ const char *sys_info;
fusion = instance->ctrl_context;
@@ -673,7 +647,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF;
- frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+ frame_hdr->flags = cpu_to_le16(
+ le16_to_cpu(frame_hdr->flags) |
+ MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
init_frame->cmd = MFI_CMD_INIT;
init_frame->cmd_status = 0xFF;
@@ -695,6 +671,16 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
+ sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
+ if (instance->system_info_buf && sys_info) {
+ memcpy(instance->system_info_buf->systemId, sys_info,
+ strlen(sys_info) > 64 ? 64 : strlen(sys_info));
+ instance->system_info_buf->systemIdLength =
+ strlen(sys_info) > 64 ? 64 : strlen(sys_info);
+ init_frame->system_info_lo = instance->system_info_h;
+ init_frame->system_info_hi = 0;
+ }
+
init_frame->queue_info_new_phys_addr_hi =
cpu_to_le32(upper_32_bits(ioc_init_handle));
init_frame->queue_info_new_phys_addr_lo =
@@ -719,8 +705,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
break;
}
- instance->instancet->fire_cmd(instance, req_desc.u.low,
- req_desc.u.high, instance->reg_set);
+ megasas_fire_cmd_fusion(instance, &req_desc);
wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
@@ -820,11 +805,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
else
ret = megasas_issue_polled(instance, cmd);
- if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
- megasas_return_mfi_mpt_pthr(instance, cmd,
- cmd->mpt_pthr_cmd_blocked);
- else
- megasas_return_cmd(instance, cmd);
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -1061,6 +1042,15 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
fusion->last_reply_idx[i] = 0;
/*
+ * For fusion adapters, 3 commands for IOCTL and 5 commands
+ * for driver's internal DCMDs.
+ */
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+
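Reserving MEGASAS_FUSION_IOCTL_CMDS slots in ioctl_sem bounds how many concurrent management IOCTLs may hold MFI frames, so user-space traffic can never starve the driver's internal DCMD reserve. Elsewhere in the driver (not in this hunk) the IOCTL entry point brackets each request with this semaphore; the pattern looks roughly like the sketch below:

    /* Throttle user IOCTLs to the reserved frame budget. */
    if (down_interruptible(&instance->ioctl_sem))
    	return -ERESTARTSYS;
    
    error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
    
    up(&instance->ioctl_sem);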
+ /*
* Allocate memory for descriptors
* Create a pool of commands
*/
@@ -1131,34 +1121,6 @@ fail_alloc_mfi_cmds:
}
/**
- * megasas_fire_cmd_fusion - Sends command to the FW
- * @frame_phys_addr : Physical address of cmd
- * @frame_count : Number of frames for the command
- * @regs : MFI register set
- */
-void
-megasas_fire_cmd_fusion(struct megasas_instance *instance,
- dma_addr_t req_desc_lo,
- u32 req_desc_hi,
- struct megasas_register_set __iomem *regs)
-{
-#if defined(writeq) && defined(CONFIG_64BIT)
- u64 req_data = (((u64)le32_to_cpu(req_desc_hi) << 32) |
- le32_to_cpu(req_desc_lo));
-
- writeq(req_data, &(regs)->inbound_low_queue_port);
-#else
- unsigned long flags;
-
- spin_lock_irqsave(&instance->hba_lock, flags);
-
- writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
- writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-#endif
-}
-
-/**
* map_cmd_status - Maps FW cmd status to OS cmd status
* @cmd : Pointer to cmd
* @status : status of cmd returned by FW
@@ -1497,7 +1459,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
u8 *raidLUN;
- device_id = MEGASAS_DEV_INDEX(instance, scp);
+ device_id = MEGASAS_DEV_INDEX(scp);
fusion = instance->ctrl_context;
@@ -1621,6 +1583,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->pd_r1_lb = io_info.pd_after_lb;
} else
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+
+ if ((raidLUN[0] == 1) &&
+ (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 2)) {
+ instance->dev_handle = !(instance->dev_handle);
+ io_info.devHandle =
+ local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
+ }
+
cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
io_request->DevHandle = io_info.devHandle;
/* populate the LUN field */
@@ -1650,121 +1620,68 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
}
/**
- * megasas_build_dcdb_fusion - Prepares IOs to devices
+ * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
* @instance: Adapter soft state
* @scp: SCSI command
* @cmd: Command to be prepared
*
- * Prepares the io_request frame for non-io cmds
+ * Prepares the io_request frame for non-rw io cmds for vd.
*/
-static void
-megasas_build_dcdb_fusion(struct megasas_instance *instance,
- struct scsi_cmnd *scmd,
- struct megasas_cmd_fusion *cmd)
+static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
{
u32 device_id;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
u16 pd_index = 0;
- u16 os_timeout_value;
- u16 timeout_limit;
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
struct fusion_context *fusion = instance->ctrl_context;
u8 span, physArm;
- u16 devHandle;
+ __le16 devHandle;
u32 ld, arRef, pd;
struct MR_LD_RAID *raid;
struct RAID_CONTEXT *pRAID_Context;
+ u8 fp_possible = 1;
io_request = cmd->io_request;
- device_id = MEGASAS_DEV_INDEX(instance, scmd);
- pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
- +scmd->device->id;
+ device_id = MEGASAS_DEV_INDEX(scmd);
+ pd_index = MEGASAS_PD_INDEX(scmd);
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
-
io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+ /* get RAID_Context pointer */
+ pRAID_Context = &io_request->RaidContext;
+ /* Check with FW team */
+ pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+ pRAID_Context->regLockRowLBA = 0;
+ pRAID_Context->regLockLength = 0;
- if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
- instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
- if (fusion->fast_path_io)
- io_request->DevHandle =
- local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
- io_request->RaidContext.RAIDFlags =
- MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
- << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
- cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
- cmd->request_desc->SCSIIO.MSIxIndex =
- instance->msix_vectors ?
- raw_smp_processor_id() %
- instance->msix_vectors :
- 0;
- os_timeout_value = scmd->request->timeout / HZ;
-
- if (instance->secure_jbod_support &&
- (megasas_cmd_type(scmd) == NON_READ_WRITE_SYSPDIO)) {
- /* system pd firmware path */
- io_request->Function =
- MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
- MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->RaidContext.timeoutValue =
- cpu_to_le16(os_timeout_value);
- } else {
- /* system pd Fast Path */
- io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
- io_request->RaidContext.regLockFlags = 0;
- io_request->RaidContext.regLockRowLBA = 0;
- io_request->RaidContext.regLockLength = 0;
- timeout_limit = (scmd->device->type == TYPE_DISK) ?
- 255 : 0xFFFF;
- io_request->RaidContext.timeoutValue =
- cpu_to_le16((os_timeout_value > timeout_limit) ?
- timeout_limit : os_timeout_value);
- if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
- io_request->IoFlags |=
- cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
-
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
- MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- }
- } else {
- if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
- goto NonFastPath;
-
- /*
- * For older firmware, Driver should not access ldTgtIdToLd
- * beyond index 127 and for Extended VD firmware, ldTgtIdToLd
- * should not go beyond 255.
- */
-
- if ((!fusion->fast_path_io) ||
- (device_id >= instance->fw_supported_vd_count))
- goto NonFastPath;
+ if (fusion->fast_path_io &&
+ (device_id < instance->fw_supported_vd_count)) {
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
-
if (ld >= instance->fw_supported_vd_count)
- goto NonFastPath;
+ fp_possible = 0;
raid = MR_LdRaidGet(ld, local_map_ptr);
-
- /* check if this LD is FP capable */
if (!(raid->capability.fpNonRWCapable))
- /* not FP capable, send as non-FP */
- goto NonFastPath;
+ fp_possible = 0;
+ } else
+ fp_possible = 0;
- /* get RAID_Context pointer */
- pRAID_Context = &io_request->RaidContext;
+ if (!fp_possible) {
+ io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = cpu_to_le16(device_id);
+ io_request->LUN[1] = scmd->device->lun;
+ pRAID_Context->timeoutValue =
+ cpu_to_le16(scmd->request->timeout / HZ);
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ } else {
/* set RAID context values */
- pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
- pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
- pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
- pRAID_Context->regLockRowLBA = 0;
- pRAID_Context->regLockLength = 0;
- pRAID_Context->configSeqNum = raid->seqNum;
+ pRAID_Context->configSeqNum = raid->seqNum;
+ pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
+ pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
/* get the DevHandle for the PD (since this is
fpNonRWCapable, this is a single disk RAID0) */
@@ -1776,7 +1693,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
/* build request descriptor */
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
- MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
cmd->request_desc->SCSIIO.DevHandle = devHandle;
/* populate the LUN field */
@@ -1785,18 +1702,87 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
/* build the raidScsiIO structure */
io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
io_request->DevHandle = devHandle;
+ }
+}
- return;
+/**
+ * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ * @fp_possible: parameter to detect fast path or firmware path io.
+ *
+ * Prepares the io_request frame for rw/non-rw io cmds for syspds
+ */
+static void
+megasas_build_syspd_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
+{
+ u32 device_id;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+ u16 pd_index = 0;
+ u16 os_timeout_value;
+ u16 timeout_limit;
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+ struct RAID_CONTEXT *pRAID_Context;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ device_id = MEGASAS_DEV_INDEX(scmd);
+ pd_index = MEGASAS_PD_INDEX(scmd);
+ os_timeout_value = scmd->request->timeout / HZ;
-NonFastPath:
+ io_request = cmd->io_request;
+ /* get RAID_Context pointer */
+ pRAID_Context = &io_request->RaidContext;
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+ io_request->LUN[1] = scmd->device->lun;
+ pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+
+ pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+ pRAID_Context->configSeqNum = 0;
+ local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+ io_request->DevHandle =
+ local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+
+ cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->msix_vectors ?
+ (raw_smp_processor_id() % instance->msix_vectors) : 0;
+
+ if (!fp_possible) {
+ /* system pd firmware path */
io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
- io_request->DevHandle = cpu_to_le16(device_id);
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
- MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
+ } else {
+ /* system pd Fast Path */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ pRAID_Context->regLockFlags = 0;
+ pRAID_Context->regLockRowLBA = 0;
+ pRAID_Context->regLockLength = 0;
+ timeout_limit = (scmd->device->type == TYPE_DISK) ?
+ 255 : 0xFFFF;
+ pRAID_Context->timeoutValue =
+ cpu_to_le16((os_timeout_value > timeout_limit) ?
+ timeout_limit : os_timeout_value);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ cmd->request_desc->SCSIIO.RequestFlags |=
+ (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ pRAID_Context->Type = MPI2_TYPE_CUDA;
+ pRAID_Context->nseg = 0x1;
+ io_request->IoFlags |=
+ cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+ }
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
- io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
- int_to_scsilun(scmd->device->lun, (struct scsi_lun *)io_request->LUN);
}
/**
@@ -1813,11 +1799,10 @@ megasas_build_io_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scp,
struct megasas_cmd_fusion *cmd)
{
- u32 device_id, sge_count;
+ u32 sge_count;
+ u8 cmd_type;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
- device_id = MEGASAS_DEV_INDEX(instance, scp);
-
/* Zero out some fields so they don't get reused */
memset(io_request->LUN, 0x0, 8);
io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
@@ -1837,10 +1822,24 @@ megasas_build_io_fusion(struct megasas_instance *instance,
*/
io_request->IoFlags = cpu_to_le16(scp->cmd_len);
- if (megasas_cmd_type(scp) == READ_WRITE_LDIO)
+ switch (cmd_type = megasas_cmd_type(scp)) {
+ case READ_WRITE_LDIO:
megasas_build_ldio_fusion(instance, scp, cmd);
- else
- megasas_build_dcdb_fusion(instance, scp, cmd);
+ break;
+ case NON_READ_WRITE_LDIO:
+ megasas_build_ld_nonrw_fusion(instance, scp, cmd);
+ break;
+ case READ_WRITE_SYSPDIO:
+ case NON_READ_WRITE_SYSPDIO:
+ if (instance->secure_jbod_support &&
+ (cmd_type == NON_READ_WRITE_SYSPDIO))
+ megasas_build_syspd_fusion(instance, scp, cmd, 0);
+ else
+ megasas_build_syspd_fusion(instance, scp, cmd, 1);
+ break;
+ default:
+ break;
+ }
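The dispatch above relies on megasas_cmd_type(), defined outside this hunk, to bucket each scmd by opcode and target class: READ/WRITE CDBs versus everything else, and logical drives versus system PDs. A simplified, assumption-laden sketch of that classification (the real helper lives in megaraid_sas_base.c and may differ in detail):

    static u8 example_cmd_type(struct scsi_cmnd *scmd)
    {
    	bool rw, logical;
    
    	switch (scmd->cmnd[0]) {
    	case READ_6: case WRITE_6:
    	case READ_10: case WRITE_10:
    	case READ_12: case WRITE_12:
    	case READ_16: case WRITE_16:
    		rw = true;
    		break;
    	default:
    		rw = false;
    	}
    	/* Channels >= MEGASAS_MAX_PD_CHANNELS address virtual disks. */
    	logical = scmd->device->channel >= MEGASAS_MAX_PD_CHANNELS;
    
    	if (logical)
    		return rw ? READ_WRITE_LDIO : NON_READ_WRITE_LDIO;
    	return rw ? READ_WRITE_SYSPDIO : NON_READ_WRITE_SYSPDIO;
    }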
/*
* Construct SGL
@@ -1915,9 +1914,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
fusion = instance->ctrl_context;
- cmd = megasas_get_cmd_fusion(instance);
- if (!cmd)
- return SCSI_MLQUEUE_HOST_BUSY;
+ cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
index = cmd->index;
@@ -1948,9 +1945,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
*/
atomic_inc(&instance->fw_outstanding);
- instance->instancet->fire_cmd(instance,
- req_desc->u.low, req_desc->u.high,
- instance->reg_set);
+ megasas_fire_cmd_fusion(instance, req_desc);
return 0;
}
@@ -1975,6 +1970,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
union desc_value d_val;
struct LD_LOAD_BALANCE_INFO *lbinfo;
int threshold_reply_count = 0;
+ struct scsi_cmnd *scmd_local = NULL;
fusion = instance->ctrl_context;
@@ -1998,7 +1994,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
num_completed = 0;
- while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
+ while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
+ d_val.u.high != cpu_to_le32(UINT_MAX)) {
smid = le16_to_cpu(reply_desc->SMID);
cmd_fusion = fusion->cmd_list[smid - 1];
@@ -2010,14 +2007,14 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
if (cmd_fusion->scmd)
cmd_fusion->scmd->SCp.ptr = NULL;
+ scmd_local = cmd_fusion->scmd;
status = scsi_io_req->RaidContext.status;
extStatus = scsi_io_req->RaidContext.exStatus;
switch (scsi_io_req->Function) {
case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
/* Update load balancing info */
- device_id = MEGASAS_DEV_INDEX(instance,
- cmd_fusion->scmd);
+ device_id = MEGASAS_DEV_INDEX(scmd_local);
lbinfo = &fusion->load_balance_info[device_id];
if (cmd_fusion->scmd->SCp.Status &
MEGASAS_LOAD_BALANCE_FLAG) {
@@ -2035,29 +2032,25 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
/* Map the FW Cmd Status */
map_cmd_status(cmd_fusion, status, extStatus);
- scsi_dma_unmap(cmd_fusion->scmd);
- cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
scsi_io_req->RaidContext.status = 0;
scsi_io_req->RaidContext.exStatus = 0;
megasas_return_cmd_fusion(instance, cmd_fusion);
+ scsi_dma_unmap(scmd_local);
+ scmd_local->scsi_done(scmd_local);
atomic_dec(&instance->fw_outstanding);
break;
case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
- if (!cmd_mfi->mpt_pthr_cmd_blocked) {
- if (megasas_dbg_lvl == 5)
- dev_info(&instance->pdev->dev,
- "freeing mfi/mpt pass-through "
- "from %s %d\n",
- __func__, __LINE__);
- megasas_return_mfi_mpt_pthr(instance, cmd_mfi,
- cmd_fusion);
- }
-
- megasas_complete_cmd(instance, cmd_mfi, DID_OK);
- cmd_fusion->flags = 0;
+ /* Polled mode: return (dummy free) the command here.
+ * In interrupt mode, the caller performs the reverse check.
+ */
+ if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
+ cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
+ megasas_return_cmd(instance, cmd_mfi);
+ } else
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
break;
}
@@ -2066,7 +2059,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
fusion->reply_q_depth)
fusion->last_reply_idx[MSIxIndex] = 0;
- desc->Words = ULLONG_MAX;
+ desc->Words = cpu_to_le64(ULLONG_MAX);
num_completed++;
threshold_reply_count++;
@@ -2217,27 +2210,14 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd;
struct fusion_context *fusion;
struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
- u32 opcode;
- cmd = megasas_get_cmd_fusion(instance);
- if (!cmd)
- return 1;
+ fusion = instance->ctrl_context;
+
+ cmd = megasas_get_cmd_fusion(instance,
+ instance->max_scsi_cmds + mfi_cmd->index);
/* Save the smid. To be used for returning the cmd */
mfi_cmd->context.smid = cmd->index;
- cmd->sync_cmd_idx = mfi_cmd->index;
-
- /* Set this only for Blocked commands */
- opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode);
- if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
- && (mfi_cmd->frame->dcmd.mbox.b[1] == 1))
- mfi_cmd->is_wait_event = 1;
-
- if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
- mfi_cmd->is_wait_event = 1;
-
- if (mfi_cmd->is_wait_event)
- mfi_cmd->mpt_pthr_cmd_blocked = cmd;
/*
* For cmds where the flag is set, store the flag and check
@@ -2246,9 +2226,8 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
*/
if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
- cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+ mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
- fusion = instance->ctrl_context;
io_req = cmd->io_request;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
@@ -2327,9 +2306,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
return;
}
- atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED);
- instance->instancet->fire_cmd(instance, req_desc->u.low,
- req_desc->u.high, instance->reg_set);
+ megasas_fire_cmd_fusion(instance, req_desc);
}
/**
@@ -2508,7 +2485,42 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
fusion->last_reply_idx[i] = 0;
reply_desc = fusion->reply_frames_desc;
for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
- reply_desc->Words = ULLONG_MAX;
+ reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+}
+
+/*
+ * megasas_refire_mgmt_cmd : Re-fire management commands
+ * @instance: Controller's soft instance
+ */
+void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
+{
+ int j;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd_mfi;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u16 smid;
+
+ fusion = instance->ctrl_context;
+
+ /* Re-fire management commands.
+ * Do not traverse the complete MPT frame pool. Start from max_scsi_cmds.
+ */
+ for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
+ cmd_fusion = fusion->cmd_list[j];
+ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+ smid = le16_to_cpu(cmd_mfi->context.smid);
+
+ if (!smid)
+ continue;
+ req_desc = megasas_get_request_descriptor
+ (instance, smid - 1);
+ if (req_desc && (cmd_mfi->frame->dcmd.opcode !=
+ cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)))
+ megasas_fire_cmd_fusion(instance, req_desc);
+ else
+ megasas_return_cmd(instance, cmd_mfi);
+ }
}
/* Check for a second path that is currently UP */
@@ -2538,14 +2550,13 @@ out:
/* Core fusion reset function */
int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
{
- int retval = SUCCESS, i, j, retry = 0, convert = 0;
+ int retval = SUCCESS, i, retry = 0, convert = 0;
struct megasas_instance *instance;
struct megasas_cmd_fusion *cmd_fusion;
struct fusion_context *fusion;
- struct megasas_cmd *cmd_mfi;
- union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u32 host_diag, abs_state, status_reg, reset_adapter;
u32 io_timeout_in_crash_mode = 0;
+ struct scsi_cmnd *scmd_local = NULL;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
@@ -2613,15 +2624,16 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
iotimeout = 0;
/* Now return commands back to the OS */
- for (i = 0 ; i < instance->max_fw_cmds; i++) {
+ for (i = 0 ; i < instance->max_scsi_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
+ scmd_local = cmd_fusion->scmd;
if (cmd_fusion->scmd) {
- scsi_dma_unmap(cmd_fusion->scmd);
- cmd_fusion->scmd->result =
+ scmd_local->result =
megasas_check_mpio_paths(instance,
- cmd_fusion->scmd);
- cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
+ scmd_local);
megasas_return_cmd_fusion(instance, cmd_fusion);
+ scsi_dma_unmap(scmd_local);
+ scmd_local->scsi_done(scmd_local);
atomic_dec(&instance->fw_outstanding);
}
}
@@ -2790,44 +2802,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
continue;
}
- /* Re-fire management commands */
- for (j = 0 ; j < instance->max_fw_cmds; j++) {
- cmd_fusion = fusion->cmd_list[j];
- if (cmd_fusion->sync_cmd_idx !=
- (u32)ULONG_MAX) {
- cmd_mfi =
- instance->
- cmd_list[cmd_fusion->sync_cmd_idx];
- if (cmd_mfi->frame->dcmd.opcode ==
- cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
- megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion);
- } else {
- req_desc =
- megasas_get_request_descriptor(
- instance,
- cmd_mfi->context.smid
- -1);
- if (!req_desc) {
- printk(KERN_WARNING
- "req_desc NULL"
- " for scsi%d\n",
- instance->host->host_no);
- /* Return leaked MPT
- frame */
- megasas_return_cmd_fusion(instance, cmd_fusion);
- } else {
- instance->instancet->
- fire_cmd(instance,
- req_desc->
- u.low,
- req_desc->
- u.high,
- instance->
- reg_set);
- }
- }
- }
- }
+ megasas_refire_mgmt_cmd(instance);
if (megasas_get_ctrl_info(instance)) {
dev_info(&instance->pdev->dev,
@@ -2978,7 +2953,6 @@ void megasas_fusion_ocr_wq(struct work_struct *work)
}
struct megasas_instance_template megasas_instance_template_fusion = {
- .fire_cmd = megasas_fire_cmd_fusion,
.enable_intr = megasas_enable_intr_fusion,
.disable_intr = megasas_disable_intr_fusion,
.clear_intr = megasas_clear_intr_fusion,
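
The completion hunks above collapse the old mpt_pthr bookkeeping into a single DRV_DCMD_POLLED_MODE flag, and reorder LD-IO completion so the MPT frame returns to the pool before scsi_done() runs. A minimal sketch of the resulting split, in plain C with hypothetical names (illustrative, not driver code):

/* One flag distinguishes polled management commands (the issuer
 * waits on the frame itself) from interrupt-driven ones. */
enum { DRV_POLLED_MODE = 1u << 0 };

struct mgmt_cmd {
	unsigned int flags;
};

static void complete_mgmt_cmd(struct mgmt_cmd *cmd,
			      void (*return_cmd)(struct mgmt_cmd *),
			      void (*complete_cmd)(struct mgmt_cmd *))
{
	if (cmd->flags & DRV_POLLED_MODE) {
		cmd->flags &= ~DRV_POLLED_MODE;	/* dummy free: issuer polls */
		return_cmd(cmd);
	} else {
		complete_cmd(cmd);		/* wake the waiting context */
	}
}

Returning the frame before completing the SCSI command also means a request issued from the completion path can never find the pool momentarily short one frame.
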
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 56e6db2d5874..ced6dc0cf8e8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -104,18 +104,18 @@ struct RAID_CONTEXT {
u8 nseg:4;
#endif
u8 resvd0;
- u16 timeoutValue;
+ __le16 timeoutValue;
u8 regLockFlags;
u8 resvd1;
- u16 VirtualDiskTgtId;
- u64 regLockRowLBA;
- u32 regLockLength;
- u16 nextLMId;
+ __le16 VirtualDiskTgtId;
+ __le64 regLockRowLBA;
+ __le32 regLockLength;
+ __le16 nextLMId;
u8 exStatus;
u8 status;
u8 RAIDFlags;
u8 numSGE;
- u16 configSeqNum;
+ __le16 configSeqNum;
u8 spanArm;
u8 resvd2[3];
};
@@ -182,61 +182,61 @@ enum REGION_TYPE {
#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
struct MPI25_IEEE_SGE_CHAIN64 {
- u64 Address;
- u32 Length;
- u16 Reserved1;
+ __le64 Address;
+ __le32 Length;
+ __le16 Reserved1;
u8 NextChainOffset;
u8 Flags;
};
struct MPI2_SGE_SIMPLE_UNION {
- u32 FlagsLength;
+ __le32 FlagsLength;
union {
- u32 Address32;
- u64 Address64;
+ __le32 Address32;
+ __le64 Address64;
} u;
};
struct MPI2_SCSI_IO_CDB_EEDP32 {
u8 CDB[20]; /* 0x00 */
- u32 PrimaryReferenceTag; /* 0x14 */
- u16 PrimaryApplicationTag; /* 0x18 */
- u16 PrimaryApplicationTagMask; /* 0x1A */
- u32 TransferLength; /* 0x1C */
+ __be32 PrimaryReferenceTag; /* 0x14 */
+ __be16 PrimaryApplicationTag; /* 0x18 */
+ __be16 PrimaryApplicationTagMask; /* 0x1A */
+ __le32 TransferLength; /* 0x1C */
};
struct MPI2_SGE_CHAIN_UNION {
- u16 Length;
+ __le16 Length;
u8 NextChainOffset;
u8 Flags;
union {
- u32 Address32;
- u64 Address64;
+ __le32 Address32;
+ __le64 Address64;
} u;
};
struct MPI2_IEEE_SGE_SIMPLE32 {
- u32 Address;
- u32 FlagsLength;
+ __le32 Address;
+ __le32 FlagsLength;
};
struct MPI2_IEEE_SGE_CHAIN32 {
- u32 Address;
- u32 FlagsLength;
+ __le32 Address;
+ __le32 FlagsLength;
};
struct MPI2_IEEE_SGE_SIMPLE64 {
- u64 Address;
- u32 Length;
- u16 Reserved1;
+ __le64 Address;
+ __le32 Length;
+ __le16 Reserved1;
u8 Reserved2;
u8 Flags;
};
struct MPI2_IEEE_SGE_CHAIN64 {
- u64 Address;
- u32 Length;
- u16 Reserved1;
+ __le64 Address;
+ __le32 Length;
+ __le16 Reserved1;
u8 Reserved2;
u8 Flags;
};
@@ -269,34 +269,34 @@ union MPI2_SCSI_IO_CDB_UNION {
* Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
*/
struct MPI2_RAID_SCSI_IO_REQUEST {
- u16 DevHandle; /* 0x00 */
+ __le16 DevHandle; /* 0x00 */
u8 ChainOffset; /* 0x02 */
u8 Function; /* 0x03 */
- u16 Reserved1; /* 0x04 */
+ __le16 Reserved1; /* 0x04 */
u8 Reserved2; /* 0x06 */
u8 MsgFlags; /* 0x07 */
u8 VP_ID; /* 0x08 */
u8 VF_ID; /* 0x09 */
- u16 Reserved3; /* 0x0A */
- u32 SenseBufferLowAddress; /* 0x0C */
- u16 SGLFlags; /* 0x10 */
+ __le16 Reserved3; /* 0x0A */
+ __le32 SenseBufferLowAddress; /* 0x0C */
+ __le16 SGLFlags; /* 0x10 */
u8 SenseBufferLength; /* 0x12 */
u8 Reserved4; /* 0x13 */
u8 SGLOffset0; /* 0x14 */
u8 SGLOffset1; /* 0x15 */
u8 SGLOffset2; /* 0x16 */
u8 SGLOffset3; /* 0x17 */
- u32 SkipCount; /* 0x18 */
- u32 DataLength; /* 0x1C */
- u32 BidirectionalDataLength; /* 0x20 */
- u16 IoFlags; /* 0x24 */
- u16 EEDPFlags; /* 0x26 */
- u32 EEDPBlockSize; /* 0x28 */
- u32 SecondaryReferenceTag; /* 0x2C */
- u16 SecondaryApplicationTag; /* 0x30 */
- u16 ApplicationTagTranslationMask; /* 0x32 */
+ __le32 SkipCount; /* 0x18 */
+ __le32 DataLength; /* 0x1C */
+ __le32 BidirectionalDataLength; /* 0x20 */
+ __le16 IoFlags; /* 0x24 */
+ __le16 EEDPFlags; /* 0x26 */
+ __le32 EEDPBlockSize; /* 0x28 */
+ __le32 SecondaryReferenceTag; /* 0x2C */
+ __le16 SecondaryApplicationTag; /* 0x30 */
+ __le16 ApplicationTagTranslationMask; /* 0x32 */
u8 LUN[8]; /* 0x34 */
- u32 Control; /* 0x3C */
+ __le32 Control; /* 0x3C */
union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
struct RAID_CONTEXT RaidContext; /* 0x60 */
union MPI2_SGE_IO_UNION SGL; /* 0x80 */
@@ -315,45 +315,45 @@ struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
u8 RequestFlags; /* 0x00 */
u8 MSIxIndex; /* 0x01 */
- u16 SMID; /* 0x02 */
- u16 LMID; /* 0x04 */
- u16 DescriptorTypeDependent; /* 0x06 */
+ __le16 SMID; /* 0x02 */
+ __le16 LMID; /* 0x04 */
+ __le16 DescriptorTypeDependent; /* 0x06 */
};
/* High Priority Request Descriptor */
struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
u8 RequestFlags; /* 0x00 */
u8 MSIxIndex; /* 0x01 */
- u16 SMID; /* 0x02 */
- u16 LMID; /* 0x04 */
- u16 Reserved1; /* 0x06 */
+ __le16 SMID; /* 0x02 */
+ __le16 LMID; /* 0x04 */
+ __le16 Reserved1; /* 0x06 */
};
/* SCSI IO Request Descriptor */
struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
u8 RequestFlags; /* 0x00 */
u8 MSIxIndex; /* 0x01 */
- u16 SMID; /* 0x02 */
- u16 LMID; /* 0x04 */
- u16 DevHandle; /* 0x06 */
+ __le16 SMID; /* 0x02 */
+ __le16 LMID; /* 0x04 */
+ __le16 DevHandle; /* 0x06 */
};
/* SCSI Target Request Descriptor */
struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
u8 RequestFlags; /* 0x00 */
u8 MSIxIndex; /* 0x01 */
- u16 SMID; /* 0x02 */
- u16 LMID; /* 0x04 */
- u16 IoIndex; /* 0x06 */
+ __le16 SMID; /* 0x02 */
+ __le16 LMID; /* 0x04 */
+ __le16 IoIndex; /* 0x06 */
};
/* RAID Accelerator Request Descriptor */
struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
u8 RequestFlags; /* 0x00 */
u8 MSIxIndex; /* 0x01 */
- u16 SMID; /* 0x02 */
- u16 LMID; /* 0x04 */
- u16 Reserved; /* 0x06 */
+ __le16 SMID; /* 0x02 */
+ __le16 LMID; /* 0x04 */
+ __le16 Reserved; /* 0x06 */
};
/* union of Request Descriptors */
@@ -366,10 +366,10 @@ union MEGASAS_REQUEST_DESCRIPTOR_UNION {
struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
union {
struct {
- u32 low;
- u32 high;
+ __le32 low;
+ __le32 high;
} u;
- u64 Words;
+ __le64 Words;
};
};
@@ -377,35 +377,35 @@ union MEGASAS_REQUEST_DESCRIPTOR_UNION {
struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
u8 ReplyFlags; /* 0x00 */
u8 MSIxIndex; /* 0x01 */
- u16 DescriptorTypeDependent1; /* 0x02 */
- u32 DescriptorTypeDependent2; /* 0x04 */
+ __le16 DescriptorTypeDependent1; /* 0x02 */
+ __le32 DescriptorTypeDependent2; /* 0x04 */
};
/* Address Reply Descriptor */
struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
u8 ReplyFlags; /* 0x00 */
u8 MSIxIndex; /* 0x01 */
- u16 SMID; /* 0x02 */
- u32 ReplyFrameAddress; /* 0x04 */
+ __le16 SMID; /* 0x02 */
+ __le32 ReplyFrameAddress; /* 0x04 */
};
/* SCSI IO Success Reply Descriptor */
struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
u8 ReplyFlags; /* 0x00 */
u8 MSIxIndex; /* 0x01 */
- u16 SMID; /* 0x02 */
- u16 TaskTag; /* 0x04 */
- u16 Reserved1; /* 0x06 */
+ __le16 SMID; /* 0x02 */
+ __le16 TaskTag; /* 0x04 */
+ __le16 Reserved1; /* 0x06 */
};
/* TargetAssist Success Reply Descriptor */
struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
u8 ReplyFlags; /* 0x00 */
u8 MSIxIndex; /* 0x01 */
- u16 SMID; /* 0x02 */
+ __le16 SMID; /* 0x02 */
u8 SequenceNumber; /* 0x04 */
u8 Reserved1; /* 0x05 */
- u16 IoIndex; /* 0x06 */
+ __le16 IoIndex; /* 0x06 */
};
/* Target Command Buffer Reply Descriptor */
@@ -414,16 +414,16 @@ struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
u8 MSIxIndex; /* 0x01 */
u8 VP_ID; /* 0x02 */
u8 Flags; /* 0x03 */
- u16 InitiatorDevHandle; /* 0x04 */
- u16 IoIndex; /* 0x06 */
+ __le16 InitiatorDevHandle; /* 0x04 */
+ __le16 IoIndex; /* 0x06 */
};
/* RAID Accelerator Success Reply Descriptor */
struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
u8 ReplyFlags; /* 0x00 */
u8 MSIxIndex; /* 0x01 */
- u16 SMID; /* 0x02 */
- u32 Reserved; /* 0x04 */
+ __le16 SMID; /* 0x02 */
+ __le32 Reserved; /* 0x04 */
};
/* union of Reply Descriptors */
@@ -435,7 +435,7 @@ union MPI2_REPLY_DESCRIPTORS_UNION {
struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
RAIDAcceleratorSuccess;
- u64 Words;
+ __le64 Words;
};
/* IOCInit Request message */
@@ -444,28 +444,28 @@ struct MPI2_IOC_INIT_REQUEST {
u8 Reserved1; /* 0x01 */
u8 ChainOffset; /* 0x02 */
u8 Function; /* 0x03 */
- u16 Reserved2; /* 0x04 */
+ __le16 Reserved2; /* 0x04 */
u8 Reserved3; /* 0x06 */
u8 MsgFlags; /* 0x07 */
u8 VP_ID; /* 0x08 */
u8 VF_ID; /* 0x09 */
- u16 Reserved4; /* 0x0A */
- u16 MsgVersion; /* 0x0C */
- u16 HeaderVersion; /* 0x0E */
+ __le16 Reserved4; /* 0x0A */
+ __le16 MsgVersion; /* 0x0C */
+ __le16 HeaderVersion; /* 0x0E */
u32 Reserved5; /* 0x10 */
- u16 Reserved6; /* 0x14 */
+ __le16 Reserved6; /* 0x14 */
u8 Reserved7; /* 0x16 */
u8 HostMSIxVectors; /* 0x17 */
- u16 Reserved8; /* 0x18 */
- u16 SystemRequestFrameSize; /* 0x1A */
- u16 ReplyDescriptorPostQueueDepth; /* 0x1C */
- u16 ReplyFreeQueueDepth; /* 0x1E */
- u32 SenseBufferAddressHigh; /* 0x20 */
- u32 SystemReplyAddressHigh; /* 0x24 */
- u64 SystemRequestFrameBaseAddress; /* 0x28 */
- u64 ReplyDescriptorPostQueueAddress;/* 0x30 */
- u64 ReplyFreeQueueAddress; /* 0x38 */
- u64 TimeStamp; /* 0x40 */
+ __le16 Reserved8; /* 0x18 */
+ __le16 SystemRequestFrameSize; /* 0x1A */
+ __le16 ReplyDescriptorPostQueueDepth; /* 0x1C */
+ __le16 ReplyFreeQueueDepth; /* 0x1E */
+ __le32 SenseBufferAddressHigh; /* 0x20 */
+ __le32 SystemReplyAddressHigh; /* 0x24 */
+ __le64 SystemRequestFrameBaseAddress; /* 0x28 */
+ __le64 ReplyDescriptorPostQueueAddress;/* 0x30 */
+ __le64 ReplyFreeQueueAddress; /* 0x38 */
+ __le64 TimeStamp; /* 0x40 */
};
/* mrpriv defines */
@@ -491,41 +491,41 @@ struct MPI2_IOC_INIT_REQUEST {
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
struct MR_DEV_HANDLE_INFO {
- u16 curDevHdl;
+ __le16 curDevHdl;
u8 validHandles;
u8 reserved;
- u16 devHandle[2];
+ __le16 devHandle[2];
};
struct MR_ARRAY_INFO {
- u16 pd[MAX_RAIDMAP_ROW_SIZE];
+ __le16 pd[MAX_RAIDMAP_ROW_SIZE];
};
struct MR_QUAD_ELEMENT {
- u64 logStart;
- u64 logEnd;
- u64 offsetInSpan;
- u32 diff;
- u32 reserved1;
+ __le64 logStart;
+ __le64 logEnd;
+ __le64 offsetInSpan;
+ __le32 diff;
+ __le32 reserved1;
};
struct MR_SPAN_INFO {
- u32 noElements;
- u32 reserved1;
+ __le32 noElements;
+ __le32 reserved1;
struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
};
struct MR_LD_SPAN {
- u64 startBlk;
- u64 numBlks;
- u16 arrayRef;
+ __le64 startBlk;
+ __le64 numBlks;
+ __le16 arrayRef;
u8 spanRowSize;
u8 spanRowDataSize;
u8 reserved[4];
};
struct MR_SPAN_BLOCK_INFO {
- u64 num_rows;
+ __le64 num_rows;
struct MR_LD_SPAN span;
struct MR_SPAN_INFO block_span_info;
};
@@ -558,8 +558,8 @@ struct MR_LD_RAID {
u32 reserved4:7;
#endif
} capability;
- u32 reserved6;
- u64 size;
+ __le32 reserved6;
+ __le64 size;
u8 spanDepth;
u8 level;
u8 stripeShift;
@@ -568,12 +568,12 @@ struct MR_LD_RAID {
u8 writeMode;
u8 PRL;
u8 SRL;
- u16 targetId;
+ __le16 targetId;
u8 ldState;
u8 regTypeReqOnWrite;
u8 modFactor;
u8 regTypeReqOnRead;
- u16 seqNum;
+ __le16 seqNum;
struct {
u32 ldSyncRequired:1;
@@ -592,20 +592,20 @@ struct MR_LD_SPAN_MAP {
};
struct MR_FW_RAID_MAP {
- u32 totalSize;
+ __le32 totalSize;
union {
struct {
- u32 maxLd;
- u32 maxSpanDepth;
- u32 maxRowSize;
- u32 maxPdCount;
- u32 maxArrays;
+ __le32 maxLd;
+ __le32 maxSpanDepth;
+ __le32 maxRowSize;
+ __le32 maxPdCount;
+ __le32 maxArrays;
} validationInfo;
- u32 version[5];
+ __le32 version[5];
};
- u32 ldCount;
- u32 Reserved1;
+ __le32 ldCount;
+ __le32 Reserved1;
u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+
MAX_RAIDMAP_VIEWS];
u8 fpPdIoTimeoutSec;
@@ -620,7 +620,7 @@ struct IO_REQUEST_INFO {
u32 numBlocks;
u16 ldTgtId;
u8 isRead;
- u16 devHandle;
+ __le16 devHandle;
u64 pdBlock;
u8 fpOkForIo;
u8 IoforUnevenSpan;
@@ -634,7 +634,7 @@ struct IO_REQUEST_INFO {
struct MR_LD_TARGET_SYNC {
u8 targetId;
u8 reserved;
- u16 seqNum;
+ __le16 seqNum;
};
#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
@@ -679,7 +679,6 @@ struct megasas_cmd_fusion {
*/
u32 sync_cmd_idx;
u32 index;
- u8 flags;
u8 pd_r1_lb;
};
@@ -720,27 +719,27 @@ struct MR_DRV_RAID_MAP {
* This field will be manipulated by the driver for the ext raid map,
* else pick the value from the firmware raid map.
*/
- u32 totalSize;
+ __le32 totalSize;
union {
struct {
- u32 maxLd;
- u32 maxSpanDepth;
- u32 maxRowSize;
- u32 maxPdCount;
- u32 maxArrays;
+ __le32 maxLd;
+ __le32 maxSpanDepth;
+ __le32 maxRowSize;
+ __le32 maxPdCount;
+ __le32 maxArrays;
} validationInfo;
- u32 version[5];
+ __le32 version[5];
};
/* timeout value used by driver in FP IOs*/
u8 fpPdIoTimeoutSec;
u8 reserved2[7];
- u16 ldCount;
- u16 arCount;
- u16 spanCount;
- u16 reserve3;
+ __le16 ldCount;
+ __le16 arCount;
+ __le16 spanCount;
+ __le16 reserve3;
struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
@@ -779,10 +778,10 @@ struct MR_FW_RAID_MAP_EXT {
u8 fpPdIoTimeoutSec;
u8 reserved2[7];
- u16 ldCount;
- u16 arCount;
- u16 spanCount;
- u16 reserve3;
+ __le16 ldCount;
+ __le16 arCount;
+ __le16 spanCount;
+ __le16 reserve3;
struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
@@ -792,10 +791,6 @@ struct MR_FW_RAID_MAP_EXT {
struct fusion_context {
struct megasas_cmd_fusion **cmd_list;
- struct list_head cmd_pool;
-
- spinlock_t mpt_pool_lock;
-
dma_addr_t req_frames_desc_phys;
u8 *req_frames_desc;
@@ -839,10 +834,10 @@ struct fusion_context {
};
union desc_value {
- u64 word;
+ __le64 word;
struct {
- u32 low;
- u32 high;
+ __le32 low;
+ __le32 high;
} u;
};
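
The header hunks above annotate every firmware-visible field with its wire endianness. A short kernel-style sketch of the access pattern this enforces (le16_to_cpu() and cpu_to_le64() are the standard helpers; struct fw_desc is hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_desc {
	__le16 smid;	/* firmware reads/writes little-endian */
	__le64 words;
};

static inline u16 fw_desc_smid(const struct fw_desc *d)
{
	return le16_to_cpu(d->smid);	/* convert at every access */
}

static inline void fw_desc_poison(struct fw_desc *d)
{
	/* same idea as desc->Words = cpu_to_le64(ULLONG_MAX) above */
	d->words = cpu_to_le64(~0ULL);
}

On little-endian hosts the helpers compile to plain loads and stores; the annotations mainly buy sparse checking and correct byte swapping on big-endian machines.
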
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 53030b0e8015..d40d734aa53a 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -56,7 +56,6 @@ static struct scsi_host_template mvs_sht = {
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.can_queue = 1,
- .cmd_per_lun = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index c6077cefbeca..53c84771f0e8 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -274,7 +274,6 @@ static struct scsi_host_template nsp32_template = {
.can_queue = 1,
.sg_tablesize = NSP32_SG_SIZE,
.max_sectors = 128,
- .cmd_per_lun = 1,
.this_id = NSP32_HOST_SCSIID,
.use_clustering = DISABLE_CLUSTERING,
.eh_abort_handler = nsp32_eh_abort,
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 1b6c8833a304..5fb6eefc6541 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -86,7 +86,6 @@ static struct scsi_host_template nsp_driver_template = {
.can_queue = 1,
.this_id = NSP_INITIATOR_ID,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
};
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index bcaf89fe0c9e..c670dc704c74 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -72,7 +72,6 @@ static struct scsi_host_template qlogicfas_driver_template = {
.can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
};
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 155f9573021f..20011c8afbb5 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -680,7 +680,6 @@ static struct scsi_host_template sym53c500_driver_template = {
.can_queue = 1,
.this_id = 7,
.sg_tablesize = 32,
- .cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = SYM53C500_shost_attrs
};
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 65555916d3b8..a132f2664d2f 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -78,7 +78,6 @@ static struct scsi_host_template pm8001_sht = {
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.can_queue = 1,
- .cmd_per_lun = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 1db8b26063b4..ee00e27ba396 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -974,7 +974,6 @@ static struct scsi_host_template ppa_template = {
.bios_param = ppa_biosparam,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
.can_queue = 1,
.slave_alloc = ppa_adjust_queue,
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 5298def33733..4924424d20fe 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -347,7 +347,6 @@ static struct scsi_host_template ps3rom_host_template = {
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.emulated = 1, /* only sg driver uses this */
.max_sectors = PS3ROM_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index c68a66e8cfc1..5d0ec42a9317 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4217,7 +4217,6 @@ static struct scsi_host_template qla1280_driver_template = {
.can_queue = 0xfffff,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
};
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 285cb204f300..664013115c9d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -708,7 +708,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00d4,
"Unable to initialize ISP84XX.\n");
- qla84xx_put_chip(vha);
+ qla84xx_put_chip(vha);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a1ab25fca874..36fbd4c7af8f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2797,10 +2797,10 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
handle = req->current_outstanding_cmd;
for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
}
if (index == req->num_outstanding_cmds) {
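
For context, the loop reindented above scans the outstanding-command array as a ring, starting just after the last handle used. The same logic in isolation (names hypothetical; slot 0 is reserved, as in the driver):

static unsigned int find_free_handle(void **slots, unsigned int nslots,
				     unsigned int last)
{
	unsigned int handle = last, index;

	for (index = 1; index < nslots; index++) {
		if (++handle == nslots)
			handle = 1;		/* wrap past reserved slot 0 */
		if (!slots[handle])
			return handle;		/* free slot found */
	}

	return 0;	/* 0 means the ring is full */
}
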
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6dc14cd782b2..5559d5e75bbf 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1580,7 +1580,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
ql_log(ql_log_warn, fcport->vha, 0x503c,
"Async-%s error - hdl=%x response(%x).\n",
type, sp->handle, sts->data[3]);
- iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+ iocb->u.tmf.data = QLA_FUNCTION_FAILED;
}
}
@@ -1979,7 +1979,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
rval = EXT_STATUS_ERR;
break;
}
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->reply_payload_rcv_len = 0;
done:
/* Return the vendor specific reply to API */
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 7d2b18f2675c..1620b0ec977b 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1843,7 +1843,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
ptab_desc = qla82xx_get_table_desc(unirom,
QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
- if (!ptab_desc)
+ if (!ptab_desc)
return -1;
entries = cpu_to_le32(ptab_desc->num_entries);
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index ed4d6b6b53e3..000c57e4d033 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -397,11 +397,11 @@ qla8044_idc_lock(struct qla_hw_data *ha)
* has the lock, wait for 2secs
* and retry
*/
- ql_dbg(ql_dbg_p3p, vha, 0xb08a,
- "%s: IDC lock Recovery by %d "
- "failed, Retrying timeout\n", __func__,
- ha->portnum);
- timeout = 0;
+ ql_dbg(ql_dbg_p3p, vha, 0xb08a,
+ "%s: IDC lock Recovery by %d "
+ "failed, Retrying timeout\n", __func__,
+ ha->portnum);
+ timeout = 0;
}
}
msleep(QLA8044_DRV_LOCK_MSLEEP);
@@ -3141,8 +3141,7 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
goto error;
addr7 = addr2 - (4 * stride1);
- data = qla8044_ipmdio_rd_reg(vha, addr1, addr3,
- mask, addr7);
+ data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7);
if (data == -1)
goto error;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7462dd70b150..a28815b8276f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4418,7 +4418,10 @@ retry_lock2:
void
qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
- uint16_t options = (requester_id << 15) | BIT_7, retry;
+#if 0
+ uint16_t options = (requester_id << 15) | BIT_7;
+#endif
+ uint16_t retry;
uint32_t data;
struct qla_hw_data *ha = base_vha->hw;
@@ -4454,6 +4457,7 @@ retry_unlock:
return;
+#if 0
/* XXX: IDC-unlock implementation using access-control mbx */
retry = 0;
retry_unlock2:
@@ -4469,6 +4473,7 @@ retry_unlock2:
}
return;
+#endif
}
int
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index fe8a8d157e22..4a484d60be0d 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3712,6 +3712,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
+#if 1
+ /*
+ * FIXME: Reject non zero SRR relative offset until we can test
+ * this code properly.
+ */
+ pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
+ return -1;
+#else
struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
size_t first_offset = 0, rem_offset = offset, tmp = 0;
int i, sg_srr_cnt, bufflen = 0;
@@ -3721,13 +3729,6 @@ static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
"cmd->sg_cnt: %u, direction: %d\n",
cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
- /*
- * FIXME: Reject non zero SRR relative offset until we can test
- * this code properly.
- */
- pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
- return -1;
-
if (!cmd->sg || !cmd->sg_cnt) {
ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
"Missing cmd->sg or zero cmd->sg_cnt in"
@@ -3810,6 +3811,7 @@ static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
BUG();
return 0;
+#endif
}
static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index a22bb1b40ce2..61cac87fb86f 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -193,7 +193,6 @@ static struct scsi_host_template qlogicfas_driver_template = {
.can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
};
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index fe122700cad8..676385ff28ef 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1287,7 +1287,6 @@ static struct scsi_host_template qpti_template = {
.can_queue = QLOGICPTI_REQ_QUEUE_LEN,
.this_id = 7,
.sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
- .cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
};
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3833bf59fb66..207d6a7a1bd0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -98,52 +98,6 @@ EXPORT_SYMBOL(scsi_sd_probe_domain);
ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
EXPORT_SYMBOL(scsi_sd_pm_domain);
-/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
- * You may not alter any existing entry (although adding new ones is
- * encouraged once assigned by ANSI/INCITS T10
- */
-static const char *const scsi_device_types[] = {
- "Direct-Access ",
- "Sequential-Access",
- "Printer ",
- "Processor ",
- "WORM ",
- "CD-ROM ",
- "Scanner ",
- "Optical Device ",
- "Medium Changer ",
- "Communications ",
- "ASC IT8 ",
- "ASC IT8 ",
- "RAID ",
- "Enclosure ",
- "Direct-Access-RBC",
- "Optical card ",
- "Bridge controller",
- "Object storage ",
- "Automation/Drive ",
- "Security Manager ",
- "Direct-Access-ZBC",
-};
-
-/**
- * scsi_device_type - Return 17 char string indicating device type.
- * @type: type number to look up
- */
-
-const char * scsi_device_type(unsigned type)
-{
- if (type == 0x1e)
- return "Well-known LUN ";
- if (type == 0x1f)
- return "No Device ";
- if (type >= ARRAY_SIZE(scsi_device_types))
- return "Unknown ";
- return scsi_device_types[type];
-}
-
-EXPORT_SYMBOL(scsi_device_type);
-
struct scsi_host_cmd_pool {
struct kmem_cache *cmd_slab;
struct kmem_cache *sense_slab;
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
new file mode 100644
index 000000000000..2ff092252b76
--- /dev/null
+++ b/drivers/scsi/scsi_common.c
@@ -0,0 +1,178 @@
+/*
+ * SCSI functions used by both the initiator and the target code.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <scsi/scsi_common.h>
+
+/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
+ * You may not alter any existing entry (although adding new ones is
+ * encouraged once assigned by ANSI/INCITS T10)
+ */
+static const char *const scsi_device_types[] = {
+ "Direct-Access ",
+ "Sequential-Access",
+ "Printer ",
+ "Processor ",
+ "WORM ",
+ "CD-ROM ",
+ "Scanner ",
+ "Optical Device ",
+ "Medium Changer ",
+ "Communications ",
+ "ASC IT8 ",
+ "ASC IT8 ",
+ "RAID ",
+ "Enclosure ",
+ "Direct-Access-RBC",
+ "Optical card ",
+ "Bridge controller",
+ "Object storage ",
+ "Automation/Drive ",
+ "Security Manager ",
+ "Direct-Access-ZBC",
+};
+
+/**
+ * scsi_device_type - Return 17 char string indicating device type.
+ * @type: type number to look up
+ */
+const char *scsi_device_type(unsigned type)
+{
+ if (type == 0x1e)
+ return "Well-known LUN ";
+ if (type == 0x1f)
+ return "No Device ";
+ if (type >= ARRAY_SIZE(scsi_device_types))
+ return "Unknown ";
+ return scsi_device_types[type];
+}
+EXPORT_SYMBOL(scsi_device_type);
+
+/**
+ * scsilun_to_int - convert a scsi_lun to an int
+ * @scsilun: struct scsi_lun to be converted.
+ *
+ * Description:
+ * Convert @scsilun from a struct scsi_lun to a 64-bit host byte-ordered
+ * integer, and return the result. The caller must check for
+ * truncation before using this function.
+ *
+ * Notes:
+ * For a description of the LUN format post SCSI-3, see the SCSI
+ * Architecture Model; for SCSI-3, see the SCSI Controller Commands.
+ *
+ * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function
+ * returns the integer: 0x0b03d204
+ *
+ * This encoding will return a standard integer LUN for LUNs smaller
+ * than 256, which typically use a single level LUN structure with
+ * addressing method 0.
+ */
+u64 scsilun_to_int(struct scsi_lun *scsilun)
+{
+ int i;
+ u64 lun;
+
+ lun = 0;
+ for (i = 0; i < sizeof(lun); i += 2)
+ lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) |
+ ((u64)scsilun->scsi_lun[i + 1] << (i * 8)));
+ return lun;
+}
+EXPORT_SYMBOL(scsilun_to_int);
+
+/**
+ * int_to_scsilun - convert an int back into a scsi_lun
+ * @lun: integer to be converted
+ * @scsilun: struct scsi_lun to be set.
+ *
+ * Description:
+ * Reverses the transformation of scsilun_to_int(), which packed
+ * an 8-byte lun value into an int. This routine unpacks the int
+ * back into the lun value.
+ *
+ * Notes:
+ * Given an integer : 0x0b03d204, this function returns a
+ * struct scsi_lun of: d2 04 0b 03 00 00 00 00
+ *
+ */
+void int_to_scsilun(u64 lun, struct scsi_lun *scsilun)
+{
+ int i;
+
+ memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));
+
+ for (i = 0; i < sizeof(lun); i += 2) {
+ scsilun->scsi_lun[i] = (lun >> 8) & 0xFF;
+ scsilun->scsi_lun[i+1] = lun & 0xFF;
+ lun = lun >> 16;
+ }
+}
+EXPORT_SYMBOL(int_to_scsilun);
+
+/**
+ * scsi_normalize_sense - normalize main elements from either fixed or
+ * descriptor sense data format into a common format.
+ *
+ * @sense_buffer: byte array containing sense data returned by device
+ * @sb_len: number of valid bytes in sense_buffer
+ * @sshdr: pointer to instance of structure that common
+ * elements are written to.
+ *
+ * Notes:
+ * The "main elements" from sense data are: response_code, sense_key,
+ * asc, ascq and additional_length (only for descriptor format).
+ *
+ * Typically this function can be called after a device has
+ * responded to a SCSI command with the CHECK_CONDITION status.
+ *
+ * Return value:
+ * true if valid sense data information found, else false.
+ */
+bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
+ struct scsi_sense_hdr *sshdr)
+{
+ if (!sense_buffer || !sb_len)
+ return false;
+
+ memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
+
+ sshdr->response_code = (sense_buffer[0] & 0x7f);
+
+ if (!scsi_sense_valid(sshdr))
+ return false;
+
+ if (sshdr->response_code >= 0x72) {
+ /*
+ * descriptor format
+ */
+ if (sb_len > 1)
+ sshdr->sense_key = (sense_buffer[1] & 0xf);
+ if (sb_len > 2)
+ sshdr->asc = sense_buffer[2];
+ if (sb_len > 3)
+ sshdr->ascq = sense_buffer[3];
+ if (sb_len > 7)
+ sshdr->additional_length = sense_buffer[7];
+ } else {
+ /*
+ * fixed format
+ */
+ if (sb_len > 2)
+ sshdr->sense_key = (sense_buffer[2] & 0xf);
+ if (sb_len > 7) {
+ sb_len = (sb_len < (sense_buffer[7] + 8)) ?
+ sb_len : (sense_buffer[7] + 8);
+ if (sb_len > 12)
+ sshdr->asc = sense_buffer[12];
+ if (sb_len > 13)
+ sshdr->ascq = sense_buffer[13];
+ }
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(scsi_normalize_sense);
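
A hedged usage sketch for the helpers consolidated into scsi_common.c. The three calls are the real exported APIs; the wrapper function and the exact includes are illustrative:

#include <linux/kernel.h>
#include <linux/bug.h>
#include <scsi/scsi_common.h>

static void scsi_common_demo(const u8 *sense, int sense_len)
{
	struct scsi_lun wire;
	struct scsi_sense_hdr sshdr;
	u64 lun = 0x0b03d204ULL;	/* example LUN from the kdoc above */

	int_to_scsilun(lun, &wire);		/* packs d2 04 0b 03 00 ... */
	WARN_ON(scsilun_to_int(&wire) != lun);	/* round-trips exactly */

	if (scsi_normalize_sense(sense, sense_len, &sshdr))
		pr_info("sense key %#x asc %#x ascq %#x\n",
			sshdr.sense_key, sshdr.asc, sshdr.ascq);
}
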
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index c95a4e943fc6..106884a5444e 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2399,70 +2399,6 @@ out_put_autopm_host:
}
EXPORT_SYMBOL(scsi_ioctl_reset);
-/**
- * scsi_normalize_sense - normalize main elements from either fixed or
- * descriptor sense data format into a common format.
- *
- * @sense_buffer: byte array containing sense data returned by device
- * @sb_len: number of valid bytes in sense_buffer
- * @sshdr: pointer to instance of structure that common
- * elements are written to.
- *
- * Notes:
- * The "main elements" from sense data are: response_code, sense_key,
- * asc, ascq and additional_length (only for descriptor format).
- *
- * Typically this function can be called after a device has
- * responded to a SCSI command with the CHECK_CONDITION status.
- *
- * Return value:
- * true if valid sense data information found, else false;
- */
-bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
- struct scsi_sense_hdr *sshdr)
-{
- if (!sense_buffer || !sb_len)
- return false;
-
- memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
-
- sshdr->response_code = (sense_buffer[0] & 0x7f);
-
- if (!scsi_sense_valid(sshdr))
- return false;
-
- if (sshdr->response_code >= 0x72) {
- /*
- * descriptor format
- */
- if (sb_len > 1)
- sshdr->sense_key = (sense_buffer[1] & 0xf);
- if (sb_len > 2)
- sshdr->asc = sense_buffer[2];
- if (sb_len > 3)
- sshdr->ascq = sense_buffer[3];
- if (sb_len > 7)
- sshdr->additional_length = sense_buffer[7];
- } else {
- /*
- * fixed format
- */
- if (sb_len > 2)
- sshdr->sense_key = (sense_buffer[2] & 0xf);
- if (sb_len > 7) {
- sb_len = (sb_len < (sense_buffer[7] + 8)) ?
- sb_len : (sense_buffer[7] + 8);
- if (sb_len > 12)
- sshdr->asc = sense_buffer[12];
- if (sb_len > 13)
- sshdr->ascq = sense_buffer[13];
- }
- }
-
- return true;
-}
-EXPORT_SYMBOL(scsi_normalize_sense);
-
bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
struct scsi_sense_hdr *sshdr)
{
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6efab1c455e1..f9f3f8203d42 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -280,7 +280,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->host->cmd_per_lun, shost->bqt,
shost->hostt->tag_alloc_policy);
}
- scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
+ scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
+ sdev->host->cmd_per_lun : 1);
scsi_sysfs_device_initialize(sdev);
@@ -1269,68 +1270,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
}
/**
- * scsilun_to_int - convert a scsi_lun to an int
- * @scsilun: struct scsi_lun to be converted.
- *
- * Description:
- * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered
- * integer, and return the result. The caller must check for
- * truncation before using this function.
- *
- * Notes:
- * For a description of the LUN format, post SCSI-3 see the SCSI
- * Architecture Model, for SCSI-3 see the SCSI Controller Commands.
- *
- * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function
- * returns the integer: 0x0b03d204
- *
- * This encoding will return a standard integer LUN for LUNs smaller
- * than 256, which typically use a single level LUN structure with
- * addressing method 0.
- **/
-u64 scsilun_to_int(struct scsi_lun *scsilun)
-{
- int i;
- u64 lun;
-
- lun = 0;
- for (i = 0; i < sizeof(lun); i += 2)
- lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) |
- ((u64)scsilun->scsi_lun[i + 1] << (i * 8)));
- return lun;
-}
-EXPORT_SYMBOL(scsilun_to_int);
-
-/**
- * int_to_scsilun - reverts an int into a scsi_lun
- * @lun: integer to be reverted
- * @scsilun: struct scsi_lun to be set.
- *
- * Description:
- * Reverts the functionality of the scsilun_to_int, which packed
- * an 8-byte lun value into an int. This routine unpacks the int
- * back into the lun value.
- *
- * Notes:
- * Given an integer : 0x0b03d204, this function returns a
- * struct scsi_lun of: d2 04 0b 03 00 00 00 00
- *
- **/
-void int_to_scsilun(u64 lun, struct scsi_lun *scsilun)
-{
- int i;
-
- memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));
-
- for (i = 0; i < sizeof(lun); i += 2) {
- scsilun->scsi_lun[i] = (lun >> 8) & 0xFF;
- scsilun->scsi_lun[i+1] = lun & 0xFF;
- lun = lun >> 16;
- }
-}
-EXPORT_SYMBOL(int_to_scsilun);
-
-/**
* scsi_report_lun_scan - Scan using SCSI REPORT LUN results
* @starget: which target
* @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
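
The queue-depth hunk above is what lets the .cmd_per_lun = 1 template entries elsewhere in this series go away: a template that leaves the field zero now gets a starting depth of 1 from the scan path instead of a depth of 0. The fallback, isolated:

/* sketch: initial queue depth chosen for a newly scanned device */
static int initial_queue_depth(short cmd_per_lun)
{
	return cmd_per_lun ? cmd_per_lun : 1;
}
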
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 67d43e35693d..55647aae065c 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -204,6 +204,8 @@ iscsi_create_endpoint(int dd_size)
iscsi_match_epid);
if (!dev)
break;
+ else
+ put_device(dev);
}
if (id == ISCSI_MAX_EPID) {
printk(KERN_ERR "Too many connections. Max supported %u\n",
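
The one-line fix above addresses a reference leak: class_find_device() returns its match with a reference held, so an endpoint-ID probe that only tests for existence must drop it. The pattern, sketched with hypothetical names:

#include <linux/types.h>
#include <linux/device.h>

static bool id_in_use(struct class *cls, const void *id,
		      int (*match)(struct device *, const void *))
{
	struct device *dev = class_find_device(cls, NULL, id, match);

	if (!dev)
		return false;

	put_device(dev);	/* balance the reference from the lookup */
	return true;
}
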
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7f9d65fe4fd9..3b2fcb4fada0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2988,7 +2988,8 @@ static int sd_probe(struct device *dev)
sdkp->dev.class = &sd_disk_class;
dev_set_name(&sdkp->dev, "%s", dev_name(dev));
- if (device_add(&sdkp->dev))
+ error = device_add(&sdkp->dev);
+ if (error)
goto out_free_index;
get_device(dev);
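
The sd_probe() hunk above stops ignoring device_add()'s return value. A minimal sketch of the canonical pattern it moves toward (names hypothetical):

#include <linux/device.h>

static int register_child(struct device *child)
{
	int error;

	device_initialize(child);
	error = device_add(child);
	if (error)
		put_device(child);	/* undo device_initialize() */

	return error;
}
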
diff --git a/drivers/scsi/snic/Makefile b/drivers/scsi/snic/Makefile
new file mode 100644
index 000000000000..ef7c0dd47f40
--- /dev/null
+++ b/drivers/scsi/snic/Makefile
@@ -0,0 +1,17 @@
+obj-$(CONFIG_SCSI_SNIC) += snic.o
+
+snic-y := \
+ snic_attrs.o \
+ snic_main.o \
+ snic_res.o \
+ snic_isr.o \
+ snic_ctl.o \
+ snic_io.o \
+ snic_scsi.o \
+ snic_disc.o \
+ vnic_cq.o \
+ vnic_intr.o \
+ vnic_dev.o \
+ vnic_wq.o
+
+snic-$(CONFIG_SCSI_SNIC_DEBUG_FS) += snic_debugfs.o snic_trc.o
diff --git a/drivers/scsi/snic/cq_desc.h b/drivers/scsi/snic/cq_desc.h
new file mode 100644
index 000000000000..a5290562c1fa
--- /dev/null
+++ b/drivers/scsi/snic/cq_desc.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+ CQ_DESC_TYPE_WQ_ENET = 0,
+ CQ_DESC_TYPE_DESC_COPY = 1,
+ CQ_DESC_TYPE_WQ_EXCH = 2,
+ CQ_DESC_TYPE_RQ_ENET = 3,
+ CQ_DESC_TYPE_RQ_FCP = 4,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout. The
+ * type_specific area is unique for each completion
+ * queue type.
+ */
+struct cq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 type_specific[11];
+ u8 type_color;
+};
+
+#define CQ_DESC_TYPE_BITS 4
+#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK 1
+#define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_Q_NUM_BITS 10
+#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS 12
+#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
+static inline void cq_desc_dec(const struct cq_desc *desc_arg,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+#endif /* _CQ_DESC_H_ */
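
A hypothetical consumer loop showing how the color bit, together with the rmb() inside cq_desc_dec(), makes ring reuse safe: an entry counts as new only while its color matches the consumer's expected color, which flips on every wrap.

#include "cq_desc.h"

static void drain_cq(struct cq_desc *ring, unsigned int ring_size,
		     unsigned int *to_clean, u8 *expected_color)
{
	u8 type, color;
	u16 q_number, completed_index;

	for (;;) {
		cq_desc_dec(&ring[*to_clean], &type, &color,
			    &q_number, &completed_index);
		if (color != *expected_color)
			break;	/* stale entry: nothing new to service */

		/* hand (type, q_number, completed_index) to the
		 * per-queue service routine here */

		if (++(*to_clean) == ring_size) {
			*to_clean = 0;
			*expected_color ^= 1;	/* ring wrapped */
		}
	}
}
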
diff --git a/drivers/scsi/snic/cq_enet_desc.h b/drivers/scsi/snic/cq_enet_desc.h
new file mode 100644
index 000000000000..0a1be2ed0288
--- /dev/null
+++ b/drivers/scsi/snic/cq_enet_desc.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 reserved[11];
+ u8 type_color;
+};
+
+static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
new file mode 100644
index 000000000000..d7f5ba6ba84c
--- /dev/null
+++ b/drivers/scsi/snic/snic.h
@@ -0,0 +1,414 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _SNIC_H_
+#define _SNIC_H_
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/mempool.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include "snic_disc.h"
+#include "snic_io.h"
+#include "snic_res.h"
+#include "snic_trc.h"
+#include "snic_stats.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_snic.h"
+
+#define SNIC_DRV_NAME "snic"
+#define SNIC_DRV_DESCRIPTION "Cisco SCSI NIC Driver"
+#define SNIC_DRV_VERSION "0.0.1.18"
+#define PFX SNIC_DRV_NAME ":"
+#define DFX SNIC_DRV_NAME "%d: "
+
+#define DESC_CLEAN_LOW_WATERMARK 8
+#define SNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
+#define SNIC_MAX_IO_REQ 50 /* scsi_cmnd tag map entries */
+#define SNIC_MIN_IO_REQ 8 /* Min IO throttle count */
+#define SNIC_IO_LOCKS 64 /* IO locks: power of 2 */
+#define SNIC_DFLT_QUEUE_DEPTH 32 /* Default Queue Depth */
+#define SNIC_MAX_QUEUE_DEPTH 64 /* Max Queue Depth */
+#define SNIC_DFLT_CMD_TIMEOUT 90 /* Extended tmo for FW */
+
+/*
+ * Tag bits used for special requests.
+ */
+#define SNIC_TAG_ABORT BIT(30) /* Tag indicating abort */
+#define SNIC_TAG_DEV_RST BIT(29) /* Tag for device reset */
+#define SNIC_TAG_IOCTL_DEV_RST BIT(28) /* Tag for User Device Reset */
+#define SNIC_TAG_MASK (BIT(24) - 1) /* Mask for lookup */
+#define SNIC_NO_TAG -1
+
+/*
+ * Command flags to identify the type of command and for other future use
+ */
+#define SNIC_NO_FLAGS 0
+#define SNIC_IO_INITIALIZED BIT(0)
+#define SNIC_IO_ISSUED BIT(1)
+#define SNIC_IO_DONE BIT(2)
+#define SNIC_IO_REQ_NULL BIT(3)
+#define SNIC_IO_ABTS_PENDING BIT(4)
+#define SNIC_IO_ABORTED BIT(5)
+#define SNIC_IO_ABTS_ISSUED BIT(6)
+#define SNIC_IO_TERM_ISSUED BIT(7)
+#define SNIC_IO_ABTS_TIMEDOUT BIT(8)
+#define SNIC_IO_ABTS_TERM_DONE BIT(9)
+#define SNIC_IO_ABTS_TERM_REQ_NULL BIT(10)
+#define SNIC_IO_ABTS_TERM_TIMEDOUT BIT(11)
+#define SNIC_IO_INTERNAL_TERM_PENDING BIT(12)
+#define SNIC_IO_INTERNAL_TERM_ISSUED BIT(13)
+#define SNIC_DEVICE_RESET BIT(14)
+#define SNIC_DEV_RST_ISSUED BIT(15)
+#define SNIC_DEV_RST_TIMEDOUT BIT(16)
+#define SNIC_DEV_RST_ABTS_ISSUED BIT(17)
+#define SNIC_DEV_RST_TERM_ISSUED BIT(18)
+#define SNIC_DEV_RST_DONE BIT(19)
+#define SNIC_DEV_RST_REQ_NULL BIT(20)
+#define SNIC_DEV_RST_ABTS_DONE BIT(21)
+#define SNIC_DEV_RST_TERM_DONE BIT(22)
+#define SNIC_DEV_RST_ABTS_PENDING BIT(23)
+#define SNIC_DEV_RST_PENDING BIT(24)
+#define SNIC_DEV_RST_NOTSUP BIT(25)
+#define SNIC_SCSI_CLEANUP BIT(26)
+#define SNIC_HOST_RESET_ISSUED BIT(27)
+
+#define SNIC_ABTS_TIMEOUT 30000 /* msec */
+#define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */
+#define SNIC_HOST_RESET_TIMEOUT 30000 /* msec */
+
+
+/*
+ * These are protected by the hashed req_lock.
+ */
+#define CMD_SP(Cmnd) \
+ (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->rqi)
+#define CMD_STATE(Cmnd) \
+ (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->state)
+#define CMD_ABTS_STATUS(Cmnd) \
+ (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->abts_status)
+#define CMD_LR_STATUS(Cmnd) \
+ (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->lr_status)
+#define CMD_FLAGS(Cmnd) \
+ (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->flags)
+
+#define SNIC_INVALID_CODE 0x100 /* Hdr Status val unused by firmware */
+
+#define SNIC_MAX_TARGET 256
+#define SNIC_FLAGS_NONE (0)
+
+/* snic module params */
+extern unsigned int snic_max_qdepth;
+
+/* snic debugging */
+extern unsigned int snic_log_level;
+
+#define SNIC_MAIN_LOGGING 0x1
+#define SNIC_SCSI_LOGGING 0x2
+#define SNIC_ISR_LOGGING 0x8
+#define SNIC_DESC_LOGGING 0x10
+
+#define SNIC_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(snic_log_level & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define SNIC_MAIN_DBG(host, fmt, args...) \
+ SNIC_CHECK_LOGGING(SNIC_MAIN_LOGGING, \
+ shost_printk(KERN_INFO, host, fmt, ## args);)
+
+#define SNIC_SCSI_DBG(host, fmt, args...) \
+ SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \
+ shost_printk(KERN_INFO, host, fmt, ##args);)
+
+#define SNIC_DISC_DBG(host, fmt, args...) \
+ SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \
+ shost_printk(KERN_INFO, host, fmt, ##args);)
+
+#define SNIC_ISR_DBG(host, fmt, args...) \
+ SNIC_CHECK_LOGGING(SNIC_ISR_LOGGING, \
+ shost_printk(KERN_INFO, host, fmt, ##args);)
+
+#define SNIC_HOST_ERR(host, fmt, args...) \
+ shost_printk(KERN_ERR, host, fmt, ##args)
+
+#define SNIC_HOST_INFO(host, fmt, args...) \
+ shost_printk(KERN_INFO, host, fmt, ##args)
+
+#define SNIC_INFO(fmt, args...) \
+ pr_info(PFX fmt, ## args)
+
+#define SNIC_DBG(fmt, args...) \
+ pr_info(PFX fmt, ## args)
+
+#define SNIC_ERR(fmt, args...) \
+ pr_err(PFX fmt, ## args)
+
+#ifdef DEBUG
+#define SNIC_BUG_ON(EXPR) \
+ ({ \
+ if (EXPR) { \
+ SNIC_ERR("SNIC BUG(%s)\n", #EXPR); \
+ BUG_ON(EXPR); \
+ } \
+ })
+#else
+#define SNIC_BUG_ON(EXPR) \
+ ({ \
+ if (EXPR) { \
+ SNIC_ERR("SNIC BUG(%s) at %s : %d\n", \
+ #EXPR, __func__, __LINE__); \
+ WARN_ON_ONCE(EXPR); \
+ } \
+ })
+#endif
+
+/* Soft assert */
+#define SNIC_ASSERT_NOT_IMPL(EXPR) \
+ ({ \
+ if (EXPR) {\
+ SNIC_INFO("Functionality not impl'ed at %s:%d\n", \
+ __func__, __LINE__); \
+ WARN_ON_ONCE(EXPR); \
+ } \
+ })
+
+
+extern const char *snic_state_str[];
+
+enum snic_intx_intr_index {
+ SNIC_INTX_WQ_RQ_COPYWQ,
+ SNIC_INTX_ERR,
+ SNIC_INTX_NOTIFY,
+ SNIC_INTX_INTR_MAX,
+};
+
+enum snic_msix_intr_index {
+ SNIC_MSIX_WQ,
+ SNIC_MSIX_IO_CMPL,
+ SNIC_MSIX_ERR_NOTIFY,
+ SNIC_MSIX_INTR_MAX,
+};
+
+struct snic_msix_entry {
+ int requested;
+ char devname[IFNAMSIZ];
+ irqreturn_t (*isr)(int, void *);
+ void *devid;
+};
+
+enum snic_state {
+ SNIC_INIT = 0,
+ SNIC_ERROR,
+ SNIC_ONLINE,
+ SNIC_OFFLINE,
+ SNIC_FWRESET,
+};
+
+#define SNIC_WQ_MAX 1
+#define SNIC_CQ_IO_CMPL_MAX 1
+#define SNIC_CQ_MAX (SNIC_WQ_MAX + SNIC_CQ_IO_CMPL_MAX)
+
+/* firmware version information */
+struct snic_fw_info {
+ u32 fw_ver;
+ u32 hid; /* u16 hid | u16 vnic id */
+ u32 max_concur_ios; /* max concurrent ios */
+ u32 max_sgs_per_cmd; /* max sgls per IO */
+ u32 max_io_sz; /* max io size supported */
+ u32 hba_cap; /* hba capabilities */
+ u32 max_tgts; /* max tgts supported */
+ u16 io_tmo; /* FW Extended timeout */
+ struct completion *wait; /* protected by snic lock */
+};
+
+/*
+ * snic_work item : defined to process asynchronous events
+ */
+struct snic_work {
+ struct work_struct work;
+ u16 ev_id;
+ u64 *ev_data;
+};
+
+/*
+ * snic structure to represent SCSI vNIC
+ */
+struct snic {
+ /* snic specific members */
+ struct list_head list;
+ char name[IFNAMSIZ];
+ atomic_t state;
+ spinlock_t snic_lock;
+ struct completion *remove_wait;
+ bool in_remove;
+ bool stop_link_events; /* stop processing link events */
+
+ /* discovery related */
+ struct snic_disc disc;
+
+ /* Scsi Host info */
+ struct Scsi_Host *shost;
+
+ /* vnic related structures */
+ struct vnic_dev_bar bar0;
+
+ struct vnic_stats *stats;
+ unsigned long stats_time;
+ unsigned long stats_reset_time;
+
+ struct vnic_dev *vdev;
+
+ /* hw resource info */
+ unsigned int wq_count;
+ unsigned int cq_count;
+ unsigned int intr_count;
+ unsigned int err_intr_offset;
+
+ int link_status; /* retrieved from svnic_dev_link_status() */
+ u32 link_down_cnt;
+
+ /* pci related */
+ struct pci_dev *pdev;
+ struct msix_entry msix_entry[SNIC_MSIX_INTR_MAX];
+ struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX];
+
+ /* io related info */
+ mempool_t *req_pool[SNIC_REQ_MAX_CACHES]; /* (??) */
+ ____cacheline_aligned spinlock_t io_req_lock[SNIC_IO_LOCKS];
+
+ /* Maintain snic-specific commands, i.e. cmds with no tag, in spl_cmd_list */
+ ____cacheline_aligned spinlock_t spl_cmd_lock;
+ struct list_head spl_cmd_list;
+
+ unsigned int max_tag_id;
+ atomic_t ios_inflight; /* io in flight counter */
+
+ struct vnic_snic_config config;
+
+ struct work_struct link_work;
+
+ /* firmware information */
+ struct snic_fw_info fwinfo;
+
+ /* Work for processing Target related work */
+ struct work_struct tgt_work;
+
+ /* Work for processing Discovery */
+ struct work_struct disc_work;
+
+ /* stats related */
+ unsigned int reset_stats;
+ atomic64_t io_cmpl_skip;
+ struct snic_stats s_stats; /* Per SNIC driver stats */
+
+ /* platform specific */
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+ struct dentry *stats_host; /* Per snic debugfs root */
+ struct dentry *stats_file; /* Per snic debugfs file */
+ struct dentry *reset_stats_file;/* Per snic reset stats file */
+#endif
+
+ /* completion queue cache line section */
+ ____cacheline_aligned struct vnic_cq cq[SNIC_CQ_MAX];
+
+ /* work queue cache line section */
+ ____cacheline_aligned struct vnic_wq wq[SNIC_WQ_MAX];
+ spinlock_t wq_lock[SNIC_WQ_MAX];
+
+ /* interrupt resource cache line section */
+ ____cacheline_aligned struct vnic_intr intr[SNIC_MSIX_INTR_MAX];
+}; /* end of snic structure */
+
+/*
+ * SNIC Driver's Global Data
+ */
+struct snic_global {
+ struct list_head snic_list;
+ spinlock_t snic_list_lock;
+
+ struct kmem_cache *req_cache[SNIC_REQ_MAX_CACHES];
+
+ struct workqueue_struct *event_q;
+
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+ /* debugfs related global data */
+ struct dentry *trc_root;
+ struct dentry *stats_root;
+
+ struct snic_trc trc ____cacheline_aligned;
+#endif
+};
+
+extern struct snic_global *snic_glob;
+
+int snic_glob_init(void);
+void snic_glob_cleanup(void);
+
+extern struct workqueue_struct *snic_event_queue;
+extern struct device_attribute *snic_attrs[];
+
+int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+int snic_abort_cmd(struct scsi_cmnd *);
+int snic_device_reset(struct scsi_cmnd *);
+int snic_host_reset(struct scsi_cmnd *);
+int snic_reset(struct Scsi_Host *, struct scsi_cmnd *);
+void snic_shutdown_scsi_cleanup(struct snic *);
+
+
+int snic_request_intr(struct snic *);
+void snic_free_intr(struct snic *);
+int snic_set_intr_mode(struct snic *);
+void snic_clear_intr_mode(struct snic *);
+
+int snic_fwcq_cmpl_handler(struct snic *, int);
+int snic_wq_cmpl_handler(struct snic *, int);
+void snic_free_wq_buf(struct vnic_wq *, struct vnic_wq_buf *);
+
+
+void snic_log_q_error(struct snic *);
+void snic_handle_link_event(struct snic *);
+void snic_handle_link(struct work_struct *);
+
+int snic_queue_exch_ver_req(struct snic *);
+int snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
+
+int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len);
+
+void snic_handle_untagged_req(struct snic *, struct snic_req_info *);
+void snic_release_untagged_req(struct snic *, struct snic_req_info *);
+void snic_free_all_untagged_reqs(struct snic *);
+int snic_get_conf(struct snic *);
+void snic_set_state(struct snic *, enum snic_state);
+int snic_get_state(struct snic *);
+const char *snic_state_to_str(unsigned int);
+void snic_hex_dump(char *, char *, int);
+void snic_print_desc(const char *fn, char *os_buf, int len);
+const char *show_opcode_name(int val);
+#endif /* _SNIC_H_ */
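
A worked example of the special-request tag encoding defined above (illustrative only): high bits mark the request type while the low 24 bits always recover the original scsi_cmnd tag.

#include <linux/bitops.h>	/* BIT() */
#include <linux/types.h>

static u32 snic_make_abort_tag(u32 io_tag)
{
	return SNIC_TAG_ABORT | io_tag;	/* e.g. 5 -> BIT(30) | 5 */
}

static u32 snic_orig_tag(u32 tag)
{
	return tag & SNIC_TAG_MASK;	/* BIT(30) | 5 -> 5 */
}
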
diff --git a/drivers/scsi/snic/snic_attrs.c b/drivers/scsi/snic/snic_attrs.c
new file mode 100644
index 000000000000..32d5d556b6f8
--- /dev/null
+++ b/drivers/scsi/snic/snic_attrs.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/string.h>
+#include <linux/device.h>
+
+#include "snic.h"
+
+static ssize_t
+snic_show_sym_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct snic *snic = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", snic->name);
+}
+
+static ssize_t
+snic_show_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct snic *snic = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ snic_state_str[snic_get_state(snic)]);
+}
+
+static ssize_t
+snic_show_drv_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", SNIC_DRV_VERSION);
+}
+
+static ssize_t
+snic_show_link_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct snic *snic = shost_priv(class_to_shost(dev));
+
+ if (snic->config.xpt_type == SNIC_DAS)
+ snic->link_status = svnic_dev_link_status(snic->vdev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (snic->link_status) ? "Link Up" : "Link Down");
+}
+
+static DEVICE_ATTR(snic_sym_name, S_IRUGO, snic_show_sym_name, NULL);
+static DEVICE_ATTR(snic_state, S_IRUGO, snic_show_state, NULL);
+static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL);
+
+struct device_attribute *snic_attrs[] = {
+ &dev_attr_snic_sym_name,
+ &dev_attr_snic_state,
+ &dev_attr_drv_version,
+ &dev_attr_link_state,
+ NULL,
+};
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
new file mode 100644
index 000000000000..aebe75320ed3
--- /dev/null
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/mempool.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/ctype.h>
+
+#include "snic_io.h"
+#include "snic.h"
+#include "cq_enet_desc.h"
+#include "snic_fwint.h"
+
+/*
+ * snic_handle_link : Handles link flaps.
+ */
+void
+snic_handle_link(struct work_struct *work)
+{
+ struct snic *snic = container_of(work, struct snic, link_work);
+
+ if (snic->config.xpt_type != SNIC_DAS) {
+ SNIC_HOST_INFO(snic->shost, "Link Event Received.\n");
+ SNIC_ASSERT_NOT_IMPL(1);
+
+ return;
+ }
+
+ snic->link_status = svnic_dev_link_status(snic->vdev);
+ snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev);
+ SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n",
+ ((snic->link_status) ? "Up" : "Down"));
+}
+
+
+/*
+ * snic_ver_enc : Encodes a version string into an int.
+ * The version string is dotted-decimal, similar to a netmask string.
+ */
+static int
+snic_ver_enc(const char *s)
+{
+ int v[4] = {0};
+ int i = 0, x = 0;
+ char c;
+ const char *p = s;
+
+ /* validate version string */
+ if ((strlen(s) > 15) || (strlen(s) < 7))
+ goto end;
+
+ while ((c = *p++)) {
+ if (c == '.') {
+ i++;
+ continue;
+ }
+
+		if (i > 3 || !isdigit(c))
+ goto end;
+
+ v[i] = v[i] * 10 + (c - '0');
+ }
+
+ /* validate sub version numbers */
+ for (i = 3; i >= 0; i--)
+ if (v[i] > 0xff)
+ goto end;
+
+ x |= (v[0] << 24) | v[1] << 16 | v[2] << 8 | v[3];
+
+end:
+ if (x == 0) {
+ SNIC_ERR("Invalid version string [%s].\n", s);
+
+ return -1;
+ }
+
+ return x;
+} /* end of snic_ver_enc */
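+
+/*
+ * For illustration (hypothetical inputs): each dotted component is
+ * encoded into one byte, most significant first, so
+ *
+ *	snic_ver_enc("1.2.3.4")  == 0x01020304
+ *	snic_ver_enc("2.0.0.16") == 0x02000010
+ *
+ * Strings shorter than 7 or longer than 15 characters, non-digit
+ * characters, and components above 255 are rejected with -1.
+ */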
+
+/*
+ * snic_queue_exch_ver_req :
+ *
+ * Queues an Exchange Version request that communicates host information
+ * to firmware; the completion carries the firmware version details.
+ */
+int
+snic_queue_exch_ver_req(struct snic *snic)
+{
+ struct snic_req_info *rqi = NULL;
+ struct snic_host_req *req = NULL;
+ u32 ver = 0;
+ int ret = 0;
+
+ SNIC_HOST_INFO(snic->shost, "Exch Ver Req Preparing...\n");
+
+ rqi = snic_req_init(snic, 0);
+ if (!rqi) {
+ SNIC_HOST_ERR(snic->shost,
+ "Queuing Exch Ver Req failed, err = %d\n",
+ ret);
+
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ req = rqi_to_req(rqi);
+
+ /* Initialize snic_host_req */
+ snic_io_hdr_enc(&req->hdr, SNIC_REQ_EXCH_VER, 0, SCSI_NO_TAG,
+ snic->config.hid, 0, (ulong)rqi);
+ ver = snic_ver_enc(SNIC_DRV_VERSION);
+ req->u.exch_ver.drvr_ver = cpu_to_le32(ver);
+ req->u.exch_ver.os_type = cpu_to_le32(SNIC_OS_LINUX);
+
+ snic_handle_untagged_req(snic, rqi);
+
+ ret = snic_queue_wq_desc(snic, req, sizeof(*req));
+ if (ret) {
+ snic_release_untagged_req(snic, rqi);
+ SNIC_HOST_ERR(snic->shost,
+ "Queuing Exch Ver Req failed, err = %d\n",
+ ret);
+ goto error;
+ }
+
+ SNIC_HOST_INFO(snic->shost, "Exch Ver Req is issued. ret = %d\n", ret);
+
+error:
+ return ret;
+} /* end of snic_queue_exch_ver_req */
+
+/*
+ * snic_io_exch_ver_cmpl_handler
+ */
+int
+snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
+{
+ struct snic_req_info *rqi = NULL;
+ struct snic_exch_ver_rsp *exv_cmpl = &fwreq->u.exch_ver_cmpl;
+ u8 typ, hdr_stat;
+ u32 cmnd_id, hid, max_sgs;
+ ulong ctx = 0;
+ unsigned long flags;
+ int ret = 0;
+
+ SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n");
+ snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
+ SNIC_BUG_ON(snic->config.hid != hid);
+ rqi = (struct snic_req_info *) ctx;
+
+ if (hdr_stat) {
+ SNIC_HOST_ERR(snic->shost,
+ "Exch Ver Completed w/ err status %d\n",
+ hdr_stat);
+
+ goto exch_cmpl_end;
+ }
+
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ snic->fwinfo.fw_ver = le32_to_cpu(exv_cmpl->version);
+ snic->fwinfo.hid = le32_to_cpu(exv_cmpl->hid);
+ snic->fwinfo.max_concur_ios = le32_to_cpu(exv_cmpl->max_concur_ios);
+ snic->fwinfo.max_sgs_per_cmd = le32_to_cpu(exv_cmpl->max_sgs_per_cmd);
+ snic->fwinfo.max_io_sz = le32_to_cpu(exv_cmpl->max_io_sz);
+ snic->fwinfo.max_tgts = le32_to_cpu(exv_cmpl->max_tgts);
+ snic->fwinfo.io_tmo = le16_to_cpu(exv_cmpl->io_timeout);
+
+ SNIC_HOST_INFO(snic->shost,
+ "vers %u hid %u max_concur_ios %u max_sgs_per_cmd %u max_io_sz %u max_tgts %u fw tmo %u\n",
+ snic->fwinfo.fw_ver,
+ snic->fwinfo.hid,
+ snic->fwinfo.max_concur_ios,
+ snic->fwinfo.max_sgs_per_cmd,
+ snic->fwinfo.max_io_sz,
+ snic->fwinfo.max_tgts,
+ snic->fwinfo.io_tmo);
+
+ SNIC_HOST_INFO(snic->shost,
+ "HBA Capabilities = 0x%x\n",
+ le32_to_cpu(exv_cmpl->hba_cap));
+
+ /* Updating SGList size */
+ max_sgs = snic->fwinfo.max_sgs_per_cmd;
+ if (max_sgs && max_sgs < SNIC_MAX_SG_DESC_CNT) {
+ snic->shost->sg_tablesize = max_sgs;
+ SNIC_HOST_INFO(snic->shost, "Max SGs set to %d\n",
+ snic->shost->sg_tablesize);
+ } else if (max_sgs > snic->shost->sg_tablesize) {
+ SNIC_HOST_INFO(snic->shost,
+ "Target type %d Supports Larger Max SGList %d than driver's Max SG List %d.\n",
+ snic->config.xpt_type, max_sgs,
+ snic->shost->sg_tablesize);
+ }
+
+ if (snic->shost->can_queue > snic->fwinfo.max_concur_ios)
+ snic->shost->can_queue = snic->fwinfo.max_concur_ios;
+
+ snic->shost->max_sectors = snic->fwinfo.max_io_sz >> 9;
+ if (snic->fwinfo.wait)
+ complete(snic->fwinfo.wait);
+
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+
+exch_cmpl_end:
+ snic_release_untagged_req(snic, rqi);
+
+ SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat);
+
+ return ret;
+} /* end of snic_io_exch_ver_cmpl_handler */
+
+/*
+ * snic_get_conf
+ *
+ * Synchronous call that retrieves the snic parameters from firmware.
+ */
+int
+snic_get_conf(struct snic *snic)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ unsigned long flags;
+ int ret;
+ int nr_retries = 3;
+
+ SNIC_HOST_INFO(snic->shost, "Retrieving snic params.\n");
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ memset(&snic->fwinfo, 0, sizeof(snic->fwinfo));
+ snic->fwinfo.wait = &wait;
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+
+ /* Additional delay to handle HW Resource initialization. */
+ msleep(50);
+
+	/*
+	 * The exch ver req can be ignored by the FW if HW resource
+	 * initialization is in progress; hence retry.
+	 */
+ do {
+ ret = snic_queue_exch_ver_req(snic);
+ if (ret)
+ return ret;
+
+ wait_for_completion_timeout(&wait, msecs_to_jiffies(2000));
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ ret = (snic->fwinfo.fw_ver != 0) ? 0 : -ETIMEDOUT;
+ if (ret)
+ SNIC_HOST_ERR(snic->shost,
+				      "Failed to retrieve snic params.\n");
+
+ /* Unset fwinfo.wait, on success or on last retry */
+ if (ret == 0 || nr_retries == 1)
+ snic->fwinfo.wait = NULL;
+
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+ } while (ret && --nr_retries);
+
+ return ret;
+} /* end of snic_get_conf */
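+
+/*
+ * Note on the handshake above: snic_get_conf() parks an on-stack
+ * completion in fwinfo.wait, and snic_io_exch_ver_cmpl_handler()
+ * completes it under snic_lock once the firmware response has been
+ * decoded; a non-zero fwinfo.fw_ver after the wait therefore
+ * indicates that valid parameters were received.
+ */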
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
new file mode 100644
index 000000000000..1686f0196251
--- /dev/null
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+
+#include "snic.h"
+
+/*
+ * snic_debugfs_init - Initialize debugfs for snic debug logging
+ *
+ * Description:
+ * When debugfs is configured this routine sets up the snic debugfs
+ * filesystem. If not already created, this routine creates the
+ * snic directory and the statistics directory used for trace buffer
+ * and stats logging.
+ */
+
+int
+snic_debugfs_init(void)
+{
+ int rc = -1;
+ struct dentry *de = NULL;
+
+ de = debugfs_create_dir("snic", NULL);
+ if (!de) {
+ SNIC_DBG("Cannot create debugfs root\n");
+
+ return rc;
+ }
+ snic_glob->trc_root = de;
+
+ de = debugfs_create_dir("statistics", snic_glob->trc_root);
+ if (!de) {
+ SNIC_DBG("Cannot create Statistics directory\n");
+
+ return rc;
+ }
+ snic_glob->stats_root = de;
+
+ rc = 0;
+
+ return rc;
+} /* end of snic_debugfs_init */
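+
+/*
+ * With debugfs mounted at its usual location, a successful init
+ * produces the following hierarchy (per-host entries are added
+ * later by snic_stats_debugfs_init()):
+ *
+ *	/sys/kernel/debug/snic/
+ *	/sys/kernel/debug/snic/statistics/
+ */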
+
+/*
+ * snic_debugfs_term - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When debugfs is configured this routine removes the debugfs
+ * filesystem elements that are specific to snic.
+ */
+void
+snic_debugfs_term(void)
+{
+ debugfs_remove(snic_glob->stats_root);
+ snic_glob->stats_root = NULL;
+
+ debugfs_remove(snic_glob->trc_root);
+ snic_glob->trc_root = NULL;
+}
+
+/*
+ * snic_reset_stats_open - Open the reset_stats file
+ */
+static int
+snic_reset_stats_open(struct inode *inode, struct file *filp)
+{
+ SNIC_BUG_ON(!inode->i_private);
+ filp->private_data = inode->i_private;
+
+ return 0;
+}
+
+/*
+ * snic_reset_stats_read - Read the reset_stats debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of the reset_stats variable into a
+ * local buffer @buf, then copies up to @cnt bytes from @buf to @ubuf,
+ * starting at offset @ppos.
+ *
+ * Returns:
+ * The amount of data that was read.
+ */
+static ssize_t
+snic_reset_stats_read(struct file *filp,
+ char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct snic *snic = (struct snic *) filp->private_data;
+ char buf[64];
+ int len;
+
+ len = sprintf(buf, "%u\n", snic->reset_stats);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * snic_reset_stats_write - Write to the reset_stats debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine copies data from the user buffer @ubuf into @buf and,
+ * for a non-zero value, resets the cumulative stats of the snic.
+ *
+ * Returns:
+ * The amount of data that was written.
+ */
+static ssize_t
+snic_reset_stats_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct snic *snic = (struct snic *) filp->private_data;
+ struct snic_stats *stats = &snic->s_stats;
+ u64 *io_stats_p = (u64 *) &stats->io;
+ u64 *fw_stats_p = (u64 *) &stats->fw;
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = '\0';
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ snic->reset_stats = val;
+
+ if (snic->reset_stats) {
+		/* The skip counter avoids discrepancies between the Num
+		 * IOs and IO Completions stats: completions for IOs that
+		 * were still active at reset_stats time are not counted.
+		 */
+ atomic64_set(&snic->io_cmpl_skip,
+ atomic64_read(&stats->io.active));
+ memset(&stats->abts, 0, sizeof(struct snic_abort_stats));
+ memset(&stats->reset, 0, sizeof(struct snic_reset_stats));
+ memset(&stats->misc, 0, sizeof(struct snic_misc_stats));
+ memset(io_stats_p+1,
+ 0,
+ sizeof(struct snic_io_stats) - sizeof(u64));
+ memset(fw_stats_p+1,
+ 0,
+ sizeof(struct snic_fw_stats) - sizeof(u64));
+ }
+
+ (*ppos)++;
+
+ SNIC_HOST_INFO(snic->shost, "Reset Op: Driver statistics.\n");
+
+ return cnt;
+}
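+
+/*
+ * Typical use from user space, assuming the default debugfs mount
+ * and a hypothetical host0:
+ *
+ *	echo 1 > /sys/kernel/debug/snic/statistics/host0/reset_stats
+ *
+ * A non-zero write clears the cumulative abort, reset, misc, IO and
+ * firmware counters while preserving the active IO/request counts.
+ */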
+
+static int
+snic_reset_stats_release(struct inode *inode, struct file *filp)
+{
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+/*
+ * snic_stats_show - Formats and prints the per-host driver stats.
+ */
+static int
+snic_stats_show(struct seq_file *sfp, void *data)
+{
+ struct snic *snic = (struct snic *) sfp->private;
+ struct snic_stats *stats = &snic->s_stats;
+ struct timespec last_isr_tms, last_ack_tms;
+ u64 maxio_tm;
+ int i;
+
+ /* Dump IO Stats */
+ seq_printf(sfp,
+ "------------------------------------------\n"
+ "\t\t IO Statistics\n"
+ "------------------------------------------\n");
+
+ maxio_tm = (u64) atomic64_read(&stats->io.max_time);
+ seq_printf(sfp,
+ "Active IOs : %lld\n"
+ "Max Active IOs : %lld\n"
+ "Total IOs : %lld\n"
+ "IOs Completed : %lld\n"
+ "IOs Failed : %lld\n"
+ "IOs Not Found : %lld\n"
+ "Memory Alloc Failures : %lld\n"
+ "REQs Null : %lld\n"
+ "SCSI Cmd Pointers Null : %lld\n"
+ "Max SGL for any IO : %lld\n"
+ "Max IO Size : %lld Sectors\n"
+ "Max Queuing Time : %lld\n"
+ "Max Completion Time : %lld\n"
+ "Max IO Process Time(FW) : %lld (%u msec)\n",
+ (u64) atomic64_read(&stats->io.active),
+ (u64) atomic64_read(&stats->io.max_active),
+ (u64) atomic64_read(&stats->io.num_ios),
+ (u64) atomic64_read(&stats->io.compl),
+ (u64) atomic64_read(&stats->io.fail),
+ (u64) atomic64_read(&stats->io.io_not_found),
+ (u64) atomic64_read(&stats->io.alloc_fail),
+ (u64) atomic64_read(&stats->io.req_null),
+ (u64) atomic64_read(&stats->io.sc_null),
+ (u64) atomic64_read(&stats->io.max_sgl),
+ (u64) atomic64_read(&stats->io.max_io_sz),
+ (u64) atomic64_read(&stats->io.max_qtime),
+ (u64) atomic64_read(&stats->io.max_cmpl_time),
+ maxio_tm,
+ jiffies_to_msecs(maxio_tm));
+
+ seq_puts(sfp, "\nSGL Counters\n");
+
+ for (i = 0; i < SNIC_MAX_SG_DESC_CNT; i++) {
+ seq_printf(sfp,
+ "%10lld ",
+ (u64) atomic64_read(&stats->io.sgl_cnt[i]));
+
+ if ((i + 1) % 8 == 0)
+ seq_puts(sfp, "\n");
+ }
+
+ /* Dump Abort Stats */
+ seq_printf(sfp,
+ "\n-------------------------------------------\n"
+ "\t\t Abort Statistics\n"
+ "---------------------------------------------\n");
+
+ seq_printf(sfp,
+ "Aborts : %lld\n"
+ "Aborts Fail : %lld\n"
+ "Aborts Driver Timeout : %lld\n"
+ "Abort FW Timeout : %lld\n"
+ "Abort IO NOT Found : %lld\n",
+ (u64) atomic64_read(&stats->abts.num),
+ (u64) atomic64_read(&stats->abts.fail),
+ (u64) atomic64_read(&stats->abts.drv_tmo),
+ (u64) atomic64_read(&stats->abts.fw_tmo),
+ (u64) atomic64_read(&stats->abts.io_not_found));
+
+ /* Dump Reset Stats */
+ seq_printf(sfp,
+ "\n-------------------------------------------\n"
+ "\t\t Reset Statistics\n"
+ "---------------------------------------------\n");
+
+ seq_printf(sfp,
+ "HBA Resets : %lld\n"
+ "HBA Reset Cmpls : %lld\n"
+ "HBA Reset Fail : %lld\n",
+ (u64) atomic64_read(&stats->reset.hba_resets),
+ (u64) atomic64_read(&stats->reset.hba_reset_cmpl),
+ (u64) atomic64_read(&stats->reset.hba_reset_fail));
+
+ /* Dump Firmware Stats */
+ seq_printf(sfp,
+ "\n-------------------------------------------\n"
+ "\t\t Firmware Statistics\n"
+ "---------------------------------------------\n");
+
+ seq_printf(sfp,
+ "Active FW Requests : %lld\n"
+ "Max FW Requests : %lld\n"
+ "FW Out Of Resource Errs : %lld\n"
+ "FW IO Errors : %lld\n"
+ "FW SCSI Errors : %lld\n",
+ (u64) atomic64_read(&stats->fw.actv_reqs),
+ (u64) atomic64_read(&stats->fw.max_actv_reqs),
+ (u64) atomic64_read(&stats->fw.out_of_res),
+ (u64) atomic64_read(&stats->fw.io_errs),
+ (u64) atomic64_read(&stats->fw.scsi_errs));
+
+
+	/* Dump Miscellaneous Stats */
+ seq_printf(sfp,
+ "\n---------------------------------------------\n"
+ "\t\t Other Statistics\n"
+		   "---------------------------------------------\n");
+
+ jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms);
+ jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms);
+
+ seq_printf(sfp,
+ "Last ISR Time : %llu (%8lu.%8lu)\n"
+ "Last Ack Time : %llu (%8lu.%8lu)\n"
+ "ISRs : %llu\n"
+ "Max CQ Entries : %lld\n"
+ "Data Count Mismatch : %lld\n"
+ "IOs w/ Timeout Status : %lld\n"
+ "IOs w/ Aborted Status : %lld\n"
+ "IOs w/ SGL Invalid Stat : %lld\n"
+ "WQ Desc Alloc Fail : %lld\n"
+ "Queue Full : %lld\n"
+ "Target Not Ready : %lld\n",
+ (u64) stats->misc.last_isr_time,
+ last_isr_tms.tv_sec, last_isr_tms.tv_nsec,
+ (u64)stats->misc.last_ack_time,
+ last_ack_tms.tv_sec, last_ack_tms.tv_nsec,
+ (u64) atomic64_read(&stats->misc.isr_cnt),
+ (u64) atomic64_read(&stats->misc.max_cq_ents),
+ (u64) atomic64_read(&stats->misc.data_cnt_mismat),
+ (u64) atomic64_read(&stats->misc.io_tmo),
+ (u64) atomic64_read(&stats->misc.io_aborted),
+ (u64) atomic64_read(&stats->misc.sgl_inval),
+ (u64) atomic64_read(&stats->misc.wq_alloc_fail),
+ (u64) atomic64_read(&stats->misc.qfull),
+ (u64) atomic64_read(&stats->misc.tgt_not_rdy));
+
+ return 0;
+}
+
+/*
+ * snic_stats_open - Open the stats file for specific host
+ *
+ * Description:
+ * This routine opens a debugfs file stats of specific host
+ */
+static int
+snic_stats_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, snic_stats_show, inode->i_private);
+}
+
+static const struct file_operations snic_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = snic_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations snic_reset_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = snic_reset_stats_open,
+ .read = snic_reset_stats_read,
+ .write = snic_reset_stats_write,
+ .release = snic_reset_stats_release,
+};
+
+/*
+ * snic_stats_debugfs_init - Initialize the stats struct and create the
+ * stats files per snic
+ *
+ * Description:
+ * When debugfs is configured this routine sets up the stats files for
+ * a snic. It creates the stats and reset_stats files under the
+ * statistics/host# directory to log per-snic stats.
+ */
+int
+snic_stats_debugfs_init(struct snic *snic)
+{
+ int rc = -1;
+ char name[16];
+ struct dentry *de = NULL;
+
+ snprintf(name, sizeof(name), "host%d", snic->shost->host_no);
+ if (!snic_glob->stats_root) {
+ SNIC_DBG("snic_stats root doesn't exist\n");
+
+ return rc;
+ }
+
+ de = debugfs_create_dir(name, snic_glob->stats_root);
+ if (!de) {
+ SNIC_DBG("Cannot create host directory\n");
+
+ return rc;
+ }
+ snic->stats_host = de;
+
+ de = debugfs_create_file("stats",
+ S_IFREG|S_IRUGO,
+ snic->stats_host,
+ snic,
+ &snic_stats_fops);
+ if (!de) {
+ SNIC_DBG("Cannot create host's stats file\n");
+
+ return rc;
+ }
+ snic->stats_file = de;
+
+ de = debugfs_create_file("reset_stats",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ snic->stats_host,
+ snic,
+ &snic_reset_stats_fops);
+
+ if (!de) {
+ SNIC_DBG("Cannot create host's reset_stats file\n");
+
+ return rc;
+ }
+ snic->reset_stats_file = de;
+ rc = 0;
+
+ return rc;
+} /* end of snic_stats_debugfs_init */
+
+/*
+ * snic_stats_debugfs_remove - Tear down the stats debugfs infrastructure
+ *
+ * Description:
+ * When debugfs is configured this routine removes the debugfs
+ * filesystem elements that are specific to snic stats.
+ */
+void
+snic_stats_debugfs_remove(struct snic *snic)
+{
+ debugfs_remove(snic->stats_file);
+ snic->stats_file = NULL;
+
+ debugfs_remove(snic->reset_stats_file);
+ snic->reset_stats_file = NULL;
+
+ debugfs_remove(snic->stats_host);
+ snic->stats_host = NULL;
+}
+
+/* Trace Facility related API */
+static void *
+snic_trc_seq_start(struct seq_file *sfp, loff_t *pos)
+{
+ return &snic_glob->trc;
+}
+
+static void *
+snic_trc_seq_next(struct seq_file *sfp, void *data, loff_t *pos)
+{
+ return NULL;
+}
+
+static void
+snic_trc_seq_stop(struct seq_file *sfp, void *data)
+{
+}
+
+#define SNIC_TRC_PBLEN 256
+static int
+snic_trc_seq_show(struct seq_file *sfp, void *data)
+{
+ char buf[SNIC_TRC_PBLEN];
+
+ if (snic_get_trc_data(buf, SNIC_TRC_PBLEN) > 0)
+ seq_printf(sfp, "%s\n", buf);
+
+ return 0;
+}
+
+static const struct seq_operations snic_trc_seq_ops = {
+ .start = snic_trc_seq_start,
+ .next = snic_trc_seq_next,
+ .stop = snic_trc_seq_stop,
+ .show = snic_trc_seq_show,
+};
+
+static int
+snic_trc_open(struct inode *inode, struct file *filp)
+{
+ return seq_open(filp, &snic_trc_seq_ops);
+}
+
+static const struct file_operations snic_trc_fops = {
+ .owner = THIS_MODULE,
+ .open = snic_trc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * snic_trc_debugfs_init : creates trace/tracing_enable files for trace
+ * under debugfs
+ */
+int
+snic_trc_debugfs_init(void)
+{
+ struct dentry *de = NULL;
+ int ret = -1;
+
+ if (!snic_glob->trc_root) {
+ SNIC_ERR("Debugfs root directory for snic doesn't exist.\n");
+
+ return ret;
+ }
+
+ de = debugfs_create_bool("tracing_enable",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ snic_glob->trc_root,
+ &snic_glob->trc.enable);
+
+ if (!de) {
+		SNIC_ERR("Cannot create tracing_enable file.\n");
+
+ return ret;
+ }
+ snic_glob->trc.trc_enable = de;
+
+ de = debugfs_create_file("trace",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ snic_glob->trc_root,
+ NULL,
+ &snic_trc_fops);
+
+ if (!de) {
+		SNIC_ERR("Cannot create trace file.\n");
+
+ return ret;
+ }
+ snic_glob->trc.trc_file = de;
+ ret = 0;
+
+ return ret;
+} /* end of snic_trc_debugfs_init */
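+
+/*
+ * Usage sketch, assuming the default debugfs mount:
+ *
+ *	echo 1 > /sys/kernel/debug/snic/tracing_enable
+ *	cat /sys/kernel/debug/snic/trace
+ */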
+
+/*
+ * snic_trc_debugfs_term : cleans up the files created for trace under debugfs
+ */
+void
+snic_trc_debugfs_term(void)
+{
+ debugfs_remove(snic_glob->trc.trc_file);
+ snic_glob->trc.trc_file = NULL;
+
+ debugfs_remove(snic_glob->trc.trc_enable);
+ snic_glob->trc.trc_enable = NULL;
+}
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
new file mode 100644
index 000000000000..5f6321759ad9
--- /dev/null
+++ b/drivers/scsi/snic/snic_disc.c
@@ -0,0 +1,551 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/mempool.h>
+
+#include <scsi/scsi_tcq.h>
+
+#include "snic_disc.h"
+#include "snic.h"
+#include "snic_io.h"
+
+
+/* snic target types */
+static const char * const snic_tgt_type_str[] = {
+ [SNIC_TGT_DAS] = "DAS",
+ [SNIC_TGT_SAN] = "SAN",
+};
+
+static inline const char *
+snic_tgt_type_to_str(int typ)
+{
+ return ((typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN) ?
+ snic_tgt_type_str[typ] : "Unknown");
+}
+
+static const char * const snic_tgt_state_str[] = {
+ [SNIC_TGT_STAT_INIT] = "INIT",
+ [SNIC_TGT_STAT_ONLINE] = "ONLINE",
+ [SNIC_TGT_STAT_OFFLINE] = "OFFLINE",
+ [SNIC_TGT_STAT_DEL] = "DELETION IN PROGRESS",
+};
+
+const char *
+snic_tgt_state_to_str(int state)
+{
+ return ((state >= SNIC_TGT_STAT_INIT && state <= SNIC_TGT_STAT_DEL) ?
+ snic_tgt_state_str[state] : "UNKNOWN");
+}
+
+/*
+ * Initiate report_tgt req desc
+ */
+static void
+snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len,
+ dma_addr_t rsp_buf_pa, ulong ctx)
+{
+ struct snic_sg_desc *sgd = NULL;
+
+
+ snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid,
+ 1, ctx);
+
+ req->u.rpt_tgts.sg_cnt = cpu_to_le16(1);
+ sgd = req_to_sgl(req);
+ sgd[0].addr = cpu_to_le64(rsp_buf_pa);
+ sgd[0].len = cpu_to_le32(len);
+ sgd[0]._resvd = 0;
+ req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd);
+}
+
+/*
+ * snic_queue_report_tgt_req: Queues report target request.
+ */
+static int
+snic_queue_report_tgt_req(struct snic *snic)
+{
+ struct snic_req_info *rqi = NULL;
+ u32 ntgts, buf_len = 0;
+ u8 *buf = NULL;
+ dma_addr_t pa = 0;
+ int ret = 0;
+
+ rqi = snic_req_init(snic, 1);
+ if (!rqi) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (snic->fwinfo.max_tgts)
+ ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id);
+ else
+ ntgts = snic->shost->max_id;
+
+ /* Allocate Response Buffer */
+ SNIC_BUG_ON(ntgts == 0);
+ buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;
+
+ buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
+ if (!buf) {
+ snic_req_free(snic, rqi);
+ SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");
+
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
+
+ pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(snic->pdev, pa)) {
+ kfree(buf);
+ snic_req_free(snic, rqi);
+ SNIC_HOST_ERR(snic->shost,
+ "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
+ buf);
+ ret = -EINVAL;
+
+ goto error;
+ }
+
+
+ SNIC_BUG_ON(pa == 0);
+ rqi->sge_va = (ulong) buf;
+
+ snic_report_tgt_init(rqi->req,
+ snic->config.hid,
+ buf,
+ buf_len,
+ pa,
+ (ulong)rqi);
+
+ snic_handle_untagged_req(snic, rqi);
+
+ ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
+ if (ret) {
+ pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
+ kfree(buf);
+ rqi->sge_va = 0;
+ snic_release_untagged_req(snic, rqi);
+ SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n");
+
+ goto error;
+ }
+
+ SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n");
+
+ return ret;
+
+error:
+ SNIC_HOST_ERR(snic->shost,
+ "Queuing Report Targets Failed, err = %d\n",
+ ret);
+ return ret;
+} /* end of snic_queue_report_tgt_req */
+
+/* call into SML */
+static void
+snic_scsi_scan_tgt(struct work_struct *work)
+{
+ struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
+ struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
+ unsigned long flags;
+
+ SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id);
+ scsi_scan_target(&tgt->dev,
+ tgt->channel,
+ tgt->scsi_tgt_id,
+ SCAN_WILD_CARD,
+ 1);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+} /* end of snic_scsi_scan_tgt */
+
+/*
+ * snic_tgt_lookup :
+ */
+static struct snic_tgt *
+snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid)
+{
+ struct list_head *cur, *nxt;
+ struct snic_tgt *tgt = NULL;
+
+ list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
+ tgt = list_entry(cur, struct snic_tgt, list);
+ if (tgt->id == le32_to_cpu(tgtid->tgt_id))
+ return tgt;
+ tgt = NULL;
+ }
+
+ return tgt;
+} /* end of snic_tgt_lookup */
+
+/*
+ * snic_tgt_dev_release : Called on dropping last ref for snic_tgt object
+ */
+void
+snic_tgt_dev_release(struct device *dev)
+{
+ struct snic_tgt *tgt = dev_to_tgt(dev);
+
+ SNIC_HOST_INFO(snic_tgt_to_shost(tgt),
+ "Target Device ID %d (%s) Permanently Deleted.\n",
+ tgt->id,
+ dev_name(dev));
+
+ SNIC_BUG_ON(!list_empty(&tgt->list));
+ kfree(tgt);
+}
+
+/*
+ * snic_tgt_del : work function to delete snic_tgt
+ */
+static void
+snic_tgt_del(struct work_struct *work)
+{
+ struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
+ struct Scsi_Host *shost = snic_tgt_to_shost(tgt);
+
+ if (tgt->flags & SNIC_TGT_SCAN_PENDING)
+ scsi_flush_work(shost);
+
+ /* Block IOs on child devices, stops new IOs */
+ scsi_target_block(&tgt->dev);
+
+ /* Cleanup IOs */
+ snic_tgt_scsi_abort_io(tgt);
+
+ /* Unblock IOs now, to flush if there are any. */
+ scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE);
+
+ /* Delete SCSI Target and sdevs */
+ scsi_remove_target(&tgt->dev); /* ?? */
+ device_del(&tgt->dev);
+ put_device(&tgt->dev);
+} /* end of snic_tgt_del */
+
+/* snic_tgt_create: checks for the existence of a snic_tgt and
+ * creates one if it does not already exist.
+ */
+static struct snic_tgt *
+snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
+{
+ struct snic_tgt *tgt = NULL;
+ unsigned long flags;
+ int ret;
+
+ tgt = snic_tgt_lookup(snic, tgtid);
+ if (tgt) {
+ /* update the information if required */
+ return tgt;
+ }
+
+ tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
+ if (!tgt) {
+ SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n");
+ ret = -ENOMEM;
+
+ return tgt;
+ }
+
+ INIT_LIST_HEAD(&tgt->list);
+ tgt->id = le32_to_cpu(tgtid->tgt_id);
+ tgt->channel = 0;
+
+ SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN);
+ tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type);
+
+ /*
+ * Plugging into SML Device Tree
+ */
+ tgt->tdata.disc_id = 0;
+ tgt->state = SNIC_TGT_STAT_INIT;
+ device_initialize(&tgt->dev);
+ tgt->dev.parent = get_device(&snic->shost->shost_gendev);
+ tgt->dev.release = snic_tgt_dev_release;
+ INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt);
+ INIT_WORK(&tgt->del_work, snic_tgt_del);
+ switch (tgt->tdata.typ) {
+ case SNIC_TGT_DAS:
+ dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
+ snic->shost->host_no, tgt->channel, tgt->id);
+ break;
+
+ case SNIC_TGT_SAN:
+ dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d",
+ snic->shost->host_no, tgt->channel, tgt->id);
+ break;
+
+ default:
+		SNIC_HOST_INFO(snic->shost, "Unknown target type detected.\n");
+ dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
+ snic->shost->host_no, tgt->channel, tgt->id);
+ break;
+ }
+
+ spin_lock_irqsave(snic->shost->host_lock, flags);
+ list_add_tail(&tgt->list, &snic->disc.tgt_list);
+ tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++;
+ tgt->state = SNIC_TGT_STAT_ONLINE;
+ spin_unlock_irqrestore(snic->shost->host_lock, flags);
+
+ SNIC_HOST_INFO(snic->shost,
+ "Tgt %d, type = %s detected. Adding..\n",
+ tgt->id, snic_tgt_type_to_str(tgt->tdata.typ));
+
+ ret = device_add(&tgt->dev);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+			      "Snic Tgt: device_add failed with err = %d\n",
+ ret);
+
+ put_device(&snic->shost->shost_gendev);
+ kfree(tgt);
+ tgt = NULL;
+
+ return tgt;
+ }
+
+ SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev));
+
+ scsi_queue_work(snic->shost, &tgt->scan_work);
+
+ return tgt;
+} /* end of snic_tgt_create */
+
+/* Handler for discovery */
+void
+snic_handle_tgt_disc(struct work_struct *work)
+{
+ struct snic *snic = container_of(work, struct snic, tgt_work);
+ struct snic_tgt_id *tgtid = NULL;
+ struct snic_tgt *tgt = NULL;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ if (snic->in_remove) {
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+ kfree(snic->disc.rtgt_info);
+
+ return;
+ }
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+
+ mutex_lock(&snic->disc.mutex);
+ /* Discover triggered during disc in progress */
+ if (snic->disc.req_cnt) {
+ snic->disc.state = SNIC_DISC_DONE;
+ snic->disc.req_cnt = 0;
+ mutex_unlock(&snic->disc.mutex);
+ kfree(snic->disc.rtgt_info);
+ snic->disc.rtgt_info = NULL;
+
+ SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n");
+ /* Start Discovery Again */
+ snic_disc_start(snic);
+
+ return;
+ }
+
+ tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info;
+
+ SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL);
+
+ for (i = 0; i < snic->disc.rtgt_cnt; i++) {
+ tgt = snic_tgt_create(snic, &tgtid[i]);
+ if (!tgt) {
+ int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid);
+
+ SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n");
+ snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz);
+ break;
+ }
+ }
+
+ snic->disc.rtgt_info = NULL;
+ snic->disc.state = SNIC_DISC_DONE;
+ mutex_unlock(&snic->disc.mutex);
+
+ SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n");
+
+ kfree(tgtid);
+} /* end of snic_handle_tgt_disc */
+
+
+int
+snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
+{
+
+ u8 typ, cmpl_stat;
+ u32 cmnd_id, hid, tgt_cnt = 0;
+ ulong ctx;
+ struct snic_req_info *rqi = NULL;
+ struct snic_tgt_id *tgtid;
+ int i, ret = 0;
+
+ snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx);
+ rqi = (struct snic_req_info *) ctx;
+ tgtid = (struct snic_tgt_id *) rqi->sge_va;
+
+ tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt);
+ if (tgt_cnt == 0) {
+ SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n");
+ ret = 1;
+
+ goto end;
+ }
+
+ /* printing list of targets here */
+ SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt);
+
+ SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts);
+
+ for (i = 0; i < tgt_cnt; i++)
+ SNIC_HOST_INFO(snic->shost,
+ "Tgt id = 0x%x\n",
+ le32_to_cpu(tgtid[i].tgt_id));
+
+ /*
+ * Queue work for further processing,
+ * Response Buffer Memory is freed after creating targets
+ */
+ snic->disc.rtgt_cnt = tgt_cnt;
+ snic->disc.rtgt_info = (u8 *) tgtid;
+ queue_work(snic_glob->event_q, &snic->tgt_work);
+ ret = 0;
+
+end:
+ /* Unmap Response Buffer */
+ snic_pci_unmap_rsp_buf(snic, rqi);
+ if (ret)
+ kfree(tgtid);
+
+ rqi->sge_va = 0;
+ snic_release_untagged_req(snic, rqi);
+
+ return ret;
+} /* end of snic_report_tgt_cmpl_handler */
+
+/* Discovery init fn */
+void
+snic_disc_init(struct snic_disc *disc)
+{
+ INIT_LIST_HEAD(&disc->tgt_list);
+ mutex_init(&disc->mutex);
+ disc->disc_id = 0;
+ disc->nxt_tgt_id = 0;
+ disc->state = SNIC_DISC_INIT;
+ disc->req_cnt = 0;
+ disc->rtgt_cnt = 0;
+ disc->rtgt_info = NULL;
+ disc->cb = NULL;
+} /* end of snic_disc_init */
+
+/* Discovery, uninit fn */
+void
+snic_disc_term(struct snic *snic)
+{
+ struct snic_disc *disc = &snic->disc;
+
+ mutex_lock(&disc->mutex);
+ if (disc->req_cnt) {
+ disc->req_cnt = 0;
+ SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n");
+ }
+ mutex_unlock(&disc->mutex);
+}
+
+/*
+ * snic_disc_start: Discovery Start ...
+ */
+int
+snic_disc_start(struct snic *snic)
+{
+ struct snic_disc *disc = &snic->disc;
+ int ret = 0;
+
+ SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");
+
+ mutex_lock(&disc->mutex);
+ if (disc->state == SNIC_DISC_PENDING) {
+ disc->req_cnt++;
+ mutex_unlock(&disc->mutex);
+
+ return ret;
+ }
+ disc->state = SNIC_DISC_PENDING;
+ mutex_unlock(&disc->mutex);
+
+ ret = snic_queue_report_tgt_req(snic);
+ if (ret)
+ SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret);
+
+ return ret;
+} /* end of snic_disc_start */
+
+/*
+ * snic_handle_disc : Discovery work function
+ */
+void
+snic_handle_disc(struct work_struct *work)
+{
+ struct snic *snic = container_of(work, struct snic, disc_work);
+ int ret = 0;
+
+ SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n");
+
+	ret = snic_disc_start(snic);
+	if (!ret)
+		return;
+
+	SNIC_HOST_ERR(snic->shost,
+		      "disc_work: Discovery Failed w/ err = %d\n",
+		      ret);
+} /* end of snic_handle_disc */
+
+/*
+ * snic_tgt_del_all : cleanup all snic targets
+ * Called on unbinding the interface
+ */
+void
+snic_tgt_del_all(struct snic *snic)
+{
+ struct snic_tgt *tgt = NULL;
+ struct list_head *cur, *nxt;
+ unsigned long flags;
+
+ mutex_lock(&snic->disc.mutex);
+ spin_lock_irqsave(snic->shost->host_lock, flags);
+
+ list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
+ tgt = list_entry(cur, struct snic_tgt, list);
+ tgt->state = SNIC_TGT_STAT_DEL;
+ list_del_init(&tgt->list);
+ SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id);
+ queue_work(snic_glob->event_q, &tgt->del_work);
+ tgt = NULL;
+ }
+ spin_unlock_irqrestore(snic->shost->host_lock, flags);
+
+ scsi_flush_work(snic->shost);
+ mutex_unlock(&snic->disc.mutex);
+} /* end of snic_tgt_del_all */
diff --git a/drivers/scsi/snic/snic_disc.h b/drivers/scsi/snic/snic_disc.h
new file mode 100644
index 000000000000..97fa3f5c5bb4
--- /dev/null
+++ b/drivers/scsi/snic/snic_disc.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __SNIC_DISC_H
+#define __SNIC_DISC_H
+
+#include "snic_fwint.h"
+
+enum snic_disc_state {
+ SNIC_DISC_NONE,
+ SNIC_DISC_INIT,
+ SNIC_DISC_PENDING,
+ SNIC_DISC_DONE
+};
+
+struct snic;
+struct snic_disc {
+ struct list_head tgt_list;
+ enum snic_disc_state state;
+ struct mutex mutex;
+ u16 disc_id;
+ u8 req_cnt;
+ u32 nxt_tgt_id;
+ u32 rtgt_cnt;
+ u8 *rtgt_info;
+ struct delayed_work disc_timeout;
+ void (*cb)(struct snic *);
+};
+
+#define SNIC_TGT_NAM_LEN 16
+
+enum snic_tgt_state {
+ SNIC_TGT_STAT_NONE,
+ SNIC_TGT_STAT_INIT,
+ SNIC_TGT_STAT_ONLINE, /* Target is Online */
+ SNIC_TGT_STAT_OFFLINE, /* Target is Offline */
+ SNIC_TGT_STAT_DEL,
+};
+
+struct snic_tgt_priv {
+ struct list_head list;
+ enum snic_tgt_type typ;
+ u16 disc_id;
+	char name[SNIC_TGT_NAM_LEN];
+
+ union {
+ /*DAS Target specific info */
+ /*SAN Target specific info */
+		u8 dummy;
+ } u;
+};
+
+/* snic tgt flags */
+#define SNIC_TGT_SCAN_PENDING 0x01
+
+struct snic_tgt {
+ struct list_head list;
+ u16 id;
+ u16 channel;
+ u32 flags;
+ u32 scsi_tgt_id;
+ enum snic_tgt_state state;
+ struct device dev;
+ struct work_struct scan_work;
+ struct work_struct del_work;
+ struct snic_tgt_priv tdata;
+};
+
+
+struct snic_fw_req;
+
+void snic_disc_init(struct snic_disc *);
+int snic_disc_start(struct snic *);
+void snic_disc_term(struct snic *);
+int snic_report_tgt_cmpl_handler(struct snic *, struct snic_fw_req *);
+int snic_tgtinfo_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq);
+void snic_process_report_tgts_rsp(struct work_struct *);
+void snic_handle_tgt_disc(struct work_struct *);
+void snic_handle_disc(struct work_struct *);
+void snic_tgt_dev_release(struct device *);
+void snic_tgt_del_all(struct snic *);
+
+#define dev_to_tgt(d) \
+ container_of(d, struct snic_tgt, dev)
+
+static inline int
+is_snic_target(struct device *dev)
+{
+ return dev->release == snic_tgt_dev_release;
+}
+
+#define starget_to_tgt(st) \
+ (is_snic_target(((struct scsi_target *) st)->dev.parent) ? \
+ dev_to_tgt(st->dev.parent) : NULL)
+
+#define snic_tgt_to_shost(t) \
+ dev_to_shost(t->dev.parent)
+
+static inline int
+snic_tgt_chkready(struct snic_tgt *tgt)
+{
+ if (tgt->state == SNIC_TGT_STAT_ONLINE)
+ return 0;
+ else
+ return DID_NO_CONNECT << 16;
+}
+
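+/*
+ * Illustrative caller (hypothetical, mirroring typical SCSI LLD use):
+ * a queuecommand-style path gates IO on the target state, e.g.
+ *
+ *	ret = snic_tgt_chkready(tgt);
+ *	if (ret) {
+ *		sc->result = ret;
+ *		sc->scsi_done(sc);
+ *		return 0;
+ *	}
+ */
+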
+const char *snic_tgt_state_to_str(int);
+int snic_tgt_scsi_abort_io(struct snic_tgt *);
+#endif /* end of __SNIC_DISC_H */
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
new file mode 100644
index 000000000000..2cfaf2dc915f
--- /dev/null
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -0,0 +1,525 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __SNIC_FWINT_H
+#define __SNIC_FWINT_H
+
+#define SNIC_CDB_LEN	32 /* SCSI CDB size 32; also accommodates 16-byte CDBs */
+#define LUN_ADDR_LEN 8
+
+/*
+ * Command entry type
+ */
+enum snic_io_type {
+ /*
+ * Initiator request types
+ */
+ SNIC_REQ_REPORT_TGTS = 0x2, /* Report Targets */
+ SNIC_REQ_ICMND, /* Initiator command for SCSI IO */
+ SNIC_REQ_ITMF, /* Initiator command for Task Mgmt */
+ SNIC_REQ_HBA_RESET, /* SNIC Reset */
+ SNIC_REQ_EXCH_VER, /* Exchange Version Information */
+ SNIC_REQ_TGT_INFO, /* Backend/Target Information */
+ SNIC_REQ_BOOT_LUNS,
+
+ /*
+ * Response type
+ */
+ SNIC_RSP_REPORT_TGTS_CMPL = 0x12,/* Report Targets Completion */
+ SNIC_RSP_ICMND_CMPL, /* SCSI IO Completion */
+ SNIC_RSP_ITMF_CMPL, /* Task Management Completion */
+ SNIC_RSP_HBA_RESET_CMPL, /* SNIC Reset Completion */
+ SNIC_RSP_EXCH_VER_CMPL, /* Exchange Version Completion*/
+ SNIC_RSP_BOOT_LUNS_CMPL,
+
+ /*
+ * Misc Request types
+ */
+ SNIC_MSG_ACK = 0x80, /* Ack: snic_notify_msg */
+ SNIC_MSG_ASYNC_EVNOTIFY, /* Asynchronous Event Notification */
+}; /* end of enum snic_io_type */
+
+
+/*
+ * Header status codes from firmware
+ */
+enum snic_io_status {
+ SNIC_STAT_IO_SUCCESS = 0, /* request was successful */
+
+ /*
+ * If a request to the fw is rejected, the original request header
+ * will be returned with the status set to one of the following:
+ */
+ SNIC_STAT_INVALID_HDR, /* header contains invalid data */
+ SNIC_STAT_OUT_OF_RES, /* out of resources to complete request */
+ SNIC_STAT_INVALID_PARM, /* some parameter in request is not valid */
+ SNIC_STAT_REQ_NOT_SUP, /* req type is not supported */
+ SNIC_STAT_IO_NOT_FOUND, /* requested IO was not found */
+
+ /*
+ * Once a request is processed, the fw will usually return
+ * a cmpl message type. In cases where errors occurred,
+ * the header status would be filled in with one of the following:
+ */
+ SNIC_STAT_ABORTED, /* req was aborted */
+ SNIC_STAT_TIMEOUT, /* req was timed out */
+ SNIC_STAT_SGL_INVALID, /* req was aborted due to sgl error */
+	SNIC_STAT_DATA_CNT_MISMATCH, /* recv/sent more/less data than expected */
+ SNIC_STAT_FW_ERR, /* req was terminated due to fw error */
+ SNIC_STAT_ITMF_REJECT, /* itmf req was rejected by target */
+ SNIC_STAT_ITMF_FAIL, /* itmf req was failed */
+ SNIC_STAT_ITMF_INCORRECT_LUN, /* itmf req has incorrect LUN id*/
+ SNIC_STAT_CMND_REJECT, /* req was invalid and rejected */
+ SNIC_STAT_DEV_OFFLINE, /* req sent to offline device */
+ SNIC_STAT_NO_BOOTLUN,
+ SNIC_STAT_SCSI_ERR, /* SCSI error returned by Target. */
+ SNIC_STAT_NOT_READY, /* sNIC Subsystem is not ready */
+ SNIC_STAT_FATAL_ERROR, /* sNIC is in unrecoverable state */
+}; /* end of enum snic_io_status */
+
+/*
+ * snic_io_hdr : host <--> firmware
+ *
+ * Any message queued to firmware carries the following request
+ * header.
+ */
+struct snic_io_hdr {
+ __le32 hid;
+ __le32 cmnd_id; /* tag here */
+ ulong init_ctx; /* initiator context */
+ u8 type; /* request/response type */
+ u8 status; /* header status entry */
+	u8	protocol;	/* Protocol specific, may be needed for RoCE */
+ u8 flags;
+ __le16 sg_cnt;
+ u16 resvd;
+};
+
+/* auxiliary function for encoding the snic_io_hdr */
+static inline void
+snic_io_hdr_enc(struct snic_io_hdr *hdr, u8 typ, u8 status, u32 id, u32 hid,
+ u16 sg_cnt, ulong ctx)
+{
+ hdr->type = typ;
+ hdr->status = status;
+ hdr->protocol = 0;
+ hdr->hid = cpu_to_le32(hid);
+ hdr->cmnd_id = cpu_to_le32(id);
+ hdr->sg_cnt = cpu_to_le16(sg_cnt);
+ hdr->init_ctx = ctx;
+ hdr->flags = 0;
+}
+
+/* auxiliary function for decoding the snic_io_hdr */
+static inline void
+snic_io_hdr_dec(struct snic_io_hdr *hdr, u8 *typ, u8 *stat, u32 *cmnd_id,
+ u32 *hid, ulong *ctx)
+{
+ *typ = hdr->type;
+ *stat = hdr->status;
+ *hid = le32_to_cpu(hdr->hid);
+ *cmnd_id = le32_to_cpu(hdr->cmnd_id);
+ *ctx = hdr->init_ctx;
+}
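+
+/*
+ * Round-trip sketch (hypothetical tag/host-id values; rqi stands for
+ * the caller's request-info pointer): what the host encodes into the
+ * header comes back unchanged in the completion, so the initiator
+ * context can be recovered there:
+ *
+ *	struct snic_io_hdr hdr;
+ *	u8 typ, stat;
+ *	u32 id, hid;
+ *	ulong ctx;
+ *
+ *	snic_io_hdr_enc(&hdr, SNIC_REQ_ICMND, 0, 42, 7, 1, (ulong) rqi);
+ *	snic_io_hdr_dec(&hdr, &typ, &stat, &id, &hid, &ctx);
+ *	now typ == SNIC_REQ_ICMND, id == 42, hid == 7, ctx == (ulong) rqi
+ */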
+
+/*
+ * snic_host_info: host -> firmware
+ *
+ * Used for sending host information to firmware, and request fw version
+ */
+struct snic_exch_ver_req {
+ __le32 drvr_ver; /* for debugging, when fw dump captured */
+ __le32 os_type; /* for OS specific features */
+};
+
+/*
+ * os_type flags
+ * Bit 0-7 : OS information
+ * Bit 8-31: Feature/Capability Information
+ */
+#define SNIC_OS_LINUX 0x1
+#define SNIC_OS_WIN 0x2
+#define SNIC_OS_ESX 0x3
+
+/*
+ * HBA Capabilities
+ * Bit 1: Reserved.
+ * Bit 2: Dynamic Discovery of LUNs.
+ * Bit 3: Async event notifications on tgt online/offline events.
+ * Bit 4: IO timeout support in FW.
+ * Bit 5-31: Reserved.
+ */
+#define SNIC_HBA_CAP_DDL 0x02 /* Supports Dynamic Discovery of LUNs */
+#define SNIC_HBA_CAP_AEN	0x04 /* Supports Async Event Notification */
+#define SNIC_HBA_CAP_TMO 0x08 /* Supports IO timeout in FW */
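+
+/*
+ * A consumer of the hba_cap word (as decoded in
+ * snic_io_exch_ver_cmpl_handler) would test these bits before relying
+ * on a feature, e.g. (illustrative):
+ *
+ *	if (le32_to_cpu(exv_cmpl->hba_cap) & SNIC_HBA_CAP_TMO)
+ *		honor the firmware-extended IO timeout
+ */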
+
+/*
+ * snic_exch_ver_rsp : firmware -> host
+ *
+ * Used by firmware to send response to version request
+ */
+struct snic_exch_ver_rsp {
+ __le32 version;
+ __le32 hid;
+ __le32 max_concur_ios; /* max concurrent ios */
+ __le32 max_sgs_per_cmd; /* max sgls per IO */
+ __le32 max_io_sz; /* max io size supported */
+ __le32 hba_cap; /* hba capabilities */
+ __le32 max_tgts; /* max tgts supported */
+ __le16 io_timeout; /* FW extended timeout */
+ u16 rsvd;
+};
+
+
+/*
+ * snic_report_tgts : host -> firmware request
+ *
+ * Used by the host to request list of targets
+ */
+struct snic_report_tgts {
+ __le16 sg_cnt;
+ __le16 flags; /* specific flags from fw */
+ u8 _resvd[4];
+ __le64 sg_addr; /* Points to SGL */
+ __le64 sense_addr;
+};
+
+enum snic_type {
+ SNIC_NONE = 0x0,
+ SNIC_DAS,
+ SNIC_SAN,
+};
+
+
+/* Report Target Response */
+enum snic_tgt_type {
+ SNIC_TGT_NONE = 0x0,
+ SNIC_TGT_DAS, /* DAS Target */
+ SNIC_TGT_SAN, /* SAN Target */
+};
+
+/* target id format */
+struct snic_tgt_id {
+ __le32 tgt_id; /* target id */
+ __le16 tgt_type; /* tgt type */
+ __le16 vnic_id; /* corresponding vnic id */
+};
+
+/*
+ * snic_report_tgts_cmpl : firmware -> host response
+ *
+ * Used by firmware to send response to Report Targets request
+ */
+struct snic_report_tgts_cmpl {
+ __le32 tgt_cnt; /* Number of Targets accessible */
+ u32 _resvd;
+};
+
+/*
+ * Command flags
+ *
+ * Bit 0: Write flag
+ * Bit 1: Read flag
+ * Bit 2: ESGL - sg/esg array contains extended sg
+ *        ESGE - the host buffer contains sg elements
+ * Bit 3-4: Task Attributes
+ * 00b - simple
+ * 01b - head of queue
+ * 10b - ordered
+ * Bit 5-7: Priority - future use
+ * Bit 8-15: Reserved
+ */
+
+#define SNIC_ICMND_WR 0x01 /* write command */
+#define SNIC_ICMND_RD 0x02 /* read command */
+#define SNIC_ICMND_ESGL 0x04 /* SGE/ESGE array contains valid data*/
+
+/*
+ * Priority/Task Attribute settings
+ */
+#define SNIC_ICMND_TSK_SHIFT 2 /* task attr starts at bit 2 */
+#define SNIC_ICMND_TSK_MASK(x)		((x >> SNIC_ICMND_TSK_SHIFT) & 0x3)
+#define SNIC_ICMND_TSK_SIMPLE 0 /* simple task attr */
+#define SNIC_ICMND_TSK_HEAD_OF_QUEUE	1 /* head of queue task attr */
+#define SNIC_ICMND_TSK_ORDERED 2 /* ordered task attr */
+
+#define SNIC_ICMND_PRI_SHIFT 5 /* prio val starts at bit 5 */
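+
+/*
+ * Composing icmnd flags (illustrative): a write using an extended SGL
+ * with the simple task attribute would be built from these macros as
+ *
+ *	flags = SNIC_ICMND_WR | SNIC_ICMND_ESGL |
+ *		(SNIC_ICMND_TSK_SIMPLE << SNIC_ICMND_TSK_SHIFT);
+ */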
+
+/*
+ * snic_icmnd : host-> firmware request
+ *
+ * used for sending out an initiator SCSI 16/32-byte command
+ */
+struct snic_icmnd {
+ __le16 sg_cnt; /* Number of SG Elements */
+ __le16 flags; /* flags */
+ __le32 sense_len; /* Sense buffer length */
+ __le64 tgt_id; /* Destination Target ID */
+ __le64 lun_id; /* Destination LUN ID */
+ u8 cdb_len;
+ u8 _resvd;
+	__le16	time_out;	/* time (ms) for fw to allocate resources and handle the io */
+ __le32 data_len; /* Total number of bytes to be transferred */
+ u8 cdb[SNIC_CDB_LEN];
+ __le64 sg_addr; /* Points to SG List */
+ __le64 sense_addr; /* Sense buffer address */
+};
+
+
+/* Response flags */
+/* Bit 0: Under run
+ * Bit 1: Over Run
+ * Bit 2-7: Reserved
+ */
+#define SNIC_ICMND_CMPL_UNDR_RUN 0x01 /* resid under and valid */
+#define SNIC_ICMND_CMPL_OVER_RUN 0x02 /* resid over and valid */
+
+/*
+ * snic_icmnd_cmpl: firmware -> host response
+ *
+ * Used for sending the host a response to an icmnd (initiator command)
+ */
+struct snic_icmnd_cmpl {
+ u8 scsi_status; /* value as per SAM */
+ u8 flags;
+ __le16 sense_len; /* Sense Length */
+ __le32 resid; /* Residue : # bytes under or over run */
+};
+
+/*
+ * snic_itmf: host->firmware request
+ *
+ * used for requesting the firmware to abort a request and/or send out
+ * a task management function
+ *
+ * the req_id field is valid in case of abort task and clear task
+ */
+struct snic_itmf {
+ u8 tm_type; /* SCSI Task Management request */
+ u8 resvd;
+ __le16 flags; /* flags */
+ __le32 req_id; /* Command id of snic req to be aborted */
+ __le64 tgt_id; /* Target ID */
+ __le64 lun_id; /* Destination LUN ID */
+ __le16 timeout; /* in sec */
+};
+
+/*
+ * Task Management Request
+ */
+enum snic_itmf_tm_type {
+ SNIC_ITMF_ABTS_TASK = 0x01, /* Abort Task */
+ SNIC_ITMF_ABTS_TASK_SET, /* Abort Task Set */
+ SNIC_ITMF_CLR_TASK, /* Clear Task */
+ SNIC_ITMF_CLR_TASKSET, /* Clear Task Set */
+ SNIC_ITMF_LUN_RESET, /* Lun Reset */
+ SNIC_ITMF_ABTS_TASK_TERM, /* Supported for SAN Targets */
+};
+
+/*
+ * snic_itmf_cmpl: firmware -> host response
+ *
+ * used for sending the host a response for an itmf request
+ */
+struct snic_itmf_cmpl {
+ __le32 nterminated; /* # IOs terminated as a result of tmf */
+ u8 flags; /* flags */
+ u8 _resvd[3];
+};
+
+/*
+ * itmf_cmpl flags
+ * Bit 0 : 1 - Num terminated field valid
+ * Bit 1 - 7 : Reserved
+ */
+#define SNIC_NUM_TERM_VALID 0x01 /* Number of IOs terminated */
+
+/*
+ * snic_hba_reset: host -> firmware request
+ *
+ * used for requesting firmware to reset snic
+ */
+struct snic_hba_reset {
+ __le16 flags; /* flags */
+ u8 _resvd[6];
+};
+
+/*
+ * snic_hba_reset_cmpl: firmware -> host response
+ *
+ * Used by firmware to respond to the host's hba reset request
+ */
+struct snic_hba_reset_cmpl {
+ u8 flags; /* flags : more info needs to be added*/
+ u8 _resvd[7];
+};
+
+/*
+ * snic_notify_msg: firmware -> host response
+ *
+ * Used by firmware to notify host of the last work queue entry received
+ */
+struct snic_notify_msg {
+ __le32 wqe_num; /* wq entry number */
+ u8 flags; /* flags, macros */
+ u8 _resvd[4];
+};
+
+
+#define SNIC_EVDATA_LEN 24 /* in bytes */
+/* snic_async_evnotify: firmware -> host notification
+ *
+ * Used by firmware to notify the host about configuration/state changes
+ */
+struct snic_async_evnotify {
+ u8 FLS_EVENT_DESC;
+ u8 vnic; /* vnic id */
+ u8 _resvd[2];
+ __le32 ev_id; /* Event ID */
+ u8 ev_data[SNIC_EVDATA_LEN]; /* Event Data */
+ u8 _resvd2[4];
+};
+
+/* async event flags */
+enum snic_ev_type {
+ SNIC_EV_TGT_OFFLINE = 0x01, /* Target Offline, PL contains TGT ID */
+ SNIC_EV_TGT_ONLINE, /* Target Online, PL contains TGT ID */
+ SNIC_EV_LUN_OFFLINE, /* LUN Offline, PL contains LUN ID */
+ SNIC_EV_LUN_ONLINE, /* LUN Online, PL contains LUN ID */
+ SNIC_EV_CONF_CHG, /* Dev Config/Attr Change Event */
+ SNIC_EV_TGT_ADDED, /* Target Added */
+ SNIC_EV_TGT_DELTD, /* Target Del'd, PL contains TGT ID */
+ SNIC_EV_LUN_ADDED, /* LUN Added */
+ SNIC_EV_LUN_DELTD, /* LUN Del'd, PL cont. TGT & LUN ID */
+
+ SNIC_EV_DISC_CMPL = 0x10, /* Discovery Completed Event */
+};
+
+
+#define SNIC_HOST_REQ_LEN 128 /*Exp length of host req, wq desc sz*/
+/* Payload 88 bytes = 128 - 24 - 16 */
+#define SNIC_HOST_REQ_PAYLOAD ((int)(SNIC_HOST_REQ_LEN - \
+ sizeof(struct snic_io_hdr) - \
+ (2 * sizeof(u64))))
+
+/*
+ * snic_host_req: host -> firmware request
+ *
+ * Basic structure for all snic requests that are sent from the host to
+ * firmware. They are 128 bytes in size.
+ */
+struct snic_host_req {
+ u64 ctrl_data[2]; /*16 bytes - Control Data */
+ struct snic_io_hdr hdr;
+ union {
+ /*
+ * Entry specific space, last byte contains color
+ */
+ u8 buf[SNIC_HOST_REQ_PAYLOAD];
+
+ /*
+ * Exchange firmware version
+ */
+ struct snic_exch_ver_req exch_ver;
+
+ /* report targets */
+ struct snic_report_tgts rpt_tgts;
+
+ /* io request */
+ struct snic_icmnd icmnd;
+
+ /* task management request */
+ struct snic_itmf itmf;
+
+ /* hba reset */
+ struct snic_hba_reset reset;
+ } u;
+}; /* end of snic_host_req structure */
+
+
+#define SNIC_FW_REQ_LEN 64 /* Expected length of fw req */
+struct snic_fw_req {
+ struct snic_io_hdr hdr;
+ union {
+ /*
+ * Entry specific space, last byte contains color
+ */
+ u8 buf[SNIC_FW_REQ_LEN - sizeof(struct snic_io_hdr)];
+
+ /* Exchange Version Response */
+ struct snic_exch_ver_rsp exch_ver_cmpl;
+
+ /* Report Targets Response */
+ struct snic_report_tgts_cmpl rpt_tgts_cmpl;
+
+ /* scsi response */
+ struct snic_icmnd_cmpl icmnd_cmpl;
+
+ /* task management response */
+ struct snic_itmf_cmpl itmf_cmpl;
+
+ /* hba reset response */
+ struct snic_hba_reset_cmpl reset_cmpl;
+
+ /* notify message */
+ struct snic_notify_msg ack;
+
+ /* async notification event */
+ struct snic_async_evnotify async_ev;
+
+ } u;
+}; /* end of snic_fw_req structure */
+
+/*
+ * Auxiliary macros to verify specific snic req/cmpl structures,
+ * ensuring they are 64-bit aligned and do not use the color bit
+ * field.
+ */
+#define VERIFY_REQ_SZ(x)
+#define VERIFY_CMPL_SZ(x)
+
+/*
+ * Access routines to encode and decode the color bit, which is the most
+ * significant bit of the structure.
+ */
+static inline void
+snic_color_enc(struct snic_fw_req *req, u8 color)
+{
+ u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1;
+
+ if (color)
+ *c |= 0x80;
+ else
+ *c &= ~0x80;
+}
+
+static inline void
+snic_color_dec(struct snic_fw_req *req, u8 *color)
+{
+ u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1;
+
+ *color = *c >> 7;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+}
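+
+/*
+ * Consumption sketch (hypothetical helpers next_desc()/process() and
+ * queue color cq_color): the color bit flips each time the queue
+ * wraps, so a descriptor is fresh only while its color matches the
+ * queue's current color:
+ *
+ *	snic_color_dec(desc, &color);
+ *	while (color == cq_color) {
+ *		process(desc);
+ *		desc = next_desc(cq);
+ *		snic_color_dec(desc, &color);
+ *	}
+ */
+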
+#endif /* end of __SNIC_FWINT_H */
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
new file mode 100644
index 000000000000..993db7de4e4b
--- /dev/null
+++ b/drivers/scsi/snic/snic_io.c
@@ -0,0 +1,518 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/mempool.h>
+#include <scsi/scsi_tcq.h>
+
+#include "snic_io.h"
+#include "snic.h"
+#include "cq_enet_desc.h"
+#include "snic_fwint.h"
+
+static void
+snic_wq_cmpl_frame_send(struct vnic_wq *wq,
+ struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf,
+ void *opaque)
+{
+ struct snic *snic = svnic_dev_priv(wq->vdev);
+
+ SNIC_BUG_ON(buf->os_buf == NULL);
+
+ if (snic_log_level & SNIC_DESC_LOGGING)
+ SNIC_HOST_INFO(snic->shost,
+ "Ack received for snic_host_req %p.\n",
+ buf->os_buf);
+
+ SNIC_TRC(snic->shost->host_no, 0, 0,
+ ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
+ 0);
+ pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+ buf->os_buf = NULL;
+}
+
+static int
+snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
+ struct cq_desc *cq_desc,
+ u8 type,
+ u16 q_num,
+ u16 cmpl_idx,
+ void *opaque)
+{
+ struct snic *snic = svnic_dev_priv(vdev);
+ unsigned long flags;
+
+ SNIC_BUG_ON(q_num != 0);
+
+ spin_lock_irqsave(&snic->wq_lock[q_num], flags);
+ svnic_wq_service(&snic->wq[q_num],
+ cq_desc,
+ cmpl_idx,
+ snic_wq_cmpl_frame_send,
+ NULL);
+ spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
+
+ return 0;
+} /* end of snic_wq_cmpl_handler_cont */
+
+int
+snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
+{
+ unsigned int work_done = 0;
+ unsigned int i;
+
+ snic->s_stats.misc.last_ack_time = jiffies;
+ for (i = 0; i < snic->wq_count; i++) {
+ work_done += svnic_cq_service(&snic->cq[i],
+ work_to_do,
+ snic_wq_cmpl_handler_cont,
+ NULL);
+ }
+
+ return work_done;
+} /* end of snic_wq_cmpl_handler */
+
+void
+snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct snic_host_req *req = buf->os_buf;
+ struct snic *snic = svnic_dev_priv(wq->vdev);
+ struct snic_req_info *rqi = NULL;
+ unsigned long flags;
+
+ pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+
+ rqi = req_to_rqi(req);
+ spin_lock_irqsave(&snic->spl_cmd_lock, flags);
+ if (list_empty(&rqi->list)) {
+ spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
+ goto end;
+ }
+
+ SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
+ list_del_init(&rqi->list);
+ spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
+
+ if (rqi->sge_va) {
+ snic_pci_unmap_rsp_buf(snic, rqi);
+ kfree((void *)rqi->sge_va);
+ rqi->sge_va = 0;
+ }
+ snic_req_free(snic, rqi);
+ SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");
+
+end:
+ return;
+}
+
+/* Criteria to select work queue in multi queue mode */
+static int
+snic_select_wq(struct snic *snic)
+{
+ /* No multi queue support for now */
+ BUILD_BUG_ON(SNIC_WQ_MAX > 1);
+
+ return 0;
+}
+
+int
+snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
+{
+ dma_addr_t pa = 0;
+ unsigned long flags;
+ struct snic_fw_stats *fwstats = &snic->s_stats.fw;
+ long act_reqs;
+ int q_num = 0;
+
+ snic_print_desc(__func__, os_buf, len);
+
+ /* Map request buffer */
+ pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(snic->pdev, pa)) {
+ SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
+
+ return -ENOMEM;
+ }
+
+ q_num = snic_select_wq(snic);
+
+ spin_lock_irqsave(&snic->wq_lock[q_num], flags);
+	if (!svnic_wq_desc_avail(&snic->wq[q_num])) {
+ pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+ spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
+ atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
+ SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);
+
+ return -ENOMEM;
+ }
+
+ snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
+ spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
+
+ /* Update stats */
+ act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
+ if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
+ atomic64_set(&fwstats->max_actv_reqs, act_reqs);
+
+ return 0;
+} /* end of snic_queue_wq_desc() */
+
+/*
+ * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list.
+ * Purpose : Used during driver unload to clean up the requests.
+ */
+void
+snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
+{
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&rqi->list);
+
+ spin_lock_irqsave(&snic->spl_cmd_lock, flags);
+ list_add_tail(&rqi->list, &snic->spl_cmd_list);
+ spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
+}
+
+/*
+ * snic_req_init:
+ * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
+ */
+struct snic_req_info *
+snic_req_init(struct snic *snic, int sg_cnt)
+{
+ u8 typ;
+ struct snic_req_info *rqi = NULL;
+
+ typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
+ SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;
+
+ rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
+ if (!rqi) {
+ atomic64_inc(&snic->s_stats.io.alloc_fail);
+ SNIC_HOST_ERR(snic->shost,
+ "Failed to allocate memory from snic req pool id = %d\n",
+ typ);
+ return rqi;
+ }
+
+ memset(rqi, 0, sizeof(*rqi));
+ rqi->rq_pool_type = typ;
+ rqi->start_time = jiffies;
+ rqi->req = (struct snic_host_req *) (rqi + 1);
+ rqi->req_len = sizeof(struct snic_host_req);
+ rqi->snic = snic;
+
+ if (sg_cnt == 0)
+ goto end;
+
+ rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));
+
+ if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
+ atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);
+
+ SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
+ atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);
+
+end:
+ memset(rqi->req, 0, rqi->req_len);
+
+ /* pre initialization of init_ctx to support req_to_rqi */
+ rqi->req->hdr.init_ctx = (ulong) rqi;
+
+	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);
+
+ return rqi;
+} /* end of snic_req_init */
+
+/*
+ * snic_abort_req_init : Inits abort request.
+ */
+struct snic_host_req *
+snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
+{
+ struct snic_host_req *req = NULL;
+
+ SNIC_BUG_ON(!rqi);
+
+ /* If abort to be issued second time, then reuse */
+ if (rqi->abort_req)
+ return rqi->abort_req;
+
+ req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
+ if (!req) {
+ SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
+ WARN_ON_ONCE(1);
+
+ return NULL;
+ }
+
+ rqi->abort_req = req;
+ memset(req, 0, sizeof(struct snic_host_req));
+ /* pre initialization of init_ctx to support req_to_rqi */
+ req->hdr.init_ctx = (ulong) rqi;
+
+ return req;
+} /* end of snic_abort_req_init */
+
+/*
+ * snic_dr_req_init : Inits device reset req
+ */
+struct snic_host_req *
+snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
+{
+ struct snic_host_req *req = NULL;
+
+ SNIC_BUG_ON(!rqi);
+
+ req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
+ if (!req) {
+ SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
+ WARN_ON_ONCE(1);
+
+ return NULL;
+ }
+
+ SNIC_BUG_ON(rqi->dr_req != NULL);
+ rqi->dr_req = req;
+ memset(req, 0, sizeof(struct snic_host_req));
+ /* pre initialization of init_ctx to support req_to_rqi */
+ req->hdr.init_ctx = (ulong) rqi;
+
+ return req;
+} /* end of snic_dr_req_init */
+
+/* frees snic_req_info and snic_host_req */
+void
+snic_req_free(struct snic *snic, struct snic_req_info *rqi)
+{
+ SNIC_BUG_ON(rqi->req == rqi->abort_req);
+ SNIC_BUG_ON(rqi->req == rqi->dr_req);
+ SNIC_BUG_ON(rqi->sge_va != 0);
+
+ SNIC_SCSI_DBG(snic->shost,
+ "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
+ rqi, rqi->req, rqi->abort_req, rqi->dr_req);
+
+ if (rqi->abort_req)
+ mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
+
+ if (rqi->dr_req)
+ mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
+
+ mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
+}
+
+void
+snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
+{
+ struct snic_sg_desc *sgd;
+
+ sgd = req_to_sgl(rqi_to_req(rqi));
+ SNIC_BUG_ON(sgd[0].addr == 0);
+ pci_unmap_single(snic->pdev,
+ le64_to_cpu(sgd[0].addr),
+ le32_to_cpu(sgd[0].len),
+ PCI_DMA_FROMDEVICE);
+}
+
+/*
+ * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
+ */
+void
+snic_free_all_untagged_reqs(struct snic *snic)
+{
+ struct snic_req_info *rqi;
+ struct list_head *cur, *nxt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&snic->spl_cmd_lock, flags);
+ list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
+ rqi = list_entry(cur, struct snic_req_info, list);
+ list_del_init(&rqi->list);
+ if (rqi->sge_va) {
+ snic_pci_unmap_rsp_buf(snic, rqi);
+ kfree((void *)rqi->sge_va);
+ rqi->sge_va = 0;
+ }
+
+ snic_req_free(snic, rqi);
+ }
+ spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
+}
+
+/*
+ * snic_release_untagged_req : Unlinks the untagged req and frees it.
+ */
+void
+snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ if (snic->in_remove) {
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+ goto end;
+ }
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+
+ spin_lock_irqsave(&snic->spl_cmd_lock, flags);
+ if (list_empty(&rqi->list)) {
+ spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
+ goto end;
+ }
+ list_del_init(&rqi->list);
+ spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
+ snic_req_free(snic, rqi);
+
+end:
+ return;
+}
+
+/* dump buf in hex fmt */
+void
+snic_hex_dump(char *pfx, char *data, int len)
+{
+ SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
+ print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
+}
+
+#define LINE_BUFSZ 128 /* for snic_print_desc fn */
+static void
+snic_dump_desc(const char *fn, char *os_buf, int len)
+{
+ struct snic_host_req *req = (struct snic_host_req *) os_buf;
+ struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
+ struct snic_req_info *rqi = NULL;
+ char line[LINE_BUFSZ] = { '\0' };
+ char *cmd_str = NULL;
+
+ if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
+ rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
+ else
+ rqi = (struct snic_req_info *) req->hdr.init_ctx;
+
+ SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
+ switch (req->hdr.type) {
+ case SNIC_REQ_REPORT_TGTS:
+ cmd_str = "report-tgt : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
+ break;
+
+ case SNIC_REQ_ICMND:
+ cmd_str = "icmnd : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
+ req->u.icmnd.cdb[0]);
+ break;
+
+ case SNIC_REQ_ITMF:
+ cmd_str = "itmf : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
+ break;
+
+ case SNIC_REQ_HBA_RESET:
+ cmd_str = "hba reset :";
+ snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
+ break;
+
+ case SNIC_REQ_EXCH_VER:
+ cmd_str = "exch ver : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
+ break;
+
+ case SNIC_REQ_TGT_INFO:
+ cmd_str = "tgt info : ";
+ break;
+
+ case SNIC_RSP_REPORT_TGTS_CMPL:
+ cmd_str = "report tgt cmpl : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
+ break;
+
+ case SNIC_RSP_ICMND_CMPL:
+ cmd_str = "icmnd_cmpl : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
+ rqi->req->u.icmnd.cdb[0]);
+ break;
+
+ case SNIC_RSP_ITMF_CMPL:
+ cmd_str = "itmf_cmpl : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
+ break;
+
+ case SNIC_RSP_HBA_RESET_CMPL:
+ cmd_str = "hba_reset_cmpl : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
+ break;
+
+ case SNIC_RSP_EXCH_VER_CMPL:
+ cmd_str = "exch_ver_cmpl : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
+ break;
+
+ case SNIC_MSG_ACK:
+ cmd_str = "msg ack : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
+ break;
+
+ case SNIC_MSG_ASYNC_EVNOTIFY:
+ cmd_str = "async notify : ";
+ snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
+ break;
+
+ default:
+ cmd_str = "unknown : ";
+ SNIC_BUG_ON(1);
+ break;
+ }
+
+ SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
+ fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
+ req->hdr.init_ctx);
+
+	/* enable this bit in snic_log_level to dump the byte stream */
+ if (snic_log_level & 0x20)
+ snic_hex_dump(cmd_str, os_buf, len);
+} /* end of snic_dump_desc */
+
+void
+snic_print_desc(const char *fn, char *os_buf, int len)
+{
+ if (snic_log_level & SNIC_DESC_LOGGING)
+ snic_dump_desc(fn, os_buf, len);
+}
+
+void
+snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
+{
+ u64 duration;
+
+ duration = jiffies - rqi->start_time;
+
+ if (duration > atomic64_read(&snic->s_stats.io.max_time))
+ atomic64_set(&snic->s_stats.io.max_time, duration);
+}
diff --git a/drivers/scsi/snic/snic_io.h b/drivers/scsi/snic/snic_io.h
new file mode 100644
index 000000000000..093d6524cd42
--- /dev/null
+++ b/drivers/scsi/snic/snic_io.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _SNIC_IO_H
+#define _SNIC_IO_H
+
+#define SNIC_DFLT_SG_DESC_CNT 32 /* Default descriptors for sgl */
+#define SNIC_MAX_SG_DESC_CNT 60 /* Max descriptor for sgl */
+#define SNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
+
+/* SG descriptor for snic */
+struct snic_sg_desc {
+ __le64 addr;
+ __le32 len;
+ u32 _resvd;
+};
+
+struct snic_dflt_sgl {
+ struct snic_sg_desc sg_desc[SNIC_DFLT_SG_DESC_CNT];
+};
+
+struct snic_max_sgl {
+ struct snic_sg_desc sg_desc[SNIC_MAX_SG_DESC_CNT];
+};
+
+enum snic_req_cache_type {
+ SNIC_REQ_CACHE_DFLT_SGL = 0, /* cache with default size sgl */
+ SNIC_REQ_CACHE_MAX_SGL, /* cache with max size sgl */
+ SNIC_REQ_TM_CACHE, /* cache for task mgmt reqs contains
+				   snic_host_req objects only */
+ SNIC_REQ_MAX_CACHES /* number of sgl caches */
+};
+
+/* Per IO internal state */
+struct snic_internal_io_state {
+ char *rqi;
+ u64 flags;
+ u32 state;
+ u32 abts_status; /* Abort completion status */
+ u32 lr_status; /* device reset completion status */
+};
+
+/* IO state machine */
+enum snic_ioreq_state {
+ SNIC_IOREQ_NOT_INITED = 0,
+ SNIC_IOREQ_PENDING,
+ SNIC_IOREQ_ABTS_PENDING,
+ SNIC_IOREQ_ABTS_COMPLETE,
+ SNIC_IOREQ_LR_PENDING,
+ SNIC_IOREQ_LR_COMPLETE,
+ SNIC_IOREQ_COMPLETE,
+};
+
+struct snic;
+struct snic_host_req;
+
+/*
+ * snic_req_info : Contains info about IO, one per scsi command.
+ * Note: make sure the structure is aligned to 16 bytes; this allows
+ * easy access to snic_req_info from snic_host_req
+ */
+struct snic_req_info {
+ struct list_head list;
+ struct snic_host_req *req;
+ u64 start_time; /* start time in jiffies */
+	u16	rq_pool_type;	/* notion of request pool type */
+ u16 req_len; /* buf len passing to fw (req + sgl)*/
+ u32 tgt_id;
+
+ u32 tm_tag;
+ u8 io_cmpl:1; /* sets to 1 when fw completes IO */
+ u8 resvd[3];
+ struct scsi_cmnd *sc; /* Associated scsi cmd */
+ struct snic *snic; /* Associated snic */
+ ulong sge_va; /* Pointer to Resp Buffer */
+ u64 snsbuf_va;
+
+ struct snic_host_req *abort_req;
+ struct completion *abts_done;
+
+ struct snic_host_req *dr_req;
+ struct completion *dr_done;
+};
+
+#define rqi_to_req(rqi) \
+ ((struct snic_host_req *) (((struct snic_req_info *)rqi)->req))
+
+#define req_to_rqi(req) \
+ ((struct snic_req_info *) (((struct snic_host_req *)req)->hdr.init_ctx))
+
+#define req_to_sgl(req) \
+ ((struct snic_sg_desc *) (((struct snic_host_req *)req)+1))
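+
+/*
+ * Layout sketch (derived from snic_req_init() for requests drawn from
+ * the SGL caches): rqi, its host request, and the SGL come from one
+ * contiguous allocation, which is what makes the pointer arithmetic in
+ * these macros valid:
+ *
+ *	+---------------+---------------+----------------------+
+ *	| snic_req_info | snic_host_req | snic_sg_desc[sg_cnt] |
+ *	+---------------+---------------+----------------------+
+ *	rqi             rqi + 1 == req  req + 1 == sgl
+ *
+ * req_to_rqi() walks back through hdr.init_ctx, which snic_req_init()
+ * stores in every request header.
+ */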
+
+struct snic_req_info *
+snic_req_init(struct snic *, int sg_cnt);
+void snic_req_free(struct snic *, struct snic_req_info *);
+void snic_calc_io_process_time(struct snic *, struct snic_req_info *);
+void snic_pci_unmap_rsp_buf(struct snic *, struct snic_req_info *);
+struct snic_host_req *
+snic_abort_req_init(struct snic *, struct snic_req_info *);
+struct snic_host_req *
+snic_dr_req_init(struct snic *, struct snic_req_info *);
+#endif /* _SNIC_IO_H */
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
new file mode 100644
index 000000000000..a85fae25ec8c
--- /dev/null
+++ b/drivers/scsi/snic/snic_isr.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "snic_io.h"
+#include "snic.h"
+
+/*
+ * snic_isr_msix_wq : MSI-X ISR for the work queue.
+ */
+static irqreturn_t
+snic_isr_msix_wq(int irq, void *data)
+{
+ struct snic *snic = data;
+ unsigned long wq_work_done = 0;
+
+ snic->s_stats.misc.last_isr_time = jiffies;
+ atomic64_inc(&snic->s_stats.misc.isr_cnt);
+
+ wq_work_done = snic_wq_cmpl_handler(snic, -1);
+ svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ],
+ wq_work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+
+ return IRQ_HANDLED;
+} /* end of snic_isr_msix_wq */
+
+static irqreturn_t
+snic_isr_msix_io_cmpl(int irq, void *data)
+{
+ struct snic *snic = data;
+ unsigned long iocmpl_work_done = 0;
+
+ snic->s_stats.misc.last_isr_time = jiffies;
+ atomic64_inc(&snic->s_stats.misc.isr_cnt);
+
+ iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1);
+ svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL],
+ iocmpl_work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+
+ return IRQ_HANDLED;
+} /* end of snic_isr_msix_io_cmpl */
+
+static irqreturn_t
+snic_isr_msix_err_notify(int irq, void *data)
+{
+ struct snic *snic = data;
+
+ snic->s_stats.misc.last_isr_time = jiffies;
+ atomic64_inc(&snic->s_stats.misc.isr_cnt);
+
+ svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]);
+ snic_log_q_error(snic);
+
+	/* Handle link events */
+ snic_handle_link_event(snic);
+
+ return IRQ_HANDLED;
+} /* end of snic_isr_msix_err_notify */
+
+void
+snic_free_intr(struct snic *snic)
+{
+ int i;
+
+ /* ONLY interrupt mode MSIX is supported */
+ for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
+ if (snic->msix[i].requested) {
+ free_irq(snic->msix_entry[i].vector,
+ snic->msix[i].devid);
+ }
+ }
+} /* end of snic_free_intr */
+
+int
+snic_request_intr(struct snic *snic)
+{
+ int ret = 0, i;
+ enum vnic_dev_intr_mode intr_mode;
+
+ intr_mode = svnic_dev_get_intr_mode(snic->vdev);
+ SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);
+
+ /*
+	 * Currently the HW supports a single WQ and CQ, so devid is passed
+	 * as snic. Once the hardware supports multiple WQs and CQs, one
+	 * idea is to pass the corresponding WQ or CQ ptr as devid and
+	 * retrieve snic from the queue ptr. err_notify is the exception:
+	 * there is always exactly one.
+ */
+ sprintf(snic->msix[SNIC_MSIX_WQ].devname,
+ "%.11s-scsi-wq",
+ snic->name);
+ snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq;
+ snic->msix[SNIC_MSIX_WQ].devid = snic;
+
+ sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname,
+ "%.11s-io-cmpl",
+ snic->name);
+ snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl;
+ snic->msix[SNIC_MSIX_IO_CMPL].devid = snic;
+
+ sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname,
+ "%.11s-err-notify",
+ snic->name);
+ snic->msix[SNIC_MSIX_ERR_NOTIFY].isr = snic_isr_msix_err_notify;
+ snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;
+
+ for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
+ ret = request_irq(snic->msix_entry[i].vector,
+ snic->msix[i].isr,
+ 0,
+ snic->msix[i].devname,
+ snic->msix[i].devid);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+				      "MSI-X: request_irq(%d) failed %d\n",
+ i,
+ ret);
+ snic_free_intr(snic);
+ break;
+ }
+ snic->msix[i].requested = 1;
+ }
+
+ return ret;
+} /* end of snic_request_intr */
+
+int
+snic_set_intr_mode(struct snic *snic)
+{
+ unsigned int n = ARRAY_SIZE(snic->wq);
+ unsigned int m = SNIC_CQ_IO_CMPL_MAX;
+ unsigned int i;
+
+ /*
+ * We need n WQs, m CQs, and n+m+1 INTRs
+	 * (last INTR is used for WQ/CQ errors and the notification area)
+ */
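+	/*
+	 * Worked example (assuming the current single-queue sizes, i.e.
+	 * one WQ and one IO-completion CQ): n = 1 and m = 1, so three
+	 * MSI-X vectors are requested, matching SNIC_MSIX_WQ,
+	 * SNIC_MSIX_IO_CMPL and SNIC_MSIX_ERR_NOTIFY in
+	 * snic_request_intr().
+	 */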
+
+ BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
+ ARRAY_SIZE(snic->intr));
+ SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));
+
+ for (i = 0; i < (n + m + 1); i++)
+ snic->msix_entry[i].entry = i;
+
+ if (snic->wq_count >= n && snic->cq_count >= (n + m)) {
+ if (!pci_enable_msix(snic->pdev,
+ snic->msix_entry,
+ (n + m + 1))) {
+ snic->wq_count = n;
+ snic->cq_count = n + m;
+ snic->intr_count = n + m + 1;
+ snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;
+
+ SNIC_ISR_DBG(snic->shost,
+ "Using MSI-X Interrupts\n");
+ svnic_dev_set_intr_mode(snic->vdev,
+ VNIC_DEV_INTR_MODE_MSIX);
+
+ return 0;
+ }
+ }
+
+ svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+
+ return -EINVAL;
+} /* end of snic_set_intr_mode */
+
+void
+snic_clear_intr_mode(struct snic *snic)
+{
+ pci_disable_msix(snic->pdev);
+
+ svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
+}
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
new file mode 100644
index 000000000000..b2b87cef00fc
--- /dev/null
+++ b/drivers/scsi/snic/snic_main.c
@@ -0,0 +1,1044 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#include "snic.h"
+#include "snic_fwint.h"
+
+#define PCI_DEVICE_ID_CISCO_SNIC 0x0046
+
+/* Supported devices by snic module */
+static struct pci_device_id snic_id_table[] = {
+ {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
+ { 0, } /* end of table */
+};
+
+unsigned int snic_log_level = 0x0;
+module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");
+
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+unsigned int snic_trace_max_pages = 16;
+module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(snic_trace_max_pages,
+ "Total allocated memory pages for snic trace buffer");
+
+#endif
+unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
+module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
+
+/*
+ * snic_slave_alloc : callback function to SCSI Mid Layer, called on
+ * scsi device initialization.
+ */
+static int
+snic_slave_alloc(struct scsi_device *sdev)
+{
+ struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
+
+ if (!tgt || snic_tgt_chkready(tgt))
+ return -ENXIO;
+
+ return 0;
+}
+
+/*
+ * snic_slave_configure : callback function to SCSI Mid Layer, called
+ * after scsi device initialization to set queue depth and timeout.
+ */
+static int
+snic_slave_configure(struct scsi_device *sdev)
+{
+ struct snic *snic = shost_priv(sdev->host);
+ u32 qdepth = 0, max_ios = 0;
+ int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;
+
+ /* Set Queue Depth */
+ max_ios = snic_max_qdepth;
+ qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
+ scsi_change_queue_depth(sdev, qdepth);
+
+ if (snic->fwinfo.io_tmo > 1)
+ tmo = snic->fwinfo.io_tmo * HZ;
+
+ /* FW requires extended timeouts */
+ blk_queue_rq_timeout(sdev->request_queue, tmo);
+
+ return 0;
+}
+
+static int
+snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ int qsz = 0;
+
+ qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
+ scsi_change_queue_depth(sdev, qsz);
+ SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth);
+
+ return sdev->queue_depth;
+}
+
+static struct scsi_host_template snic_host_template = {
+ .module = THIS_MODULE,
+ .name = SNIC_DRV_NAME,
+ .queuecommand = snic_queuecommand,
+ .eh_abort_handler = snic_abort_cmd,
+ .eh_device_reset_handler = snic_device_reset,
+ .eh_host_reset_handler = snic_host_reset,
+ .slave_alloc = snic_slave_alloc,
+ .slave_configure = snic_slave_configure,
+ .change_queue_depth = snic_change_queue_depth,
+ .this_id = -1,
+ .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
+ .can_queue = SNIC_MAX_IO_REQ,
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = SNIC_MAX_SG_DESC_CNT,
+ .max_sectors = 0x800,
+ .shost_attrs = snic_attrs,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct snic_internal_io_state),
+ .proc_name = "snic_scsi",
+};
+
+/*
+ * snic_handle_link_event : Handles link events such as link up/down/error
+ */
+void
+snic_handle_link_event(struct snic *snic)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ if (snic->stop_link_events) {
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+
+ return;
+ }
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+
+ queue_work(snic_glob->event_q, &snic->link_work);
+} /* end of snic_handle_link_event */
+
+/*
+ * snic_notify_set : sets notification area
+ * This notification area is to receive events from fw
+ * Note: snic supports only MSIX interrupts, in which we can just call
+ * svnic_dev_notify_set directly
+ */
+static int
+snic_notify_set(struct snic *snic)
+{
+ int ret = 0;
+ enum vnic_dev_intr_mode intr_mode;
+
+ intr_mode = svnic_dev_get_intr_mode(snic->vdev);
+
+ if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
+ ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
+ } else {
+ SNIC_HOST_ERR(snic->shost,
+ "Interrupt mode should be setup before devcmd notify set %d\n",
+ intr_mode);
+ ret = -1;
+ }
+
+ return ret;
+} /* end of snic_notify_set */
+
+/*
+ * snic_dev_wait : polls vnic open status.
+ */
+static int
+snic_dev_wait(struct vnic_dev *vdev,
+ int (*start)(struct vnic_dev *, int),
+ int (*finished)(struct vnic_dev *, int *),
+ int arg)
+{
+ unsigned long time;
+ int ret, done;
+ int retry_cnt = 0;
+
+ ret = start(vdev, arg);
+ if (ret)
+ return ret;
+
+ /*
+ * Wait for func to complete...2 seconds max.
+ *
+	 * Sometimes schedule_timeout_uninterruptible takes a long time
+	 * to wake up, which results in skipped retries. The retry counter
+	 * ensures at least two retries.
+ */
+ time = jiffies + (HZ * 2);
+ do {
+ ret = finished(vdev, &done);
+ if (ret)
+ return ret;
+
+ if (done)
+ return 0;
+ schedule_timeout_uninterruptible(HZ/10);
+ ++retry_cnt;
+ } while (time_after(time, jiffies) || (retry_cnt < 3));
+
+ return -ETIMEDOUT;
+} /* end of snic_dev_wait */
+
+/*
+ * snic_cleanup: called by snic_remove
+ * Stops the snic device, masks all interrupts, drains completed CQ
+ * entries, and cleans up posted WQ/RQ/Copy-WQ entries
+ */
+static int
+snic_cleanup(struct snic *snic)
+{
+ unsigned int i;
+ int ret;
+
+ svnic_dev_disable(snic->vdev);
+ for (i = 0; i < snic->intr_count; i++)
+ svnic_intr_mask(&snic->intr[i]);
+
+ for (i = 0; i < snic->wq_count; i++) {
+ ret = svnic_wq_disable(&snic->wq[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Clean up completed IOs */
+ snic_fwcq_cmpl_handler(snic, -1);
+
+ snic_wq_cmpl_handler(snic, -1);
+
+ /* Clean up the IOs that have not completed */
+ for (i = 0; i < snic->wq_count; i++)
+ svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);
+
+ for (i = 0; i < snic->cq_count; i++)
+ svnic_cq_clean(&snic->cq[i]);
+
+ for (i = 0; i < snic->intr_count; i++)
+ svnic_intr_clean(&snic->intr[i]);
+
+ /* Cleanup snic specific requests */
+ snic_free_all_untagged_reqs(snic);
+
+ /* Cleanup Pending SCSI commands */
+ snic_shutdown_scsi_cleanup(snic);
+
+ for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
+ mempool_destroy(snic->req_pool[i]);
+
+ return 0;
+} /* end of snic_cleanup */
+
+static void
+snic_iounmap(struct snic *snic)
+{
+ if (snic->bar0.vaddr)
+ iounmap(snic->bar0.vaddr);
+}
+
+/*
+ * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
+ */
+static int
+snic_vdev_open_done(struct vnic_dev *vdev, int *done)
+{
+ struct snic *snic = svnic_dev_priv(vdev);
+ int ret;
+ int nretries = 5;
+
+ do {
+ ret = svnic_dev_open_done(vdev, done);
+ if (ret == 0)
+ break;
+
+		SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN timed out.\n");
+ } while (nretries--);
+
+ return ret;
+} /* end of snic_vdev_open_done */
+
+/*
+ * snic_add_host : registers scsi host with ML
+ */
+static int
+snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
+{
+ int ret = 0;
+
+ ret = scsi_add_host(shost, &pdev->dev);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "snic: scsi_add_host failed. %d\n",
+ ret);
+
+ return ret;
+ }
+
+ SNIC_BUG_ON(shost->work_q != NULL);
+ snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
+ shost->host_no);
+ shost->work_q = create_singlethread_workqueue(shost->work_q_name);
+ if (!shost->work_q) {
+ SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
+
+ ret = -ENOMEM;
+ }
+
+ return ret;
+} /* end of snic_add_host */
+
+static void
+snic_del_host(struct Scsi_Host *shost)
+{
+ if (!shost->work_q)
+ return;
+
+ destroy_workqueue(shost->work_q);
+ shost->work_q = NULL;
+ scsi_remove_host(shost);
+}
+
+int
+snic_get_state(struct snic *snic)
+{
+ return atomic_read(&snic->state);
+}
+
+void
+snic_set_state(struct snic *snic, enum snic_state state)
+{
+ SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
+ snic_state_to_str(snic_get_state(snic)),
+ snic_state_to_str(state));
+
+ atomic_set(&snic->state, state);
+}
+
+/*
+ * snic_probe : Initialize the snic interface.
+ */
+static int
+snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct Scsi_Host *shost;
+ struct snic *snic;
+ mempool_t *pool;
+ unsigned long flags;
+ u32 max_ios = 0;
+ int ret, i;
+
+ /* Device Information */
+ SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+
+ SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
+ /*
+ * Allocate SCSI Host and setup association between host, and snic
+ */
+ shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
+ if (!shost) {
+ SNIC_ERR("Unable to alloc scsi_host\n");
+ ret = -ENOMEM;
+
+ goto prob_end;
+ }
+ snic = shost_priv(shost);
+ snic->shost = shost;
+
+ snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
+ shost->host_no);
+
+ SNIC_HOST_INFO(shost,
+ "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
+ shost->host_no, snic, shost, pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+ /* Per snic debugfs init */
+ ret = snic_stats_debugfs_init(snic);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+ "Failed to initialize debugfs stats\n");
+ snic_stats_debugfs_remove(snic);
+ }
+#endif
+
+ /* Setup PCI Resources */
+ pci_set_drvdata(pdev, snic);
+ snic->pdev = pdev;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Cannot enable PCI Resources, aborting : %d\n",
+ ret);
+
+ goto err_free_snic;
+ }
+
+ ret = pci_request_regions(pdev, SNIC_DRV_NAME);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Cannot obtain PCI Resources, aborting : %d\n",
+ ret);
+
+ goto err_pci_disable;
+ }
+
+ pci_set_master(pdev);
+
+ /*
+ * Query PCI Controller on system for DMA addressing
+	 * limitation for the device. Try 43-bit first, and
+	 * fall back to 32-bit.
+ */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
+ if (ret) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "No Usable DMA Configuration, aborting %d\n",
+ ret);
+
+ goto err_rel_regions;
+ }
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
+ ret);
+
+ goto err_rel_regions;
+ }
+ } else {
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
+ ret);
+
+ goto err_rel_regions;
+ }
+ }
+
+ /* Map vNIC resources from BAR0 */
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
+
+ ret = -ENODEV;
+ goto err_rel_regions;
+ }
+
+ snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
+ if (!snic->bar0.vaddr) {
+ SNIC_HOST_ERR(shost,
+ "Cannot memory map BAR0 res hdr aborting.\n");
+
+ ret = -ENODEV;
+ goto err_rel_regions;
+ }
+
+ snic->bar0.bus_addr = pci_resource_start(pdev, 0);
+ snic->bar0.len = pci_resource_len(pdev, 0);
+ SNIC_BUG_ON(snic->bar0.bus_addr == 0);
+
+ /* Devcmd2 Resource Allocation and Initialization */
+ snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
+ if (!snic->vdev) {
+ SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");
+
+ ret = -ENODEV;
+ goto err_iounmap;
+ }
+
+ ret = svnic_dev_cmd_init(snic->vdev, 0);
+ if (ret) {
+ SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);
+
+ goto err_vnic_unreg;
+ }
+
+ ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "vNIC dev open failed, aborting. %d\n",
+ ret);
+
+ goto err_vnic_unreg;
+ }
+
+ ret = svnic_dev_init(snic->vdev, 0);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "vNIC dev init failed. aborting. %d\n",
+ ret);
+
+ goto err_dev_close;
+ }
+
+ /* Get vNIC information */
+ ret = snic_get_vnic_config(snic);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Get vNIC configuration failed, aborting. %d\n",
+ ret);
+
+ goto err_dev_close;
+ }
+
+ /* Configure Maximum Outstanding IO reqs */
+ max_ios = snic->config.io_throttle_count;
+ if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
+ shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
+ max_t(u32, SNIC_MIN_IO_REQ, max_ios));
+
+ snic->max_tag_id = shost->can_queue;
+
+ ret = scsi_init_shared_tag_map(shost, snic->max_tag_id);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Unable to alloc shared tag map. %d\n",
+ ret);
+
+ goto err_dev_close;
+ }
+
+ shost->max_lun = snic->config.luns_per_tgt;
+ shost->max_id = SNIC_MAX_TARGET;
+
+ shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/
+
+ snic_get_res_counts(snic);
+
+ /*
+ * Assumption: Only MSIx is supported
+ */
+ ret = snic_set_intr_mode(snic);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Failed to set intr mode aborting. %d\n",
+ ret);
+
+ goto err_dev_close;
+ }
+
+ ret = snic_alloc_vnic_res(snic);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Failed to alloc vNIC resources aborting. %d\n",
+ ret);
+
+ goto err_clear_intr;
+ }
+
+ /* Initialize specific lists */
+ INIT_LIST_HEAD(&snic->list);
+
+ /*
+ * spl_cmd_list for maintaining snic specific cmds
+ * such as EXCH_VER_REQ, REPORT_TARGETS etc
+ */
+ INIT_LIST_HEAD(&snic->spl_cmd_list);
+ spin_lock_init(&snic->spl_cmd_lock);
+
+ /* initialize all snic locks */
+ spin_lock_init(&snic->snic_lock);
+
+ for (i = 0; i < SNIC_WQ_MAX; i++)
+ spin_lock_init(&snic->wq_lock[i]);
+
+ for (i = 0; i < SNIC_IO_LOCKS; i++)
+ spin_lock_init(&snic->io_req_lock[i]);
+
+ pool = mempool_create_slab_pool(2,
+ snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
+ if (!pool) {
+ SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
+
+ goto err_free_res;
+ }
+
+ snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;
+
+ pool = mempool_create_slab_pool(2,
+ snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
+ if (!pool) {
+ SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
+
+ goto err_free_dflt_sgl_pool;
+ }
+
+ snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;
+
+ pool = mempool_create_slab_pool(2,
+ snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
+ if (!pool) {
+ SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
+
+ goto err_free_max_sgl_pool;
+ }
+
+ snic->req_pool[SNIC_REQ_TM_CACHE] = pool;
+
+ /* Initialize snic state */
+ atomic_set(&snic->state, SNIC_INIT);
+
+ atomic_set(&snic->ios_inflight, 0);
+
+ /* Setup notification buffer area */
+ ret = snic_notify_set(snic);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Failed to alloc notify buffer aborting. %d\n",
+ ret);
+
+ goto err_free_tmreq_pool;
+ }
+
+ /*
+ * Initialization done with PCI system, hardware, firmware.
+ * Add shost to SCSI
+ */
+ ret = snic_add_host(shost, pdev);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Adding scsi host Failed ... exiting. %d\n",
+ ret);
+
+ goto err_notify_unset;
+ }
+
+ spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
+ list_add_tail(&snic->list, &snic_glob->snic_list);
+ spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
+
+ snic_disc_init(&snic->disc);
+ INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
+ INIT_WORK(&snic->disc_work, snic_handle_disc);
+ INIT_WORK(&snic->link_work, snic_handle_link);
+
+ /* Enable all queues */
+ for (i = 0; i < snic->wq_count; i++)
+ svnic_wq_enable(&snic->wq[i]);
+
+ ret = svnic_dev_enable_wait(snic->vdev);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "vNIC dev enable failed w/ error %d\n",
+ ret);
+
+ goto err_vdev_enable;
+ }
+
+ ret = snic_request_intr(snic);
+ if (ret) {
+ SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);
+
+ goto err_req_intr;
+ }
+
+ for (i = 0; i < snic->intr_count; i++)
+ svnic_intr_unmask(&snic->intr[i]);
+
+ snic_set_state(snic, SNIC_ONLINE);
+
+ /* Get snic params */
+ ret = snic_get_conf(snic);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Failed to get snic io config from FW w err %d\n",
+ ret);
+
+ goto err_get_conf;
+ }
+
+ ret = snic_disc_start(snic);
+ if (ret) {
+ SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
+ ret);
+
+ goto err_get_conf;
+ }
+
+ SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");
+
+ return 0;
+
+err_get_conf:
+ snic_free_all_untagged_reqs(snic);
+
+ for (i = 0; i < snic->intr_count; i++)
+ svnic_intr_mask(&snic->intr[i]);
+
+ snic_free_intr(snic);
+
+err_req_intr:
+ svnic_dev_disable(snic->vdev);
+
+err_vdev_enable:
+ for (i = 0; i < snic->wq_count; i++) {
+ int rc = 0;
+
+ rc = svnic_wq_disable(&snic->wq[i]);
+ if (rc) {
+ SNIC_HOST_ERR(shost,
+ "WQ Disable Failed w/ err = %d\n", rc);
+
+ break;
+ }
+ }
+ snic_del_host(snic->shost);
+
+err_notify_unset:
+ svnic_dev_notify_unset(snic->vdev);
+
+err_free_tmreq_pool:
+ mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);
+
+err_free_max_sgl_pool:
+ mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);
+
+err_free_dflt_sgl_pool:
+ mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);
+
+err_free_res:
+ snic_free_vnic_res(snic);
+
+err_clear_intr:
+ snic_clear_intr_mode(snic);
+
+err_dev_close:
+ svnic_dev_close(snic->vdev);
+
+err_vnic_unreg:
+ svnic_dev_unregister(snic->vdev);
+
+err_iounmap:
+ snic_iounmap(snic);
+
+err_rel_regions:
+ pci_release_regions(pdev);
+
+err_pci_disable:
+ pci_disable_device(pdev);
+
+err_free_snic:
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+ snic_stats_debugfs_remove(snic);
+#endif
+ scsi_host_put(shost);
+ pci_set_drvdata(pdev, NULL);
+
+prob_end:
+ SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
+ return ret;
+} /* end of snic_probe */
+
+/*
+ * snic_remove : invoked on unbinding the interface to clean up the
+ * resources allocated in snic_probe on initialization.
+ */
+static void
+snic_remove(struct pci_dev *pdev)
+{
+ struct snic *snic = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!snic) {
+ SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
+ return;
+ }
+
+ /*
+ * Mark state so that the workqueue thread stops forwarding
+ * received frames and link events. ISR and other threads
+ * that can queue work items will also stop creating work
+ * items on the snic workqueue
+ */
+ snic_set_state(snic, SNIC_OFFLINE);
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ snic->stop_link_events = 1;
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+
+ flush_workqueue(snic_glob->event_q);
+ snic_disc_term(snic);
+
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ snic->in_remove = 1;
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+
+ /*
+	 * This stops the snic device, masks all interrupts, drains
+	 * completed CQ entries, and cleans up posted WQ/RQ/Copy-WQ
+	 * entries
+ */
+ snic_cleanup(snic);
+
+ spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
+ list_del(&snic->list);
+ spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
+
+ snic_tgt_del_all(snic);
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+ snic_stats_debugfs_remove(snic);
+#endif
+ snic_del_host(snic->shost);
+
+ svnic_dev_notify_unset(snic->vdev);
+ snic_free_intr(snic);
+ snic_free_vnic_res(snic);
+ snic_clear_intr_mode(snic);
+ svnic_dev_close(snic->vdev);
+ svnic_dev_unregister(snic->vdev);
+ snic_iounmap(snic);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+	/* this frees Scsi_Host and snic memory (contiguous chunk) */
+ scsi_host_put(snic->shost);
+} /* end of snic_remove */
+
+struct snic_global *snic_glob;
+
+/*
+ * snic_global_data_init: Initialize SNIC Global Data
+ * Note: all global lists and variables should be part of the global
+ * data; this helps in debugging.
+ */
+static int
+snic_global_data_init(void)
+{
+ int ret = 0;
+ struct kmem_cache *cachep;
+ ssize_t len = 0;
+
+ snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);
+
+ if (!snic_glob) {
+ SNIC_ERR("Failed to allocate Global Context.\n");
+
+ ret = -ENOMEM;
+ goto gdi_end;
+ }
+
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+ /* Debugfs related Initialization */
+ /* Create debugfs entries for snic */
+ ret = snic_debugfs_init();
+ if (ret < 0) {
+		SNIC_ERR("Failed to create debugfs dir for tracing and stats.\n");
+ snic_debugfs_term();
+ /* continue even if it fails */
+ }
+
+ /* Trace related Initialization */
+ /* Allocate memory for trace buffer */
+ ret = snic_trc_init();
+ if (ret < 0) {
+ SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
+ snic_trc_free();
+ /* continue even if it fails */
+ }
+
+#endif
+ INIT_LIST_HEAD(&snic_glob->snic_list);
+ spin_lock_init(&snic_glob->snic_list_lock);
+
+ /* Create a cache for allocation of snic_host_req+default size ESGLs */
+ len = sizeof(struct snic_req_info);
+ len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
+ cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!cachep) {
+ SNIC_ERR("Failed to create snic default sgl slab\n");
+ ret = -ENOMEM;
+
+ goto err_dflt_req_slab;
+ }
+ snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;
+
+ /* Create a cache for allocation of max size Extended SGLs */
+ len = sizeof(struct snic_req_info);
+ len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
+ cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!cachep) {
+ SNIC_ERR("Failed to create snic max sgl slab\n");
+ ret = -ENOMEM;
+
+ goto err_max_req_slab;
+ }
+ snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
+
+ len = sizeof(struct snic_host_req);
+	cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!cachep) {
+ SNIC_ERR("Failed to create snic tm req slab\n");
+ ret = -ENOMEM;
+
+ goto err_tmreq_slab;
+ }
+ snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
+
+ /* snic_event queue */
+ snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
+ if (!snic_glob->event_q) {
+ SNIC_ERR("snic event queue create failed\n");
+ ret = -ENOMEM;
+
+ goto err_eventq;
+ }
+
+ return ret;
+
+err_eventq:
+ kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
+
+err_tmreq_slab:
+ kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
+
+err_max_req_slab:
+ kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
+
+err_dflt_req_slab:
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+ snic_trc_free();
+ snic_debugfs_term();
+#endif
+ kfree(snic_glob);
+ snic_glob = NULL;
+
+gdi_end:
+ return ret;
+} /* end of snic_global_data_init */
+
+/*
+ * snic_global_data_cleanup : Frees SNIC Global Data
+ */
+static void
+snic_global_data_cleanup(void)
+{
+ SNIC_BUG_ON(snic_glob == NULL);
+
+ destroy_workqueue(snic_glob->event_q);
+ kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
+ kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
+ kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
+
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+ /* Freeing Trace Resources */
+ snic_trc_free();
+
+ /* Freeing Debugfs Resources */
+ snic_debugfs_term();
+#endif
+ kfree(snic_glob);
+ snic_glob = NULL;
+} /* end of snic_global_data_cleanup */
+
+static struct pci_driver snic_driver = {
+ .name = SNIC_DRV_NAME,
+ .id_table = snic_id_table,
+ .probe = snic_probe,
+ .remove = snic_remove,
+};
+
+static int __init
+snic_init_module(void)
+{
+ int ret = 0;
+
+#ifndef __x86_64__
+ SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+#endif
+
+ SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);
+
+ ret = snic_global_data_init();
+ if (ret) {
+ SNIC_ERR("Failed to Initialize Global Data.\n");
+
+ return ret;
+ }
+
+ ret = pci_register_driver(&snic_driver);
+ if (ret < 0) {
+ SNIC_ERR("PCI driver register error\n");
+
+ goto err_pci_reg;
+ }
+
+ return ret;
+
+err_pci_reg:
+ snic_global_data_cleanup();
+
+ return ret;
+}
+
+static void __exit
+snic_cleanup_module(void)
+{
+ pci_unregister_driver(&snic_driver);
+ snic_global_data_cleanup();
+}
+
+module_init(snic_init_module);
+module_exit(snic_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
+MODULE_VERSION(SNIC_DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, snic_id_table);
+MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
+ "Sesidhar Baddela <sebaddel@cisco.com>");
diff --git a/drivers/scsi/snic/snic_res.c b/drivers/scsi/snic/snic_res.c
new file mode 100644
index 000000000000..b54912c8ca0c
--- /dev/null
+++ b/drivers/scsi/snic/snic_res.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "wq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "snic.h"
+
+int
+snic_get_vnic_config(struct snic *snic)
+{
+ struct vnic_snic_config *c = &snic->config;
+ int ret;
+
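+/*
+ * GET_CONFIG(m) reads one field of struct vnic_snic_config from the
+ * device via svnic_dev_spec(), using the field's offset and size, and
+ * returns from snic_get_vnic_config() on failure.
+ */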
+#define GET_CONFIG(m) \
+ do { \
+ ret = svnic_dev_spec(snic->vdev, \
+ offsetof(struct vnic_snic_config, m), \
+ sizeof(c->m), \
+ &c->m); \
+ if (ret) { \
+ SNIC_HOST_ERR(snic->shost, \
+ "Error getting %s, %d\n", #m, ret); \
+ return ret; \
+ } \
+ } while (0)
+
+ GET_CONFIG(wq_enet_desc_count);
+ GET_CONFIG(maxdatafieldsize);
+ GET_CONFIG(intr_timer);
+ GET_CONFIG(intr_timer_type);
+ GET_CONFIG(flags);
+ GET_CONFIG(io_throttle_count);
+ GET_CONFIG(port_down_timeout);
+ GET_CONFIG(port_down_io_retries);
+ GET_CONFIG(luns_per_tgt);
+ GET_CONFIG(xpt_type);
+ GET_CONFIG(hid);
+
+ c->wq_enet_desc_count = min_t(u32,
+ VNIC_SNIC_WQ_DESCS_MAX,
+ max_t(u32,
+ VNIC_SNIC_WQ_DESCS_MIN,
+ c->wq_enet_desc_count));
+
+ c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
+
+ c->maxdatafieldsize = min_t(u32,
+ VNIC_SNIC_MAXDATAFIELDSIZE_MAX,
+ max_t(u32,
+ VNIC_SNIC_MAXDATAFIELDSIZE_MIN,
+ c->maxdatafieldsize));
+
+ c->io_throttle_count = min_t(u32,
+ VNIC_SNIC_IO_THROTTLE_COUNT_MAX,
+ max_t(u32,
+ VNIC_SNIC_IO_THROTTLE_COUNT_MIN,
+ c->io_throttle_count));
+
+ c->port_down_timeout = min_t(u32,
+ VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX,
+ c->port_down_timeout);
+
+ c->port_down_io_retries = min_t(u32,
+ VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX,
+ c->port_down_io_retries);
+
+ c->luns_per_tgt = min_t(u32,
+ VNIC_SNIC_LUNS_PER_TARGET_MAX,
+ max_t(u32,
+ VNIC_SNIC_LUNS_PER_TARGET_MIN,
+ c->luns_per_tgt));
+
+ c->intr_timer = min_t(u32, VNIC_INTR_TIMER_MAX, c->intr_timer);
+
+ SNIC_INFO("vNIC resources wq %d\n", c->wq_enet_desc_count);
+ SNIC_INFO("vNIC mtu %d intr timer %d\n",
+ c->maxdatafieldsize,
+ c->intr_timer);
+
+ SNIC_INFO("vNIC flags 0x%x luns per tgt %d\n",
+ c->flags,
+ c->luns_per_tgt);
+
+ SNIC_INFO("vNIC io throttle count %d\n", c->io_throttle_count);
+ SNIC_INFO("vNIC port down timeout %d port down io retries %d\n",
+ c->port_down_timeout,
+ c->port_down_io_retries);
+
+ SNIC_INFO("vNIC back end type = %d\n", c->xpt_type);
+ SNIC_INFO("vNIC hid = %d\n", c->hid);
+
+ return 0;
+}
+
+void
+snic_get_res_counts(struct snic *snic)
+{
+ snic->wq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_WQ);
+ SNIC_BUG_ON(snic->wq_count == 0);
+ snic->cq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_CQ);
+ SNIC_BUG_ON(snic->cq_count == 0);
+ snic->intr_count = svnic_dev_get_res_count(snic->vdev,
+ RES_TYPE_INTR_CTRL);
+ SNIC_BUG_ON(snic->intr_count == 0);
+}
+
+void
+snic_free_vnic_res(struct snic *snic)
+{
+ unsigned int i;
+
+ for (i = 0; i < snic->wq_count; i++)
+ svnic_wq_free(&snic->wq[i]);
+
+ for (i = 0; i < snic->cq_count; i++)
+ svnic_cq_free(&snic->cq[i]);
+
+ for (i = 0; i < snic->intr_count; i++)
+ svnic_intr_free(&snic->intr[i]);
+}
+
+int
+snic_alloc_vnic_res(struct snic *snic)
+{
+ enum vnic_dev_intr_mode intr_mode;
+ unsigned int mask_on_assertion;
+ unsigned int intr_offset;
+ unsigned int err_intr_enable;
+ unsigned int err_intr_offset;
+ unsigned int i;
+ int ret;
+
+ intr_mode = svnic_dev_get_intr_mode(snic->vdev);
+
+ SNIC_INFO("vNIC interrupt mode: %s\n",
+ ((intr_mode == VNIC_DEV_INTR_MODE_INTX) ?
+ "Legacy PCI INTx" :
+ ((intr_mode == VNIC_DEV_INTR_MODE_MSI) ?
+ "MSI" :
+ ((intr_mode == VNIC_DEV_INTR_MODE_MSIX) ?
+ "MSI-X" : "Unknown"))));
+
+ /* only MSI-X is supported */
+ SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);
+
+ SNIC_INFO("wq %d cq %d intr %d\n", snic->wq_count,
+ snic->cq_count,
+ snic->intr_count);
+
+ /* Allocate WQs used for SCSI IOs */
+ for (i = 0; i < snic->wq_count; i++) {
+ ret = svnic_wq_alloc(snic->vdev,
+ &snic->wq[i],
+ i,
+ snic->config.wq_enet_desc_count,
+ sizeof(struct wq_enet_desc));
+ if (ret)
+ goto error_cleanup;
+ }
+
+ /* CQ for each WQ */
+ for (i = 0; i < snic->wq_count; i++) {
+ ret = svnic_cq_alloc(snic->vdev,
+ &snic->cq[i],
+ i,
+ snic->config.wq_enet_desc_count,
+ sizeof(struct cq_enet_wq_desc));
+ if (ret)
+ goto error_cleanup;
+ }
+
+ SNIC_BUG_ON(snic->cq_count != 2 * snic->wq_count);
+ /* CQ for FW TO host */
+ for (i = snic->wq_count; i < snic->cq_count; i++) {
+ ret = svnic_cq_alloc(snic->vdev,
+ &snic->cq[i],
+ i,
+ (snic->config.wq_enet_desc_count * 3),
+ sizeof(struct snic_fw_req));
+ if (ret)
+ goto error_cleanup;
+ }
+
+ for (i = 0; i < snic->intr_count; i++) {
+ ret = svnic_intr_alloc(snic->vdev, &snic->intr[i], i);
+ if (ret)
+ goto error_cleanup;
+ }
+
+ /*
+ * Init WQ Resources.
+	 * WQ[0..n-1] posts completions to CQ[0..n-1]
+	 * firmware-to-host comm uses CQ[n..n+m-1]
+ */
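+	/*
+	 * For example, with the current single-WQ setup (wq_count == 1,
+	 * cq_count == 2): WQ[0] completions land on CQ[0] and
+	 * firmware-to-host messages arrive on CQ[1].
+	 */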
+ err_intr_enable = 1;
+ err_intr_offset = snic->err_intr_offset;
+
+ for (i = 0; i < snic->wq_count; i++) {
+ svnic_wq_init(&snic->wq[i],
+ i,
+ err_intr_enable,
+ err_intr_offset);
+ }
+
+ for (i = 0; i < snic->cq_count; i++) {
+ intr_offset = i;
+
+ svnic_cq_init(&snic->cq[i],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 1 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ intr_offset,
+ 0 /* cq_message_addr */);
+ }
+
+ /*
+ * Init INTR resources
+ * Assumption : snic is always in MSI-X mode
+ */
+ SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);
+ mask_on_assertion = 1;
+
+ for (i = 0; i < snic->intr_count; i++) {
+ svnic_intr_init(&snic->intr[i],
+ snic->config.intr_timer,
+ snic->config.intr_timer_type,
+ mask_on_assertion);
+ }
+
+ /* init the stats memory by making the first call here */
+ ret = svnic_dev_stats_dump(snic->vdev, &snic->stats);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+ "svnic_dev_stats_dump failed - x%x\n",
+ ret);
+ goto error_cleanup;
+ }
+
+ /* Clear LIF stats */
+ svnic_dev_stats_clear(snic->vdev);
+ ret = 0;
+
+ return ret;
+
+error_cleanup:
+ snic_free_vnic_res(snic);
+
+ return ret;
+}
+
+void
+snic_log_q_error(struct snic *snic)
+{
+ unsigned int i;
+ u32 err_status;
+
+ for (i = 0; i < snic->wq_count; i++) {
+ err_status = ioread32(&snic->wq[i].ctrl->error_status);
+ if (err_status)
+ SNIC_HOST_ERR(snic->shost,
+ "WQ[%d] error status %d\n",
+ i,
+ err_status);
+ }
+} /* end of snic_log_q_error */
diff --git a/drivers/scsi/snic/snic_res.h b/drivers/scsi/snic/snic_res.h
new file mode 100644
index 000000000000..273f72f2a023
--- /dev/null
+++ b/drivers/scsi/snic/snic_res.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __SNIC_RES_H
+#define __SNIC_RES_H
+
+#include "snic_io.h"
+#include "wq_enet_desc.h"
+#include "vnic_wq.h"
+#include "snic_fwint.h"
+#include "vnic_cq_fw.h"
+
+static inline void
+snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx,
+ u16 flags, u64 tgt_id, u8 *lun, u8 *scsi_cdb, u8 cdb_len,
+ u32 data_len, u16 sg_cnt, ulong sgl_addr,
+ dma_addr_t sns_addr_pa, u32 sense_len)
+{
+ snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt,
+ ctx);
+
+ req->u.icmnd.flags = cpu_to_le16(flags);
+ req->u.icmnd.tgt_id = cpu_to_le64(tgt_id);
+ memcpy(&req->u.icmnd.lun_id, lun, LUN_ADDR_LEN);
+ req->u.icmnd.cdb_len = cdb_len;
+ memset(req->u.icmnd.cdb, 0, SNIC_CDB_LEN);
+ memcpy(req->u.icmnd.cdb, scsi_cdb, cdb_len);
+ req->u.icmnd.data_len = cpu_to_le32(data_len);
+ req->u.icmnd.sg_addr = cpu_to_le64(sgl_addr);
+ req->u.icmnd.sense_len = cpu_to_le32(sense_len);
+ req->u.icmnd.sense_addr = cpu_to_le64(sns_addr_pa);
+}
+
+static inline void
+snic_itmf_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, ulong ctx,
+ u16 flags, u32 req_id, u64 tgt_id, u8 *lun, u8 tm_type)
+{
+ snic_io_hdr_enc(&req->hdr, SNIC_REQ_ITMF, 0, cmnd_id, host_id, 0, ctx);
+
+ req->u.itmf.tm_type = tm_type;
+ req->u.itmf.flags = cpu_to_le16(flags);
+ /* req_id valid only in abort, clear task */
+ req->u.itmf.req_id = cpu_to_le32(req_id);
+ req->u.itmf.tgt_id = cpu_to_le64(tgt_id);
+ memcpy(&req->u.itmf.lun_id, lun, LUN_ADDR_LEN);
+}
+
+static inline void
+snic_queue_wq_eth_desc(struct vnic_wq *wq,
+ void *os_buf,
+ dma_addr_t dma_addr,
+ unsigned int len,
+ int vlan_tag_insert,
+ unsigned int vlan_tag,
+ int cq_entry)
+{
+ struct wq_enet_desc *desc = svnic_wq_next_desc(wq);
+
+ wq_enet_desc_enc(desc,
+ (u64)dma_addr | VNIC_PADDR_TARGET,
+ (u16)len,
+ 0, /* mss_or_csum_offset */
+ 0, /* fc_eof */
+ 0, /* offload mode */
+ 1, /* eop */
+ (u8)cq_entry,
+ 0, /* fcoe_encap */
+ (u8)vlan_tag_insert,
+ (u16)vlan_tag,
+ 0 /* loopback */);
+
+ svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
+}
+
+struct snic;
+
+int snic_get_vnic_config(struct snic *);
+int snic_alloc_vnic_res(struct snic *);
+void snic_free_vnic_res(struct snic *);
+void snic_get_res_counts(struct snic *);
+void snic_log_q_error(struct snic *);
+int snic_get_vnic_resources_size(struct snic *);
+#endif /* __SNIC_RES_H */
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
new file mode 100644
index 000000000000..2c7b4c321cbe
--- /dev/null
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -0,0 +1,2632 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+
+#include "snic_io.h"
+#include "snic.h"
+
+#define snic_cmd_tag(sc) (((struct scsi_cmnd *) sc)->request->tag)
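+
+/*
+ * The tag above is the block-layer request tag, so tags issued by the
+ * mid-layer fall in [0, snic->max_tag_id); the completion handlers
+ * below treat anything outside that range (other than the ioctl
+ * special cases) as an error.
+ */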
+
+const char *snic_state_str[] = {
+ [SNIC_INIT] = "SNIC_INIT",
+ [SNIC_ERROR] = "SNIC_ERROR",
+ [SNIC_ONLINE] = "SNIC_ONLINE",
+ [SNIC_OFFLINE] = "SNIC_OFFLINE",
+ [SNIC_FWRESET] = "SNIC_FWRESET",
+};
+
+static const char * const snic_req_state_str[] = {
+ [SNIC_IOREQ_NOT_INITED] = "SNIC_IOREQ_NOT_INITED",
+ [SNIC_IOREQ_PENDING] = "SNIC_IOREQ_PENDING",
+ [SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING",
+	[SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE",
+	[SNIC_IOREQ_LR_PENDING] = "SNIC_IOREQ_LR_PENDING",
+	[SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE",
+	[SNIC_IOREQ_COMPLETE] = "SNIC_IOREQ_CMD_COMPLETE",
+};
+
+/* snic cmd status strings */
+static const char * const snic_io_status_str[] = {
+ [SNIC_STAT_IO_SUCCESS] = "SNIC_STAT_IO_SUCCESS", /* 0x0 */
+ [SNIC_STAT_INVALID_HDR] = "SNIC_STAT_INVALID_HDR",
+ [SNIC_STAT_OUT_OF_RES] = "SNIC_STAT_OUT_OF_RES",
+ [SNIC_STAT_INVALID_PARM] = "SNIC_STAT_INVALID_PARM",
+ [SNIC_STAT_REQ_NOT_SUP] = "SNIC_STAT_REQ_NOT_SUP",
+ [SNIC_STAT_IO_NOT_FOUND] = "SNIC_STAT_IO_NOT_FOUND",
+ [SNIC_STAT_ABORTED] = "SNIC_STAT_ABORTED",
+ [SNIC_STAT_TIMEOUT] = "SNIC_STAT_TIMEOUT",
+ [SNIC_STAT_SGL_INVALID] = "SNIC_STAT_SGL_INVALID",
+ [SNIC_STAT_DATA_CNT_MISMATCH] = "SNIC_STAT_DATA_CNT_MISMATCH",
+ [SNIC_STAT_FW_ERR] = "SNIC_STAT_FW_ERR",
+ [SNIC_STAT_ITMF_REJECT] = "SNIC_STAT_ITMF_REJECT",
+ [SNIC_STAT_ITMF_FAIL] = "SNIC_STAT_ITMF_FAIL",
+ [SNIC_STAT_ITMF_INCORRECT_LUN] = "SNIC_STAT_ITMF_INCORRECT_LUN",
+ [SNIC_STAT_CMND_REJECT] = "SNIC_STAT_CMND_REJECT",
+ [SNIC_STAT_DEV_OFFLINE] = "SNIC_STAT_DEV_OFFLINE",
+ [SNIC_STAT_NO_BOOTLUN] = "SNIC_STAT_NO_BOOTLUN",
+ [SNIC_STAT_SCSI_ERR] = "SNIC_STAT_SCSI_ERR",
+ [SNIC_STAT_NOT_READY] = "SNIC_STAT_NOT_READY",
+ [SNIC_STAT_FATAL_ERROR] = "SNIC_STAT_FATAL_ERROR",
+};
+
+static void snic_scsi_cleanup(struct snic *, int);
+
+const char *
+snic_state_to_str(unsigned int state)
+{
+ if (state >= ARRAY_SIZE(snic_state_str) || !snic_state_str[state])
+ return "Unknown";
+
+ return snic_state_str[state];
+}
+
+static const char *
+snic_io_status_to_str(unsigned int state)
+{
+ if ((state >= ARRAY_SIZE(snic_io_status_str)) ||
+ (!snic_io_status_str[state]))
+ return "Unknown";
+
+ return snic_io_status_str[state];
+}
+
+static const char *
+snic_ioreq_state_to_str(unsigned int state)
+{
+ if (state >= ARRAY_SIZE(snic_req_state_str) ||
+ !snic_req_state_str[state])
+ return "Unknown";
+
+ return snic_req_state_str[state];
+}
+
+static inline spinlock_t *
+snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc)
+{
+ u32 hash = snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1);
+
+ return &snic->io_req_lock[hash];
+}
+
+static inline spinlock_t *
+snic_io_lock_tag(struct snic *snic, int tag)
+{
+ return &snic->io_req_lock[tag & (SNIC_IO_LOCKS - 1)];
+}
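+
+/*
+ * Both helpers above pick one of SNIC_IO_LOCKS spinlocks by masking the
+ * tag, which assumes SNIC_IO_LOCKS is a power of two. A stand-alone
+ * sketch of the mapping (the value 64 is assumed purely for
+ * illustration):
+ *
+ *	#include <stdio.h>
+ *	#define SNIC_IO_LOCKS 64
+ *
+ *	int main(void)
+ *	{
+ *		unsigned int tag;
+ *
+ *		for (tag = 62; tag < 67; tag++)
+ *			printf("tag %u -> lock %u\n",
+ *			       tag, tag & (SNIC_IO_LOCKS - 1));
+ *
+ *		return 0;	// tags 64..66 wrap to locks 0..2
+ *	}
+ */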
+
+/* snic_release_req_buf : Releases snic_req_info */
+static void
+snic_release_req_buf(struct snic *snic,
+ struct snic_req_info *rqi,
+ struct scsi_cmnd *sc)
+{
+ struct snic_host_req *req = rqi_to_req(rqi);
+
+ /* Freeing cmd without marking completion, not okay */
+ SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) ||
+ (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) ||
+ (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) ||
+ (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) ||
+ (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) ||
+ (CMD_FLAGS(sc) & SNIC_SCSI_CLEANUP) ||
+ (CMD_STATE(sc) == SNIC_IOREQ_LR_COMPLETE)));
+
+ SNIC_SCSI_DBG(snic->shost,
+ "Rel_req:sc %p:tag %x:rqi %p:ioreq %p:abt %p:dr %p: state %s:flags 0x%llx\n",
+ sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req,
+ rqi->dr_req, snic_ioreq_state_to_str(CMD_STATE(sc)),
+ CMD_FLAGS(sc));
+
+ if (req->u.icmnd.sense_addr)
+ pci_unmap_single(snic->pdev,
+ le64_to_cpu(req->u.icmnd.sense_addr),
+ SCSI_SENSE_BUFFERSIZE,
+ PCI_DMA_FROMDEVICE);
+
+ scsi_dma_unmap(sc);
+
+ snic_req_free(snic, rqi);
+} /* end of snic_release_req_buf */
+
+/*
+ * snic_queue_icmnd_req : Queues snic_icmnd request
+ */
+static int
+snic_queue_icmnd_req(struct snic *snic,
+ struct snic_req_info *rqi,
+ struct scsi_cmnd *sc,
+ int sg_cnt)
+{
+ struct scatterlist *sg;
+ struct snic_sg_desc *sgd;
+ dma_addr_t pa = 0;
+ struct scsi_lun lun;
+ u16 flags = 0;
+ int ret = 0;
+ unsigned int i;
+
+ if (sg_cnt) {
+ flags = SNIC_ICMND_ESGL;
+ sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req);
+
+ for_each_sg(scsi_sglist(sc), sg, sg_cnt, i) {
+ sgd->addr = cpu_to_le64(sg_dma_address(sg));
+ sgd->len = cpu_to_le32(sg_dma_len(sg));
+ sgd->_resvd = 0;
+ sgd++;
+ }
+ }
+
+ pa = pci_map_single(snic->pdev,
+ sc->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(snic->pdev, pa)) {
+ SNIC_HOST_ERR(snic->shost,
+ "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
+ sc->sense_buffer, snic_cmd_tag(sc));
+ ret = -ENOMEM;
+
+ return ret;
+ }
+
+ int_to_scsilun(sc->device->lun, &lun);
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ flags |= SNIC_ICMND_RD;
+ if (sc->sc_data_direction == DMA_TO_DEVICE)
+ flags |= SNIC_ICMND_WR;
+
+ /* Initialize icmnd */
+ snic_icmnd_init(rqi->req,
+ snic_cmd_tag(sc),
+ snic->config.hid, /* hid */
+ (ulong) rqi,
+ flags, /* command flags */
+ rqi->tgt_id,
+ lun.scsi_lun,
+ sc->cmnd,
+ sc->cmd_len,
+ scsi_bufflen(sc),
+ sg_cnt,
+ (ulong) req_to_sgl(rqi->req),
+ pa, /* sense buffer pa */
+ SCSI_SENSE_BUFFERSIZE);
+
+ ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
+ if (ret)
+ SNIC_HOST_ERR(snic->shost,
+ "QIcmnd: Queuing Icmnd Failed. ret = %d\n",
+ ret);
+
+ return ret;
+} /* end of snic_queue_icmnd_req */
+
+/*
+ * snic_issue_scsi_req : Prepares IO request and Issues to FW.
+ */
+static int
+snic_issue_scsi_req(struct snic *snic,
+ struct snic_tgt *tgt,
+ struct scsi_cmnd *sc)
+{
+ struct snic_req_info *rqi = NULL;
+ int sg_cnt = 0;
+ int ret = 0;
+ u32 tag = snic_cmd_tag(sc);
+ u64 cmd_trc = 0, cmd_st_flags = 0;
+ spinlock_t *io_lock = NULL;
+ unsigned long flags;
+
+ CMD_STATE(sc) = SNIC_IOREQ_NOT_INITED;
+ CMD_FLAGS(sc) = SNIC_NO_FLAGS;
+ sg_cnt = scsi_dma_map(sc);
+ if (sg_cnt < 0) {
+ SNIC_TRC((u16)snic->shost->host_no, tag, (ulong) sc, 0,
+ sc->cmnd[0], sg_cnt, CMD_STATE(sc));
+
+ SNIC_HOST_ERR(snic->shost, "issue_sc:Failed to map SG List.\n");
+ ret = -ENOMEM;
+
+ goto issue_sc_end;
+ }
+
+ rqi = snic_req_init(snic, sg_cnt);
+ if (!rqi) {
+ scsi_dma_unmap(sc);
+ ret = -ENOMEM;
+
+ goto issue_sc_end;
+ }
+
+ rqi->tgt_id = tgt->id;
+ rqi->sc = sc;
+
+ CMD_STATE(sc) = SNIC_IOREQ_PENDING;
+ CMD_SP(sc) = (char *) rqi;
+ cmd_trc = SNIC_TRC_CMD(sc);
+ CMD_FLAGS(sc) |= (SNIC_IO_INITIALIZED | SNIC_IO_ISSUED);
+ cmd_st_flags = SNIC_TRC_CMD_STATE_FLAGS(sc);
+ io_lock = snic_io_lock_hash(snic, sc);
+
+ /* create wq desc and enqueue it */
+ ret = snic_queue_icmnd_req(snic, rqi, sc, sg_cnt);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+ "issue_sc: icmnd qing Failed for sc %p, err %d\n",
+ sc, ret);
+
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ CMD_SP(sc) = NULL;
+ CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
+ CMD_FLAGS(sc) &= ~SNIC_IO_ISSUED; /* turn off the flag */
+ spin_unlock_irqrestore(io_lock, flags);
+
+ if (rqi)
+ snic_release_req_buf(snic, rqi, sc);
+
+ SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 0, 0, 0,
+ SNIC_TRC_CMD_STATE_FLAGS(sc));
+ } else {
+ u32 io_sz = scsi_bufflen(sc) >> 9;
+ u32 qtime = jiffies - rqi->start_time;
+ struct snic_io_stats *iostats = &snic->s_stats.io;
+
+ if (io_sz > atomic64_read(&iostats->max_io_sz))
+ atomic64_set(&iostats->max_io_sz, io_sz);
+
+ if (qtime > atomic64_read(&iostats->max_qtime))
+ atomic64_set(&iostats->max_qtime, qtime);
+
+ SNIC_SCSI_DBG(snic->shost,
+ "issue_sc:sc %p, tag %d queued to WQ.\n",
+ sc, tag);
+
+ SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, (ulong) rqi,
+ sg_cnt, cmd_trc, cmd_st_flags);
+ }
+
+issue_sc_end:
+
+ return ret;
+} /* end of snic_issue_scsi_req */
+
+
+/*
+ * snic_queuecommand
+ * Routine to send a scsi cdb to LLD
+ * Called with host_lock held and interrupts disabled
+ */
+int
+snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
+{
+ struct snic_tgt *tgt = NULL;
+ struct snic *snic = shost_priv(shost);
+ int ret;
+
+ tgt = starget_to_tgt(scsi_target(sc->device));
+ ret = snic_tgt_chkready(tgt);
+ if (ret) {
+ SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
+ atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
+ sc->result = ret;
+ sc->scsi_done(sc);
+
+ return 0;
+ }
+
+ if (snic_get_state(snic) != SNIC_ONLINE) {
+ SNIC_HOST_ERR(shost, "snic state is %s\n",
+ snic_state_str[snic_get_state(snic)]);
+
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ atomic_inc(&snic->ios_inflight);
+
+	SNIC_SCSI_DBG(shost, "sc %p Tag %d (cmd 0x%x) lun %lld in snic_qcmd\n",
+ sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);
+
+ memset(scsi_cmd_priv(sc), 0, sizeof(struct snic_internal_io_state));
+
+ ret = snic_issue_scsi_req(snic, tgt, sc);
+ if (ret) {
+ SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ } else
+ snic_stats_update_active_ios(&snic->s_stats);
+
+ atomic_dec(&snic->ios_inflight);
+
+ return ret;
+} /* end of snic_queuecommand */
+
+/*
+ * snic_proc_tmreq_pending_state:
+ * caller should hold IO lock
+ */
+static void
+snic_proc_tmreq_pending_state(struct snic *snic,
+ struct scsi_cmnd *sc,
+ u8 cmpl_status)
+{
+ int state = CMD_STATE(sc);
+
+ if (state == SNIC_IOREQ_ABTS_PENDING)
+ CMD_FLAGS(sc) |= SNIC_IO_ABTS_PENDING;
+ else if (state == SNIC_IOREQ_LR_PENDING)
+ CMD_FLAGS(sc) |= SNIC_DEV_RST_PENDING;
+ else
+ SNIC_BUG_ON(1);
+
+ switch (cmpl_status) {
+ case SNIC_STAT_IO_SUCCESS:
+ CMD_FLAGS(sc) |= SNIC_IO_DONE;
+ break;
+
+ case SNIC_STAT_ABORTED:
+ CMD_FLAGS(sc) |= SNIC_IO_ABORTED;
+ break;
+
+ default:
+ SNIC_BUG_ON(1);
+ }
+}
+
+/*
+ * snic_process_io_failed_state:
+ * Processes IO's error states
+ */
+static void
+snic_process_io_failed_state(struct snic *snic,
+ struct snic_icmnd_cmpl *icmnd_cmpl,
+ struct scsi_cmnd *sc,
+ u8 cmpl_stat)
+{
+ int res = 0;
+
+ switch (cmpl_stat) {
+	case SNIC_STAT_TIMEOUT: /* Req timed out */
+ atomic64_inc(&snic->s_stats.misc.io_tmo);
+ res = DID_TIME_OUT;
+ break;
+
+ case SNIC_STAT_ABORTED: /* Req was aborted */
+ atomic64_inc(&snic->s_stats.misc.io_aborted);
+ res = DID_ABORT;
+ break;
+
+ case SNIC_STAT_DATA_CNT_MISMATCH:/* Recv/Sent more/less data than exp */
+ atomic64_inc(&snic->s_stats.misc.data_cnt_mismat);
+ scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
+ res = DID_ERROR;
+ break;
+
+ case SNIC_STAT_OUT_OF_RES: /* Out of resources to complete request */
+ atomic64_inc(&snic->s_stats.fw.out_of_res);
+ res = DID_REQUEUE;
+ break;
+
+ case SNIC_STAT_IO_NOT_FOUND: /* Requested I/O was not found */
+ atomic64_inc(&snic->s_stats.io.io_not_found);
+ res = DID_ERROR;
+ break;
+
+	case SNIC_STAT_SGL_INVALID: /* Req was aborted due to sgl error */
+ atomic64_inc(&snic->s_stats.misc.sgl_inval);
+ res = DID_ERROR;
+ break;
+
+ case SNIC_STAT_FW_ERR: /* Req terminated due to FW Error */
+ atomic64_inc(&snic->s_stats.fw.io_errs);
+ res = DID_ERROR;
+ break;
+
+ case SNIC_STAT_SCSI_ERR: /* FW hits SCSI Error */
+ atomic64_inc(&snic->s_stats.fw.scsi_errs);
+ break;
+
+ case SNIC_STAT_NOT_READY: /* XPT yet to initialize */
+ case SNIC_STAT_DEV_OFFLINE: /* Device offline */
+ res = DID_NO_CONNECT;
+ break;
+
+ case SNIC_STAT_INVALID_HDR: /* Hdr contains invalid data */
+ case SNIC_STAT_INVALID_PARM: /* Some param in req is invalid */
+ case SNIC_STAT_REQ_NOT_SUP: /* Req type is not supported */
+ case SNIC_STAT_CMND_REJECT: /* Req rejected */
+ case SNIC_STAT_FATAL_ERROR: /* XPT Error */
+ default:
+ SNIC_SCSI_DBG(snic->shost,
+			      "Invalid hdr/param, req not supported, cmnd rejected, device offline, or unknown status\n");
+ res = DID_ERROR;
+ break;
+ }
+
+ SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n",
+ snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
+
+ /* Set sc->result */
+ sc->result = (res << 16) | icmnd_cmpl->scsi_status;
+} /* end of snic_process_io_failed_state */
+
+/*
+ * snic_tmreq_pending : is task management in progress.
+ */
+static int
+snic_tmreq_pending(struct scsi_cmnd *sc)
+{
+ int state = CMD_STATE(sc);
+
+ return ((state == SNIC_IOREQ_ABTS_PENDING) ||
+ (state == SNIC_IOREQ_LR_PENDING));
+}
+
+/*
+ * snic_process_icmnd_cmpl_status:
+ * Caller should hold io_lock
+ */
+static int
+snic_process_icmnd_cmpl_status(struct snic *snic,
+ struct snic_icmnd_cmpl *icmnd_cmpl,
+ u8 cmpl_stat,
+ struct scsi_cmnd *sc)
+{
+ u8 scsi_stat = icmnd_cmpl->scsi_status;
+ u64 xfer_len = 0;
+ int ret = 0;
+
+ /* Mark the IO as complete */
+ CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
+
+ if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) {
+ sc->result = (DID_OK << 16) | scsi_stat;
+
+ xfer_len = scsi_bufflen(sc);
+
+ /* Update SCSI Cmd with resid value */
+ scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
+
+ if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) {
+ xfer_len -= le32_to_cpu(icmnd_cmpl->resid);
+ atomic64_inc(&snic->s_stats.misc.io_under_run);
+ }
+
+ if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
+ atomic64_inc(&snic->s_stats.misc.qfull);
+
+ ret = 0;
+ } else {
+ snic_process_io_failed_state(snic, icmnd_cmpl, sc, cmpl_stat);
+ atomic64_inc(&snic->s_stats.io.fail);
+ SNIC_HOST_ERR(snic->shost,
+ "icmnd_cmpl: IO Failed : Hdr Status %s flags 0x%llx\n",
+ snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
+ ret = 1;
+ }
+
+ return ret;
+} /* end of snic_process_icmnd_cmpl_status */
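+
+/*
+ * Worked example for the success path above: a 4096-byte read completed
+ * with resid 512 and SNIC_ICMND_CMPL_UNDR_RUN set yields
+ * xfer_len = 4096 - 512 = 3584, and scsi_set_resid() reports the
+ * 512-byte shortfall to the mid-layer.
+ */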
+
+
+/*
+ * snic_icmnd_cmpl_handler
+ * Routine to handle icmnd completions
+ */
+static void
+snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
+{
+ u8 typ, hdr_stat;
+ u32 cmnd_id, hid;
+ ulong ctx;
+ struct scsi_cmnd *sc = NULL;
+ struct snic_icmnd_cmpl *icmnd_cmpl = NULL;
+ struct snic_req_info *rqi = NULL;
+ unsigned long flags, start_time;
+ spinlock_t *io_lock;
+ u8 sc_stat = 0;
+
+ snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
+ icmnd_cmpl = &fwreq->u.icmnd_cmpl;
+ sc_stat = icmnd_cmpl->scsi_status;
+
+ SNIC_SCSI_DBG(snic->shost,
+		      "Icmnd_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
+ typ, hdr_stat, cmnd_id, hid, ctx);
+
+ if (cmnd_id >= snic->max_tag_id) {
+ SNIC_HOST_ERR(snic->shost,
+ "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n",
+ cmnd_id, snic_io_status_to_str(hdr_stat));
+ return;
+ }
+
+ sc = scsi_host_find_tag(snic->shost, cmnd_id);
+ WARN_ON_ONCE(!sc);
+
+ if (!sc) {
+ atomic64_inc(&snic->s_stats.io.sc_null);
+ SNIC_HOST_ERR(snic->shost,
+ "Icmnd_cmpl: Scsi Cmnd Not found, sc = NULL Hdr Status = %s tag = 0x%x fwreq = 0x%p\n",
+ snic_io_status_to_str(hdr_stat),
+ cmnd_id,
+ fwreq);
+
+ SNIC_TRC(snic->shost->host_no, cmnd_id, 0,
+ ((u64)hdr_stat << 16 |
+ (u64)sc_stat << 8 | (u64)icmnd_cmpl->flags),
+ (ulong) fwreq, le32_to_cpu(icmnd_cmpl->resid), ctx);
+
+ return;
+ }
+
+ io_lock = snic_io_lock_hash(snic, sc);
+
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ SNIC_SCSI_DBG(snic->shost,
+		      "Icmnd_cmpl:lun %lld sc %p cmd %x tag %d flags 0x%llx rqi %p\n",
+ sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
+ CMD_FLAGS(sc), rqi);
+
+ SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);
+	WARN_ON_ONCE(!rqi);
+ if (!rqi) {
+ atomic64_inc(&snic->s_stats.io.req_null);
+ CMD_FLAGS(sc) |= SNIC_IO_REQ_NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ SNIC_HOST_ERR(snic->shost,
+ "Icmnd_cmpl:Host Req Not Found(null), Hdr Status %s, Tag 0x%x, sc 0x%p flags 0x%llx\n",
+ snic_io_status_to_str(hdr_stat),
+ cmnd_id, sc, CMD_FLAGS(sc));
+ return;
+ }
+
+ rqi = (struct snic_req_info *) ctx;
+ start_time = rqi->start_time;
+
+ /* firmware completed the io */
+ rqi->io_cmpl = 1;
+
+ /*
+ * if SCSI-ML has already issued abort on this command,
+ * ignore completion of the IO. The abts path will clean it up
+ */
+ if (unlikely(snic_tmreq_pending(sc))) {
+ snic_proc_tmreq_pending_state(snic, sc, hdr_stat);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ snic_stats_update_io_cmpl(&snic->s_stats);
+
+ /* Expected value is SNIC_STAT_ABORTED */
+ if (likely(hdr_stat == SNIC_STAT_ABORTED))
+ return;
+
+ SNIC_SCSI_DBG(snic->shost,
+ "icmnd_cmpl:TM Req Pending(%s), Hdr Status %s sc 0x%p scsi status %x resid %d flags 0x%llx\n",
+ snic_ioreq_state_to_str(CMD_STATE(sc)),
+ snic_io_status_to_str(hdr_stat),
+ sc, sc_stat, le32_to_cpu(icmnd_cmpl->resid),
+ CMD_FLAGS(sc));
+
+ SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
+ SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+ return;
+ }
+
+ if (snic_process_icmnd_cmpl_status(snic, icmnd_cmpl, hdr_stat, sc)) {
+ scsi_print_command(sc);
+ SNIC_HOST_ERR(snic->shost,
+ "icmnd_cmpl:IO Failed, sc 0x%p Tag %d Cmd %x Hdr Status %s flags 0x%llx\n",
+ sc, sc->cmnd[0], cmnd_id,
+ snic_io_status_to_str(hdr_stat), CMD_FLAGS(sc));
+ }
+
+ /* Break link with the SCSI Command */
+ CMD_SP(sc) = NULL;
+ CMD_FLAGS(sc) |= SNIC_IO_DONE;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* For now, consider only successful IO. */
+ snic_calc_io_process_time(snic, rqi);
+
+ snic_release_req_buf(snic, rqi, sc);
+
+ SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
+ SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+
+ if (sc->scsi_done)
+ sc->scsi_done(sc);
+
+ snic_stats_update_io_cmpl(&snic->s_stats);
+} /* end of snic_icmnd_cmpl_handler */
+
+static void
+snic_proc_dr_cmpl_locked(struct snic *snic,
+ struct snic_fw_req *fwreq,
+ u8 cmpl_stat,
+ u32 cmnd_id,
+ struct scsi_cmnd *sc)
+{
+ struct snic_req_info *rqi = (struct snic_req_info *) CMD_SP(sc);
+ u32 start_time = rqi->start_time;
+
+ CMD_LR_STATUS(sc) = cmpl_stat;
+
+ SNIC_SCSI_DBG(snic->shost, "itmf_cmpl: Cmd State = %s\n",
+ snic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
+ CMD_FLAGS(sc) |= SNIC_DEV_RST_ABTS_PENDING;
+
+ SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time),
+ (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+ SNIC_SCSI_DBG(snic->shost,
+ "itmf_cmpl: Terminate Pending Dev Reset Cmpl Recvd.id %x, status %s flags 0x%llx\n",
+ (int)(cmnd_id & SNIC_TAG_MASK),
+ snic_io_status_to_str(cmpl_stat),
+ CMD_FLAGS(sc));
+
+ return;
+ }
+
+
+ if (CMD_FLAGS(sc) & SNIC_DEV_RST_TIMEDOUT) {
+ SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time),
+ (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+ SNIC_SCSI_DBG(snic->shost,
+ "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n",
+ (int)(cmnd_id & SNIC_TAG_MASK),
+ snic_io_status_to_str(cmpl_stat),
+ CMD_FLAGS(sc));
+
+ return;
+ }
+
+ CMD_STATE(sc) = SNIC_IOREQ_LR_COMPLETE;
+ CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;
+
+ SNIC_SCSI_DBG(snic->shost,
+ "itmf_cmpl:Dev Reset Cmpl Recvd id %d cmpl status %s flags 0x%llx\n",
+ (int)(cmnd_id & SNIC_TAG_MASK),
+ snic_io_status_to_str(cmpl_stat),
+ CMD_FLAGS(sc));
+
+ if (rqi->dr_done)
+ complete(rqi->dr_done);
+} /* end of snic_proc_dr_cmpl_locked */
+
+/*
+ * snic_update_abort_stats : Updates abort stats based on completion status.
+ */
+static void
+snic_update_abort_stats(struct snic *snic, u8 cmpl_stat)
+{
+ struct snic_abort_stats *abt_stats = &snic->s_stats.abts;
+
+ SNIC_SCSI_DBG(snic->shost, "Updating Abort stats.\n");
+
+ switch (cmpl_stat) {
+ case SNIC_STAT_IO_SUCCESS:
+ break;
+
+ case SNIC_STAT_TIMEOUT:
+ atomic64_inc(&abt_stats->fw_tmo);
+ break;
+
+ case SNIC_STAT_IO_NOT_FOUND:
+ atomic64_inc(&abt_stats->io_not_found);
+ break;
+
+ default:
+ atomic64_inc(&abt_stats->fail);
+ break;
+ }
+}
+
+static int
+snic_process_itmf_cmpl(struct snic *snic,
+ struct snic_fw_req *fwreq,
+ u32 cmnd_id,
+ u8 cmpl_stat,
+ struct scsi_cmnd *sc)
+{
+ struct snic_req_info *rqi = NULL;
+ u32 tm_tags = 0;
+ spinlock_t *io_lock = NULL;
+ unsigned long flags;
+ u32 start_time = 0;
+ int ret = 0;
+
+ io_lock = snic_io_lock_hash(snic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ WARN_ON_ONCE(!rqi);
+
+ if (!rqi) {
+ atomic64_inc(&snic->s_stats.io.req_null);
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
+ SNIC_HOST_ERR(snic->shost,
+ "itmf_cmpl: rqi is null,Hdr stat = %s Tag = 0x%x sc = 0x%p flags 0x%llx\n",
+ snic_io_status_to_str(cmpl_stat), cmnd_id, sc,
+ CMD_FLAGS(sc));
+
+ return ret;
+ }
+
+ /* Extract task management flags */
+ tm_tags = cmnd_id & ~(SNIC_TAG_MASK);
+
+ start_time = rqi->start_time;
+ cmnd_id &= (SNIC_TAG_MASK);
+
+ switch (tm_tags) {
+ case SNIC_TAG_ABORT:
+ /* Abort only issued on cmd */
+ snic_update_abort_stats(snic, cmpl_stat);
+
+ if (CMD_STATE(sc) != SNIC_IOREQ_ABTS_PENDING) {
+ /* This is a late completion. Ignore it. */
+ ret = -1;
+ spin_unlock_irqrestore(io_lock, flags);
+ break;
+ }
+
+ CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
+ CMD_ABTS_STATUS(sc) = cmpl_stat;
+ CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
+
+ SNIC_SCSI_DBG(snic->shost,
+ "itmf_cmpl:Abort Cmpl Recvd.Tag 0x%x Status %s flags 0x%llx\n",
+ cmnd_id,
+ snic_io_status_to_str(cmpl_stat),
+ CMD_FLAGS(sc));
+
+ /*
+ * If scsi_eh thread is blocked waiting for abts complete,
+ * signal completion to it. IO will be cleaned in the thread,
+ * else clean it in this context.
+ */
+ if (rqi->abts_done) {
+ complete(rqi->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ break; /* jump out */
+ }
+
+ CMD_SP(sc) = NULL;
+ sc->result = (DID_ERROR << 16);
+ SNIC_SCSI_DBG(snic->shost,
+ "itmf_cmpl: Completing IO. sc %p flags 0x%llx\n",
+ sc, CMD_FLAGS(sc));
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ snic_release_req_buf(snic, rqi, sc);
+
+ if (sc->scsi_done) {
+ SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time),
+ (ulong) fwreq, SNIC_TRC_CMD(sc),
+ SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+ sc->scsi_done(sc);
+ }
+
+ break;
+
+ case SNIC_TAG_DEV_RST:
+ case SNIC_TAG_DEV_RST | SNIC_TAG_IOCTL_DEV_RST:
+ snic_proc_dr_cmpl_locked(snic, fwreq, cmpl_stat, cmnd_id, sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = 0;
+
+ break;
+
+ case SNIC_TAG_ABORT | SNIC_TAG_DEV_RST:
+ /* Abort and terminate completion of device reset req */
+
+ CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
+ CMD_ABTS_STATUS(sc) = cmpl_stat;
+ CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;
+
+ SNIC_SCSI_DBG(snic->shost,
+ "itmf_cmpl:dev reset abts cmpl recvd. id %d status %s flags 0x%llx\n",
+ cmnd_id, snic_io_status_to_str(cmpl_stat),
+ CMD_FLAGS(sc));
+
+ if (rqi->abts_done)
+ complete(rqi->abts_done);
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ break;
+
+ default:
+ spin_unlock_irqrestore(io_lock, flags);
+ SNIC_HOST_ERR(snic->shost,
+ "itmf_cmpl: Unknown TM tag bit 0x%x\n", tm_tags);
+
+ SNIC_HOST_ERR(snic->shost,
+ "itmf_cmpl:Unexpected itmf io stat %s Tag = 0x%x flags 0x%llx\n",
+ snic_ioreq_state_to_str(CMD_STATE(sc)),
+ cmnd_id,
+ CMD_FLAGS(sc));
+ ret = -1;
+ SNIC_BUG_ON(1);
+
+ break;
+ }
+
+ return ret;
+} /* end of snic_process_itmf_cmpl_status */
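+
+/*
+ * A minimal sketch of the tm-tag round trip decoded above, assuming
+ * only (as the masking implies) that SNIC_TAG_MASK covers the
+ * low-order tag bits and the SNIC_TAG_* specials occupy disjoint
+ * high bits:
+ *
+ *	u32 cmnd_id = snic_cmd_tag(sc) | SNIC_TAG_ABORT;  // encode
+ *	u32 tm_tags = cmnd_id & ~SNIC_TAG_MASK;  // -> SNIC_TAG_ABORT
+ *	u32 tag     = cmnd_id & SNIC_TAG_MASK;   // -> original request tag
+ */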
+
+/*
+ * snic_itmf_cmpl_handler.
+ * Routine to handle itmf completions.
+ */
+static void
+snic_itmf_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
+{
+ struct scsi_cmnd *sc = NULL;
+ struct snic_req_info *rqi = NULL;
+ struct snic_itmf_cmpl *itmf_cmpl = NULL;
+ ulong ctx;
+ u32 cmnd_id;
+ u32 hid;
+ u8 typ;
+ u8 hdr_stat;
+
+ snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
+ SNIC_SCSI_DBG(snic->shost,
+		      "Itmf_cmpl: %s: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
+ __func__, typ, hdr_stat, cmnd_id, hid, ctx);
+
+ itmf_cmpl = &fwreq->u.itmf_cmpl;
+ SNIC_SCSI_DBG(snic->shost,
+		      "Itmf_cmpl: nterm %u, flags 0x%x\n",
+ le32_to_cpu(itmf_cmpl->nterminated), itmf_cmpl->flags);
+
+ /* spl case, dev reset issued through ioctl */
+ if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) {
+ rqi = (struct snic_req_info *) ctx;
+ sc = rqi->sc;
+
+ goto ioctl_dev_rst;
+ }
+
+ if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) {
+ SNIC_HOST_ERR(snic->shost,
+ "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
+ cmnd_id, snic_io_status_to_str(hdr_stat));
+ SNIC_BUG_ON(1);
+
+ return;
+ }
+
+ sc = scsi_host_find_tag(snic->shost, cmnd_id & SNIC_TAG_MASK);
+ WARN_ON_ONCE(!sc);
+
+ioctl_dev_rst:
+ if (!sc) {
+ atomic64_inc(&snic->s_stats.io.sc_null);
+ SNIC_HOST_ERR(snic->shost,
+ "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
+ snic_io_status_to_str(hdr_stat), cmnd_id);
+
+ return;
+ }
+
+ snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc);
+} /* end of snic_itmf_cmpl_handler */
+
+
+
+static void
+snic_hba_reset_scsi_cleanup(struct snic *snic, struct scsi_cmnd *sc)
+{
+ struct snic_stats *st = &snic->s_stats;
+ long act_ios = 0, act_fwreqs = 0;
+
+ SNIC_SCSI_DBG(snic->shost, "HBA Reset scsi cleanup.\n");
+ snic_scsi_cleanup(snic, snic_cmd_tag(sc));
+
+ /* Update stats on pending IOs */
+ act_ios = atomic64_read(&st->io.active);
+ atomic64_add(act_ios, &st->io.compl);
+ atomic64_sub(act_ios, &st->io.active);
+
+ act_fwreqs = atomic64_read(&st->fw.actv_reqs);
+ atomic64_sub(act_fwreqs, &st->fw.actv_reqs);
+}
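+
+/*
+ * Worked example for the stats adjustment above: with 10 IOs still
+ * active when the reset lands, io.compl grows by 10 and io.active
+ * drops by 10, so the counters balance as if those IOs had completed.
+ */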
+
+/*
+ * snic_hba_reset_cmpl_handler :
+ *
+ * Notes :
+ * 1. Cleanup all the scsi cmds, release all snic specific cmds
+ * 2. Issue Report Targets in case of SAN targets
+ */
+static int
+snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
+{
+ ulong ctx;
+ u32 cmnd_id;
+ u32 hid;
+ u8 typ;
+ u8 hdr_stat;
+ struct scsi_cmnd *sc = NULL;
+ struct snic_req_info *rqi = NULL;
+ spinlock_t *io_lock = NULL;
+ unsigned long flags, gflags;
+ int ret = 0;
+
+ SNIC_HOST_INFO(snic->shost,
+ "reset_cmpl:HBA Reset Completion received.\n");
+
+ snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
+ SNIC_SCSI_DBG(snic->shost,
+ "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
+ typ, hdr_stat, cmnd_id, hid, ctx);
+
+ /* spl case, host reset issued through ioctl */
+ if (cmnd_id == SCSI_NO_TAG) {
+ rqi = (struct snic_req_info *) ctx;
+ sc = rqi->sc;
+
+ goto ioctl_hba_rst;
+ }
+
+ if (cmnd_id >= snic->max_tag_id) {
+ SNIC_HOST_ERR(snic->shost,
+ "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
+ cmnd_id, snic_io_status_to_str(hdr_stat));
+ SNIC_BUG_ON(1);
+
+ return 1;
+ }
+
+ sc = scsi_host_find_tag(snic->shost, cmnd_id);
+ioctl_hba_rst:
+ if (!sc) {
+ atomic64_inc(&snic->s_stats.io.sc_null);
+ SNIC_HOST_ERR(snic->shost,
+ "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
+ snic_io_status_to_str(hdr_stat), cmnd_id);
+ ret = 1;
+
+ return ret;
+ }
+
+ io_lock = snic_io_lock_hash(snic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ if (!snic->remove_wait) {
+ spin_unlock_irqrestore(io_lock, flags);
+ SNIC_HOST_ERR(snic->shost,
+			      "reset_cmpl: host reset completed after timeout\n");
+ ret = 1;
+
+ return ret;
+ }
+
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ WARN_ON_ONCE(!rqi);
+
+ if (!rqi) {
+ atomic64_inc(&snic->s_stats.io.req_null);
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
+ SNIC_HOST_ERR(snic->shost,
+ "reset_cmpl: rqi is null,Hdr stat %s Tag 0x%x sc 0x%p flags 0x%llx\n",
+ snic_io_status_to_str(hdr_stat), cmnd_id, sc,
+ CMD_FLAGS(sc));
+
+ ret = 1;
+
+ return ret;
+ }
+ /* stats */
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* scsi cleanup */
+ snic_hba_reset_scsi_cleanup(snic, sc);
+
+ SNIC_BUG_ON(snic_get_state(snic) != SNIC_OFFLINE &&
+ snic_get_state(snic) != SNIC_FWRESET);
+
+ /* Careful locking between snic_lock and io lock */
+ spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&snic->snic_lock, gflags);
+ if (snic_get_state(snic) == SNIC_FWRESET)
+ snic_set_state(snic, SNIC_ONLINE);
+ spin_unlock_irqrestore(&snic->snic_lock, gflags);
+
+ if (snic->remove_wait)
+ complete(snic->remove_wait);
+
+ spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&snic->s_stats.reset.hba_reset_cmpl);
+
+ ret = 0;
+ /* Rediscovery is for SAN */
+ if (snic->config.xpt_type == SNIC_DAS)
+ return ret;
+
+ SNIC_SCSI_DBG(snic->shost, "reset_cmpl: Queuing discovery work.\n");
+ queue_work(snic_glob->event_q, &snic->disc_work);
+
+ return ret;
+}
+
+static void
+snic_msg_ack_handler(struct snic *snic, struct snic_fw_req *fwreq)
+{
+ SNIC_HOST_INFO(snic->shost, "Message Ack Received.\n");
+
+ SNIC_ASSERT_NOT_IMPL(1);
+}
+
+static void
+snic_aen_handler(struct snic *snic, struct snic_fw_req *fwreq)
+{
+ u8 typ, hdr_stat;
+ u32 cmnd_id, hid;
+ ulong ctx;
+ struct snic_async_evnotify *aen = &fwreq->u.async_ev;
+ u32 event_id = 0;
+
+ snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
+ SNIC_SCSI_DBG(snic->shost,
+ "aen: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
+ typ, hdr_stat, cmnd_id, hid, ctx);
+
+ event_id = le32_to_cpu(aen->ev_id);
+
+ switch (event_id) {
+ case SNIC_EV_TGT_OFFLINE:
+ SNIC_HOST_INFO(snic->shost, "aen:TGT_OFFLINE Event Recvd.\n");
+ break;
+
+ case SNIC_EV_TGT_ONLINE:
+ SNIC_HOST_INFO(snic->shost, "aen:TGT_ONLINE Event Recvd.\n");
+ break;
+
+ case SNIC_EV_LUN_OFFLINE:
+ SNIC_HOST_INFO(snic->shost, "aen:LUN_OFFLINE Event Recvd.\n");
+ break;
+
+ case SNIC_EV_LUN_ONLINE:
+ SNIC_HOST_INFO(snic->shost, "aen:LUN_ONLINE Event Recvd.\n");
+ break;
+
+ case SNIC_EV_CONF_CHG:
+ SNIC_HOST_INFO(snic->shost, "aen:Config Change Event Recvd.\n");
+ break;
+
+ case SNIC_EV_TGT_ADDED:
+ SNIC_HOST_INFO(snic->shost, "aen:TGT_ADD Event Recvd.\n");
+ break;
+
+ case SNIC_EV_TGT_DELTD:
+ SNIC_HOST_INFO(snic->shost, "aen:TGT_DEL Event Recvd.\n");
+ break;
+
+ case SNIC_EV_LUN_ADDED:
+ SNIC_HOST_INFO(snic->shost, "aen:LUN_ADD Event Recvd.\n");
+ break;
+
+ case SNIC_EV_LUN_DELTD:
+ SNIC_HOST_INFO(snic->shost, "aen:LUN_DEL Event Recvd.\n");
+ break;
+
+ case SNIC_EV_DISC_CMPL:
+ SNIC_HOST_INFO(snic->shost, "aen:DISC_CMPL Event Recvd.\n");
+ break;
+
+ default:
+ SNIC_HOST_INFO(snic->shost, "aen:Unknown Event Recvd.\n");
+ SNIC_BUG_ON(1);
+ break;
+ }
+
+ SNIC_ASSERT_NOT_IMPL(1);
+} /* end of snic_aen_handler */
+
+/*
+ * snic_io_cmpl_handler
+ * Routine to process CQ entries(IO Completions) posted by fw.
+ */
+static int
+snic_io_cmpl_handler(struct vnic_dev *vdev,
+ unsigned int cq_idx,
+ struct snic_fw_req *fwreq)
+{
+ struct snic *snic = svnic_dev_priv(vdev);
+ u64 start = jiffies, cmpl_time;
+
+ snic_print_desc(__func__, (char *)fwreq, sizeof(*fwreq));
+
+ /* Update FW Stats */
+ if ((fwreq->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) &&
+ (fwreq->hdr.type <= SNIC_RSP_BOOT_LUNS_CMPL))
+ atomic64_dec(&snic->s_stats.fw.actv_reqs);
+
+ SNIC_BUG_ON((fwreq->hdr.type > SNIC_RSP_BOOT_LUNS_CMPL) &&
+ (fwreq->hdr.type < SNIC_MSG_ASYNC_EVNOTIFY));
+
+ /* Check for snic subsys errors */
+ switch (fwreq->hdr.status) {
+ case SNIC_STAT_NOT_READY: /* XPT yet to initialize */
+ SNIC_HOST_ERR(snic->shost,
+ "sNIC SubSystem is NOT Ready.\n");
+ break;
+
+ case SNIC_STAT_FATAL_ERROR: /* XPT Error */
+ SNIC_HOST_ERR(snic->shost,
+ "sNIC SubSystem in Unrecoverable State.\n");
+ break;
+ }
+
+ switch (fwreq->hdr.type) {
+ case SNIC_RSP_EXCH_VER_CMPL:
+ snic_io_exch_ver_cmpl_handler(snic, fwreq);
+ break;
+
+ case SNIC_RSP_REPORT_TGTS_CMPL:
+ snic_report_tgt_cmpl_handler(snic, fwreq);
+ break;
+
+ case SNIC_RSP_ICMND_CMPL:
+ snic_icmnd_cmpl_handler(snic, fwreq);
+ break;
+
+ case SNIC_RSP_ITMF_CMPL:
+ snic_itmf_cmpl_handler(snic, fwreq);
+ break;
+
+ case SNIC_RSP_HBA_RESET_CMPL:
+ snic_hba_reset_cmpl_handler(snic, fwreq);
+ break;
+
+ case SNIC_MSG_ACK:
+ snic_msg_ack_handler(snic, fwreq);
+ break;
+
+ case SNIC_MSG_ASYNC_EVNOTIFY:
+ snic_aen_handler(snic, fwreq);
+ break;
+
+ default:
+ SNIC_BUG_ON(1);
+ SNIC_SCSI_DBG(snic->shost,
+			      "Unknown firmware completion request type %d\n",
+ fwreq->hdr.type);
+ break;
+ }
+
+ /* Update Stats */
+ cmpl_time = jiffies - start;
+ if (cmpl_time > atomic64_read(&snic->s_stats.io.max_cmpl_time))
+ atomic64_set(&snic->s_stats.io.max_cmpl_time, cmpl_time);
+
+ return 0;
+} /* end of snic_io_cmpl_handler */
+
+/*
+ * snic_fwcq_cmpl_handler
+ * Routine to process fwCQ
+ * This CQ is independent, and not associated with wq/rq/wq_copy queues
+ */
+int
+snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work)
+{
+	unsigned int num_ent = 0;	/* number of cq entries processed */
+ unsigned int cq_idx;
+ unsigned int nent_per_cq;
+ struct snic_misc_stats *misc_stats = &snic->s_stats.misc;
+
+ for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; cq_idx++) {
+ nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx],
+ snic_io_cmpl_handler,
+ io_cmpl_work);
+ num_ent += nent_per_cq;
+
+ if (nent_per_cq > atomic64_read(&misc_stats->max_cq_ents))
+ atomic64_set(&misc_stats->max_cq_ents, nent_per_cq);
+ }
+
+ return num_ent;
+} /* end of snic_fwcq_cmpl_handler */
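+
+/*
+ * Example of the index range above (counts are illustrative; the real
+ * values come from snic_get_res_counts()): with wq_count = 1 and
+ * cq_count = 3, this services cq[1] and cq[2], leaving cq[0] to the WQ
+ * completion path.
+ */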
+
+/*
+ * snic_queue_itmf_req: Common API to queue Task Management requests.
+ * Use rqi->tm_tag for passing special tags.
+ * @req_id : aborted request's tag, -1 for lun reset.
+ */
+static int
+snic_queue_itmf_req(struct snic *snic,
+ struct snic_host_req *tmreq,
+ struct scsi_cmnd *sc,
+ u32 tmf,
+ u32 req_id)
+{
+ struct snic_req_info *rqi = req_to_rqi(tmreq);
+ struct scsi_lun lun;
+ int tm_tag = snic_cmd_tag(sc) | rqi->tm_tag;
+ int ret = 0;
+
+ SNIC_BUG_ON(!rqi);
+ SNIC_BUG_ON(!rqi->tm_tag);
+
+ /* fill in lun info */
+ int_to_scsilun(sc->device->lun, &lun);
+
+ /* Initialize snic_host_req: itmf */
+ snic_itmf_init(tmreq,
+ tm_tag,
+ snic->config.hid,
+ (ulong) rqi,
+ 0 /* flags */,
+ req_id, /* Command to be aborted. */
+ rqi->tgt_id,
+ lun.scsi_lun,
+ tmf);
+
+ /*
+	 * In case of multiple aborts on the same cmd,
+	 * try_wait_for_completion() and completion_done() can be used to
+	 * check whether an abort is queued even after the previously
+	 * issued abort has completed, e.g.:
+	 * SNIC_BUG_ON(completion_done(&rqi->done));
+ */
+
+ ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq));
+ if (ret)
+ SNIC_HOST_ERR(snic->shost,
+ "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n",
+ tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret);
+ else
+ SNIC_SCSI_DBG(snic->shost,
+			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d - Success.\n",
+ tmf, sc, rqi, req_id, snic_cmd_tag(sc));
+
+ return ret;
+} /* end of snic_queue_itmf_req */
+
+static int
+snic_issue_tm_req(struct snic *snic,
+ struct snic_req_info *rqi,
+ struct scsi_cmnd *sc,
+ int tmf)
+{
+ struct snic_host_req *tmreq = NULL;
+ int req_id = 0, tag = snic_cmd_tag(sc);
+ int ret = 0;
+
+ if (snic_get_state(snic) == SNIC_FWRESET)
+ return -EBUSY;
+
+ atomic_inc(&snic->ios_inflight);
+
+ SNIC_SCSI_DBG(snic->shost,
+ "issu_tmreq: Task mgmt req %d. rqi %p w/ tag %x\n",
+ tmf, rqi, tag);
+
+
+ if (tmf == SNIC_ITMF_LUN_RESET) {
+ tmreq = snic_dr_req_init(snic, rqi);
+ req_id = SCSI_NO_TAG;
+ } else {
+ tmreq = snic_abort_req_init(snic, rqi);
+ req_id = tag;
+ }
+
+ if (!tmreq) {
+ ret = -ENOMEM;
+
+ goto tmreq_err;
+ }
+
+ ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);
+ if (ret)
+ goto tmreq_err;
+
+ ret = 0;
+
+tmreq_err:
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+			      "issu_tmreq: Queuing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
+ tmf, sc, rqi, req_id, tag, ret);
+ } else {
+ SNIC_SCSI_DBG(snic->shost,
+ "issu_tmreq: Queuing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
+ tmf, sc, rqi, req_id, tag);
+ }
+
+ atomic_dec(&snic->ios_inflight);
+
+ return ret;
+}
+
+/*
+ * snic_queue_abort_req : Queues abort req to WQ
+ */
+static int
+snic_queue_abort_req(struct snic *snic,
+ struct snic_req_info *rqi,
+ struct scsi_cmnd *sc,
+ int tmf)
+{
+ SNIC_SCSI_DBG(snic->shost, "q_abtreq: sc %p, rqi %p, tag %x, tmf %d\n",
+ sc, rqi, snic_cmd_tag(sc), tmf);
+
+ /* Add special tag for abort */
+ rqi->tm_tag |= SNIC_TAG_ABORT;
+
+ return snic_issue_tm_req(snic, rqi, sc, tmf);
+}
+
+/*
+ * snic_abort_finish : called by snic_abort_cmd on queuing abort successfully.
+ */
+static int
+snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
+{
+ struct snic_req_info *rqi = NULL;
+ spinlock_t *io_lock = NULL;
+ unsigned long flags;
+ int ret = 0, tag = snic_cmd_tag(sc);
+
+ io_lock = snic_io_lock_hash(snic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (!rqi) {
+ atomic64_inc(&snic->s_stats.io.req_null);
+ CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
+
+ SNIC_SCSI_DBG(snic->shost,
+ "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n",
+ tag, sc, CMD_FLAGS(sc));
+ ret = FAILED;
+
+ goto abort_fail;
+ }
+
+ rqi->abts_done = NULL;
+
+ ret = FAILED;
+
+ /* Check the abort status. */
+ switch (CMD_ABTS_STATUS(sc)) {
+ case SNIC_INVALID_CODE:
+ /* Firmware didn't complete abort req, timedout */
+ CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT;
+ atomic64_inc(&snic->s_stats.abts.drv_tmo);
+ SNIC_SCSI_DBG(snic->shost,
+ "abt_fini:sc %p Tag %x Driver Timeout.flags 0x%llx\n",
+ sc, snic_cmd_tag(sc), CMD_FLAGS(sc));
+ /* do not release snic request in timedout case */
+ rqi = NULL;
+
+ goto abort_fail;
+
+ case SNIC_STAT_IO_SUCCESS:
+ case SNIC_STAT_IO_NOT_FOUND:
+ ret = SUCCESS;
+ break;
+
+ default:
+ /* Firmware completed abort with error */
+ ret = FAILED;
+ break;
+ }
+
+ CMD_SP(sc) = NULL;
+ SNIC_HOST_INFO(snic->shost,
+ "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n",
+ tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)),
+ CMD_FLAGS(sc));
+
+abort_fail:
+ spin_unlock_irqrestore(io_lock, flags);
+ if (rqi)
+ snic_release_req_buf(snic, rqi, sc);
+
+ return ret;
+} /* end of snic_abort_finish */
+
+/*
+ * snic_send_abort_and_wait : Issues Abort, and Waits
+ */
+static int
+snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc)
+{
+ struct snic_req_info *rqi = NULL;
+ enum snic_ioreq_state sv_state;
+ struct snic_tgt *tgt = NULL;
+ spinlock_t *io_lock = NULL;
+ DECLARE_COMPLETION_ONSTACK(tm_done);
+ unsigned long flags;
+ int ret = 0, tmf = 0, tag = snic_cmd_tag(sc);
+
+ tgt = starget_to_tgt(scsi_target(sc->device));
+ if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
+ tmf = SNIC_ITMF_ABTS_TASK_TERM;
+ else
+ tmf = SNIC_ITMF_ABTS_TASK;
+
+ /* stats */
+
+ io_lock = snic_io_lock_hash(snic, sc);
+
+ /*
+ * Avoid a race between SCSI issuing the abort and the device
+ * completing the command.
+ *
+	 * If the command is already completed by the fw_cmpl code,
+	 * we just return SUCCESS from here. This means that the abort
+	 * succeeded. In the SCSI ML, since the timeout for the command has
+	 * happened, the completion won't actually complete the command
+	 * and it will be considered an aborted command.
+ *
+ * The CMD_SP will not be cleared except while holding io_lock
+ */
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (!rqi) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ SNIC_HOST_ERR(snic->shost,
+ "abt_cmd: rqi is null. Tag %d flags 0x%llx\n",
+ tag, CMD_FLAGS(sc));
+
+ ret = SUCCESS;
+
+ goto send_abts_end;
+ }
+
+ rqi->abts_done = &tm_done;
+ if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ ret = 0;
+ goto abts_pending;
+ }
+ SNIC_BUG_ON(!rqi->abts_done);
+
+	/* Save command state; it is restored if queuing the abort fails. */
+ sv_state = CMD_STATE(sc);
+
+ /*
+	 * Command is still pending, so we need to abort it.
+	 * If the fw completes the command after this point,
+	 * the completion won't reach the mid-layer, since the abort
+	 * has already started.
+ */
+ CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
+ CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
+
+ SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag);
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* Now Queue the abort command to firmware */
+ ret = snic_queue_abort_req(snic, rqi, sc, tmf);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+ "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
+ tag, ret, CMD_FLAGS(sc));
+
+ spin_lock_irqsave(io_lock, flags);
+ /* Restore Command's previous state */
+ CMD_STATE(sc) = sv_state;
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (rqi)
+ rqi->abts_done = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = FAILED;
+
+ goto send_abts_end;
+ }
+
+ spin_lock_irqsave(io_lock, flags);
+ if (tmf == SNIC_ITMF_ABTS_TASK) {
+ CMD_FLAGS(sc) |= SNIC_IO_ABTS_ISSUED;
+ atomic64_inc(&snic->s_stats.abts.num);
+ } else {
+ /* term stats */
+ CMD_FLAGS(sc) |= SNIC_IO_TERM_ISSUED;
+ }
+ spin_unlock_irqrestore(io_lock, flags);
+
+ SNIC_SCSI_DBG(snic->shost,
+ "send_abt_cmd: sc %p Tag %x flags 0x%llx\n",
+ sc, tag, CMD_FLAGS(sc));
+
+
+ ret = 0;
+
+abts_pending:
+ /*
+ * Queued an abort IO, wait for its completion.
+ * Once the fw completes the abort command, it will
+ * wakeup this thread.
+ */
+ wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);
+
+send_abts_end:
+ return ret;
+} /* end of snic_send_abort_and_wait */
+
+/*
+ * This function is exported to SCSI for sending abort cmnds.
+ * A SCSI IO is represented by a snic_ioreq in the driver.
+ * The snic_ioreq is linked to the SCSI Cmd, forming a link with the ULP's IO.
+ */
+int
+snic_abort_cmd(struct scsi_cmnd *sc)
+{
+ struct snic *snic = shost_priv(sc->device->host);
+ int ret = SUCCESS, tag = snic_cmd_tag(sc);
+ u32 start_time = jiffies;
+
+ SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
+ sc, sc->cmnd[0], sc->request, tag);
+
+ if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
+ SNIC_HOST_ERR(snic->shost,
+ "abt_cmd: tag %x Parent Devs are not rdy\n",
+ tag);
+ ret = FAST_IO_FAIL;
+
+ goto abort_end;
+ }
+
+
+ ret = snic_send_abort_and_wait(snic, sc);
+ if (ret)
+ goto abort_end;
+
+ ret = snic_abort_finish(snic, sc);
+
+abort_end:
+ SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time), 0,
+ SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+ SNIC_SCSI_DBG(snic->shost,
+ "abts: Abort Req Status = %s\n",
+ (ret == SUCCESS) ? "SUCCESS" :
+ ((ret == FAST_IO_FAIL) ? "FAST_IO_FAIL" : "FAILED"));
+
+ return ret;
+}
+
+
+
+static int
+snic_is_abts_pending(struct snic *snic, struct scsi_cmnd *lr_sc)
+{
+ struct snic_req_info *rqi = NULL;
+ struct scsi_cmnd *sc = NULL;
+ struct scsi_device *lr_sdev = NULL;
+ spinlock_t *io_lock = NULL;
+ u32 tag;
+ unsigned long flags;
+
+ if (lr_sc)
+ lr_sdev = lr_sc->device;
+
+	/* walk through the tag map, and check if IOs are still pending in fw */
+ for (tag = 0; tag < snic->max_tag_id; tag++) {
+ io_lock = snic_io_lock_tag(snic, tag);
+
+ spin_lock_irqsave(io_lock, flags);
+ sc = scsi_host_find_tag(snic->shost, tag);
+
+ if (!sc || (lr_sc && (sc->device != lr_sdev || sc == lr_sc))) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ continue;
+ }
+
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (!rqi) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending w/ firmware and belongs to
+ * the LUN that is under reset, if lr_sc != NULL
+ */
+ SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n",
+ snic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ return 1;
+ }
+
+ spin_unlock_irqrestore(io_lock, flags);
+ }
+
+ return 0;
+} /* end of snic_is_abts_pending */
+
+static int
+snic_dr_clean_single_req(struct snic *snic,
+ u32 tag,
+ struct scsi_device *lr_sdev)
+{
+ struct snic_req_info *rqi = NULL;
+ struct snic_tgt *tgt = NULL;
+ struct scsi_cmnd *sc = NULL;
+ spinlock_t *io_lock = NULL;
+ u32 sv_state = 0, tmf = 0;
+ DECLARE_COMPLETION_ONSTACK(tm_done);
+ unsigned long flags;
+ int ret = 0;
+
+ io_lock = snic_io_lock_tag(snic, tag);
+ spin_lock_irqsave(io_lock, flags);
+ sc = scsi_host_find_tag(snic->shost, tag);
+
+	/* Ignore cmds that don't belong to the lun reset device */
+ if (!sc || sc->device != lr_sdev)
+ goto skip_clean;
+
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+
+ if (!rqi)
+ goto skip_clean;
+
+
+ if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
+ goto skip_clean;
+
+
+ if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {
+
+ SNIC_SCSI_DBG(snic->shost,
+ "clean_single_req: devrst is not pending sc 0x%p\n",
+ sc);
+
+ goto skip_clean;
+ }
+
+ SNIC_SCSI_DBG(snic->shost,
+ "clean_single_req: Found IO in %s on lun\n",
+ snic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ /* Save Command State */
+ sv_state = CMD_STATE(sc);
+
+ /*
+ * Any pending IO issued prior to reset is expected to be
+ * in abts pending state, if not we need to set SNIC_IOREQ_ABTS_PENDING
+ * to indicate the IO is abort pending.
+ * When IO is completed, the IO will be handed over and handled
+ * in this function.
+ */
+
+ CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
+ SNIC_BUG_ON(rqi->abts_done);
+
+ if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
+ rqi->tm_tag = SNIC_TAG_DEV_RST;
+
+ SNIC_SCSI_DBG(snic->shost,
+ "clean_single_req:devrst sc 0x%p\n", sc);
+ }
+
+ CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
+ rqi->abts_done = &tm_done;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ tgt = starget_to_tgt(scsi_target(sc->device));
+ if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
+ tmf = SNIC_ITMF_ABTS_TASK_TERM;
+ else
+ tmf = SNIC_ITMF_ABTS_TASK;
+
+ /* Now queue the abort command to firmware */
+ ret = snic_queue_abort_req(snic, rqi, sc, tmf);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+ "clean_single_req_err:sc %p, tag %d abt failed. tm_tag %d flags 0x%llx\n",
+ sc, tag, rqi->tm_tag, CMD_FLAGS(sc));
+
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (rqi)
+ rqi->abts_done = NULL;
+
+ /* Restore Command State */
+ if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = sv_state;
+
+ ret = 1;
+ goto skip_clean;
+ }
+
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
+
+ CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);
+
+	/* Recheck cmd state to see if it is now aborted. */
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (!rqi) {
+ CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
+ goto skip_clean;
+ }
+ rqi->abts_done = NULL;
+
+ /* if abort is still pending w/ fw, fail */
+ if (CMD_ABTS_STATUS(sc) == SNIC_INVALID_CODE) {
+ SNIC_HOST_ERR(snic->shost,
+ "clean_single_req_err:sc %p tag %d abt still pending w/ fw, tm_tag %d flags 0x%llx\n",
+ sc, tag, rqi->tm_tag, CMD_FLAGS(sc));
+
+ CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
+ ret = 1;
+
+ goto skip_clean;
+ }
+
+ CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
+ CMD_SP(sc) = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ snic_release_req_buf(snic, rqi, sc);
+
+ ret = 0;
+
+ return ret;
+
+skip_clean:
+ spin_unlock_irqrestore(io_lock, flags);
+
+ return ret;
+} /* end of snic_dr_clean_single_req */
+
+static int
+snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc)
+{
+ struct scsi_device *lr_sdev = lr_sc->device;
+ u32 tag = 0;
+ int ret = FAILED;
+
+ for (tag = 0; tag < snic->max_tag_id; tag++) {
+ if (tag == snic_cmd_tag(lr_sc))
+ continue;
+
+ ret = snic_dr_clean_single_req(snic, tag, lr_sdev);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag);
+
+ goto clean_err;
+ }
+ }
+
+	/* a sleeping task state is needed for schedule_timeout() to sleep */
+	schedule_timeout_uninterruptible(msecs_to_jiffies(100));
+
+ /* Walk through all the cmds and check abts status. */
+ if (snic_is_abts_pending(snic, lr_sc)) {
+ ret = FAILED;
+
+ goto clean_err;
+ }
+
+ ret = 0;
+ SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n");
+
+ return ret;
+
+clean_err:
+ ret = FAILED;
+ SNIC_HOST_ERR(snic->shost,
+ "Failed to Clean Pending IOs on %s device.\n",
+ dev_name(&lr_sdev->sdev_gendev));
+
+ return ret;
+
+} /* end of snic_dr_clean_pending_req */
+
+/*
+ * snic_dr_finish : Called by snic_device_reset
+ */
+static int
+snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
+{
+ struct snic_req_info *rqi = NULL;
+ spinlock_t *io_lock = NULL;
+ unsigned long flags;
+ int lr_res = 0;
+ int ret = FAILED;
+
+ io_lock = snic_io_lock_hash(snic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (!rqi) {
+ spin_unlock_irqrestore(io_lock, flags);
+ SNIC_SCSI_DBG(snic->shost,
+ "dr_fini: rqi is null tag 0x%x sc 0x%p flags 0x%llx\n",
+ snic_cmd_tag(sc), sc, CMD_FLAGS(sc));
+
+ ret = FAILED;
+ goto dr_fini_end;
+ }
+
+ rqi->dr_done = NULL;
+
+ lr_res = CMD_LR_STATUS(sc);
+
+ switch (lr_res) {
+ case SNIC_INVALID_CODE:
+ /* stats */
+ SNIC_SCSI_DBG(snic->shost,
+ "dr_fini: Tag %x Dev Reset Timedout. flags 0x%llx\n",
+ snic_cmd_tag(sc), CMD_FLAGS(sc));
+
+ CMD_FLAGS(sc) |= SNIC_DEV_RST_TIMEDOUT;
+ ret = FAILED;
+
+ goto dr_failed;
+
+ case SNIC_STAT_IO_SUCCESS:
+ SNIC_SCSI_DBG(snic->shost,
+ "dr_fini: Tag %x Dev Reset cmpl\n",
+ snic_cmd_tag(sc));
+ ret = 0;
+ break;
+
+ default:
+ SNIC_HOST_ERR(snic->shost,
+			      "dr_fini: Device Reset completed but failed. Tag = %x lr_status %s flags 0x%llx\n",
+ snic_cmd_tag(sc),
+ snic_io_status_to_str(lr_res), CMD_FLAGS(sc));
+ ret = FAILED;
+ goto dr_failed;
+ }
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /*
+ * Cleanup any IOs on this LUN that have still not completed.
+ * If any of these fail, then LUN Reset fails.
+ * Cleanup cleans all commands on this LUN except
+ * the lun reset command. If all cmds get cleaned, the LUN Reset
+ * succeeds.
+ */
+
+ ret = snic_dr_clean_pending_req(snic, sc);
+ if (ret) {
+ spin_lock_irqsave(io_lock, flags);
+ SNIC_SCSI_DBG(snic->shost,
+			      "dr_fini: Device Reset Failed since not all IOs could be aborted. Tag = %x.\n",
+ snic_cmd_tag(sc));
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+
+ goto dr_failed;
+ } else {
+ /* Cleanup LUN Reset Command */
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (rqi)
+ ret = SUCCESS; /* Completed Successfully */
+ else
+ ret = FAILED;
+ }
+
+dr_failed:
+ SNIC_BUG_ON(!spin_is_locked(io_lock));
+ if (rqi)
+ CMD_SP(sc) = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ if (rqi)
+ snic_release_req_buf(snic, rqi, sc);
+
+dr_fini_end:
+ return ret;
+} /* end of snic_dr_finish */
+
+static int
+snic_queue_dr_req(struct snic *snic,
+ struct snic_req_info *rqi,
+ struct scsi_cmnd *sc)
+{
+ /* Add special tag for device reset */
+ rqi->tm_tag |= SNIC_TAG_DEV_RST;
+
+ return snic_issue_tm_req(snic, rqi, sc, SNIC_ITMF_LUN_RESET);
+}
+
+static int
+snic_send_dr_and_wait(struct snic *snic, struct scsi_cmnd *sc)
+{
+ struct snic_req_info *rqi = NULL;
+ enum snic_ioreq_state sv_state;
+ spinlock_t *io_lock = NULL;
+ unsigned long flags;
+ DECLARE_COMPLETION_ONSTACK(tm_done);
+ int ret = FAILED, tag = snic_cmd_tag(sc);
+
+ io_lock = snic_io_lock_hash(snic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= SNIC_DEVICE_RESET;
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (!rqi) {
+ SNIC_HOST_ERR(snic->shost,
+ "send_dr: rqi is null, Tag 0x%x flags 0x%llx\n",
+ tag, CMD_FLAGS(sc));
+ spin_unlock_irqrestore(io_lock, flags);
+
+ ret = FAILED;
+ goto send_dr_end;
+ }
+
+ /* Save Command state to restore in case Queuing failed. */
+ sv_state = CMD_STATE(sc);
+
+ CMD_STATE(sc) = SNIC_IOREQ_LR_PENDING;
+ CMD_LR_STATUS(sc) = SNIC_INVALID_CODE;
+
+ SNIC_SCSI_DBG(snic->shost, "dr: TAG = %x\n", tag);
+
+ rqi->dr_done = &tm_done;
+ SNIC_BUG_ON(!rqi->dr_done);
+
+ spin_unlock_irqrestore(io_lock, flags);
+ /*
+	 * The command state is now SNIC_IOREQ_LR_PENDING. If the command
+	 * completes in the meantime, icmnd_cmpl only marks the completion
+	 * in the cmd flags; the LUN reset still has to run to completion.
+ */
+
+ ret = snic_queue_dr_req(snic, rqi, sc);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+ "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n",
+ tag, ret, CMD_FLAGS(sc));
+
+ spin_lock_irqsave(io_lock, flags);
+ /* Restore State */
+ CMD_STATE(sc) = sv_state;
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (rqi)
+ rqi->dr_done = NULL;
+ /* rqi is freed in caller. */
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = FAILED;
+
+ goto send_dr_end;
+ }
+
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= SNIC_DEV_RST_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ ret = 0;
+
+ wait_for_completion_timeout(&tm_done, SNIC_LUN_RESET_TIMEOUT);
+
+send_dr_end:
+ return ret;
+}
+
+/*
+ * Auxiliary function to check whether the lun reset op is supported.
+ * Returns 0 if not supported.
+ */
+static int
+snic_dev_reset_supported(struct scsi_device *sdev)
+{
+ struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
+
+ if (tgt->tdata.typ == SNIC_TGT_DAS)
+ return 0;
+
+ return 1;
+}
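+
+/*
+ * Example: for a SNIC_TGT_DAS target this returns 0, so
+ * snic_device_reset() below takes the SNIC_DEV_RST_NOTSUP path instead
+ * of issuing a LUN reset to firmware.
+ */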
+
+static void
+snic_unlink_and_release_req(struct snic *snic, struct scsi_cmnd *sc, int flag)
+{
+ struct snic_req_info *rqi = NULL;
+ spinlock_t *io_lock = NULL;
+ unsigned long flags;
+ u32 start_time = jiffies;
+
+ io_lock = snic_io_lock_hash(snic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (rqi) {
+ start_time = rqi->start_time;
+ CMD_SP(sc) = NULL;
+ }
+
+ CMD_FLAGS(sc) |= flag;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ if (rqi)
+ snic_release_req_buf(snic, rqi, sc);
+
+ SNIC_TRC(snic->shost->host_no, snic_cmd_tag(sc), (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time), (ulong) rqi,
+ SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
+}
+
+/*
+ * SCSI Eh thread issues a LUN Reset when one or more commands on a LUN
+ * fail to get aborted. It calls driver's eh_device_reset with a SCSI
+ * command on the LUN.
+ */
+int
+snic_device_reset(struct scsi_cmnd *sc)
+{
+ struct Scsi_Host *shost = sc->device->host;
+ struct snic *snic = shost_priv(shost);
+ struct snic_req_info *rqi = NULL;
+ int tag = snic_cmd_tag(sc);
+ int start_time = jiffies;
+ int ret = FAILED;
+ int dr_supp = 0;
+
+ SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
+ sc, sc->cmnd[0], sc->request,
+ snic_cmd_tag(sc));
+ dr_supp = snic_dev_reset_supported(sc->device);
+ if (!dr_supp) {
+ /* device reset op is not supported */
+ SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n");
+ snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP);
+
+ goto dev_rst_end;
+ }
+
+ if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
+ snic_unlink_and_release_req(snic, sc, 0);
+ SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n");
+
+ goto dev_rst_end;
+ }
+
+	/* There is no tag when lun reset is issued through ioctl. */
+ if (unlikely(tag <= SNIC_NO_TAG)) {
+ SNIC_HOST_INFO(snic->shost,
+ "Devrst: LUN Reset Recvd thru IOCTL.\n");
+
+ rqi = snic_req_init(snic, 0);
+ if (!rqi)
+ goto dev_rst_end;
+
+ memset(scsi_cmd_priv(sc), 0,
+ sizeof(struct snic_internal_io_state));
+ CMD_SP(sc) = (char *)rqi;
+ CMD_FLAGS(sc) = SNIC_NO_FLAGS;
+
+		/* Add special tag for dr coming from user space */
+ rqi->tm_tag = SNIC_TAG_IOCTL_DEV_RST;
+ rqi->sc = sc;
+ }
+
+ ret = snic_send_dr_and_wait(snic, sc);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+ "Devrst: IO w/ Tag %x Failed w/ err = %d\n",
+ tag, ret);
+
+ snic_unlink_and_release_req(snic, sc, 0);
+
+ goto dev_rst_end;
+ }
+
+ ret = snic_dr_finish(snic, sc);
+
+dev_rst_end:
+ SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+ SNIC_SCSI_DBG(snic->shost,
+ "Devrst: Returning from Device Reset : %s\n",
+ (ret == SUCCESS) ? "SUCCESS" : "FAILED");
+
+ return ret;
+} /* end of snic_device_reset */
+
+/*
+ * SCSI Error handling calls driver's eh_host_reset if all prior
+ * error handling levels return FAILED.
+ *
+ * Host Reset is the highest level of error recovery. If this fails, then
+ * host is offlined by SCSI.
+ */
+/*
+ * snic_issue_hba_reset : Queues FW Reset Request.
+ */
+static int
+snic_issue_hba_reset(struct snic *snic, struct scsi_cmnd *sc)
+{
+ struct snic_req_info *rqi = NULL;
+ struct snic_host_req *req = NULL;
+ spinlock_t *io_lock = NULL;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ rqi = snic_req_init(snic, 0);
+ if (!rqi) {
+ ret = -ENOMEM;
+
+ goto hba_rst_end;
+ }
+
+ if (snic_cmd_tag(sc) == SCSI_NO_TAG) {
+ memset(scsi_cmd_priv(sc), 0,
+ sizeof(struct snic_internal_io_state));
+ SNIC_HOST_INFO(snic->shost, "issu_hr:Host reset thru ioctl.\n");
+ rqi->sc = sc;
+ }
+
+ req = rqi_to_req(rqi);
+
+ io_lock = snic_io_lock_hash(snic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ SNIC_BUG_ON(CMD_SP(sc) != NULL);
+ CMD_STATE(sc) = SNIC_IOREQ_PENDING;
+ CMD_SP(sc) = (char *) rqi;
+ CMD_FLAGS(sc) |= SNIC_IO_INITIALIZED;
+ snic->remove_wait = &wait;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* Initialize Request */
+ snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc),
+ snic->config.hid, 0, (ulong) rqi);
+
+ req->u.reset.flags = 0;
+
+ ret = snic_queue_wq_desc(snic, req, sizeof(*req));
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+ "issu_hr:Queuing HBA Reset Failed. w err %d\n",
+ ret);
+
+ goto hba_rst_err;
+ }
+
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= SNIC_HOST_RESET_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&snic->s_stats.reset.hba_resets);
+ SNIC_HOST_INFO(snic->shost, "Queued HBA Reset Successfully.\n");
+
+ wait_for_completion_timeout(snic->remove_wait,
+ SNIC_HOST_RESET_TIMEOUT);
+
+ if (snic_get_state(snic) == SNIC_FWRESET) {
+ SNIC_HOST_ERR(snic->shost, "reset_cmpl: Reset Timedout.\n");
+ ret = -ETIMEDOUT;
+
+ goto hba_rst_err;
+ }
+
+ spin_lock_irqsave(io_lock, flags);
+ snic->remove_wait = NULL;
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ CMD_SP(sc) = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ if (rqi)
+ snic_req_free(snic, rqi);
+
+ ret = 0;
+
+ return ret;
+
+hba_rst_err:
+ spin_lock_irqsave(io_lock, flags);
+ snic->remove_wait = NULL;
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ CMD_SP(sc) = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ if (rqi)
+ snic_req_free(snic, rqi);
+
+hba_rst_end:
+ SNIC_HOST_ERR(snic->shost,
+ "reset:HBA Reset Failed w/ err = %d.\n",
+ ret);
+
+ return ret;
+} /* end of snic_issue_hba_reset */
+
+int
+snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
+{
+ struct snic *snic = shost_priv(shost);
+ enum snic_state sv_state;
+ unsigned long flags;
+ int ret = FAILED;
+
+ /* Set snic state to SNIC_FWRESET */
+ sv_state = snic_get_state(snic);
+
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ if (snic_get_state(snic) == SNIC_FWRESET) {
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+ SNIC_HOST_INFO(shost, "reset:prev reset is in progres\n");
+
+ msleep(SNIC_HOST_RESET_TIMEOUT);
+ ret = SUCCESS;
+
+ goto reset_end;
+ }
+
+ snic_set_state(snic, SNIC_FWRESET);
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+
+ /* Wait for all IOs that entered through queuecommand to drain */
+ while (atomic_read(&snic->ios_inflight))
+ schedule_timeout(msecs_to_jiffies(1));
+
+ ret = snic_issue_hba_reset(snic, sc);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "reset:Host Reset Failed w/ err %d.\n",
+ ret);
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ snic_set_state(snic, sv_state);
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+ atomic64_inc(&snic->s_stats.reset.hba_reset_fail);
+ ret = FAILED;
+
+ goto reset_end;
+ }
+
+ ret = SUCCESS;
+
+reset_end:
+ return ret;
+} /* end of snic_reset */
+
+/*
+ * The SCSI error-handling path calls the driver's eh_host_reset handler
+ * when all lower error-handling levels return FAILED.
+ *
+ * Host reset is the highest level of error recovery; if it also fails,
+ * the SCSI midlayer takes the host offline.
+ */
+int
+snic_host_reset(struct scsi_cmnd *sc)
+{
+ struct Scsi_Host *shost = sc->device->host;
+ u32 start_time = jiffies;
+ int ret = FAILED;
+
+ SNIC_SCSI_DBG(shost,
+ "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
+ sc, sc->cmnd[0], sc->request,
+ snic_cmd_tag(sc), CMD_FLAGS(sc));
+
+ ret = snic_reset(shost, sc);
+
+ SNIC_TRC(shost->host_no, snic_cmd_tag(sc), (ulong) sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+ return ret;
+} /* end of snic_host_reset */
+
+/*
+ * snic_cmpl_pending_tmreq : caller must hold io_lock
+ */
+static void
+snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc)
+{
+ struct snic_req_info *rqi = NULL;
+
+ SNIC_SCSI_DBG(snic->shost,
+ "Completing Pending TM Req sc %p, state %s flags 0x%llx\n",
+ sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc));
+
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (!rqi)
+ return;
+
+ if (rqi->dr_done)
+ complete(rqi->dr_done);
+ else if (rqi->abts_done)
+ complete(rqi->abts_done);
+}
+
+/*
+ * snic_scsi_cleanup: Walks through tag map and releases the reqs
+ */
+static void
+snic_scsi_cleanup(struct snic *snic, int ex_tag)
+{
+ struct snic_req_info *rqi = NULL;
+ struct scsi_cmnd *sc = NULL;
+ spinlock_t *io_lock = NULL;
+ unsigned long flags;
+ int tag;
+ u64 st_time = 0;
+
+ SNIC_SCSI_DBG(snic->shost, "sc_clean: scsi cleanup.\n");
+
+ for (tag = 0; tag < snic->max_tag_id; tag++) {
+ /* Skip ex_tag */
+ if (tag == ex_tag)
+ continue;
+
+ io_lock = snic_io_lock_tag(snic, tag);
+ spin_lock_irqsave(io_lock, flags);
+ sc = scsi_host_find_tag(snic->shost, tag);
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ continue;
+ }
+
+ if (unlikely(snic_tmreq_pending(sc))) {
+ /*
+ * Handle the case where FW completes the reset without sending
+ * completions for the outstanding IOs.
+ */
+ snic_cmpl_pending_tmreq(snic, sc);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ continue;
+ }
+
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (!rqi) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ goto cleanup;
+ }
+
+ SNIC_SCSI_DBG(snic->shost,
+ "sc_clean: sc %p, rqi %p, tag %d flags 0x%llx\n",
+ sc, rqi, tag, CMD_FLAGS(sc));
+
+ CMD_SP(sc) = NULL;
+ CMD_FLAGS(sc) |= SNIC_SCSI_CLEANUP;
+ spin_unlock_irqrestore(io_lock, flags);
+ st_time = rqi->start_time;
+
+ SNIC_HOST_INFO(snic->shost,
+ "sc_clean: Releasing rqi %p : flags 0x%llx\n",
+ rqi, CMD_FLAGS(sc));
+
+ snic_release_req_buf(snic, rqi, sc);
+
+cleanup:
+ sc->result = DID_TRANSPORT_DISRUPTED << 16;
+ SNIC_HOST_INFO(snic->shost,
+ "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p. rqi %p duration %llu msecs\n",
+ sc, rqi, (u64) jiffies_to_msecs(jiffies - st_time));
+
+ /* Update IO stats */
+ snic_stats_update_io_cmpl(&snic->s_stats);
+
+ if (sc->scsi_done) {
+ SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
+ jiffies_to_msecs(jiffies - st_time), 0,
+ SNIC_TRC_CMD(sc),
+ SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+ sc->scsi_done(sc);
+ }
+ }
+} /* end of snic_scsi_cleanup */
+
+void
+snic_shutdown_scsi_cleanup(struct snic *snic)
+{
+ SNIC_HOST_INFO(snic->shost, "Shutdown time SCSI Cleanup.\n");
+
+ snic_scsi_cleanup(snic, SCSI_NO_TAG);
+} /* end of snic_shutdown_scsi_cleanup */
+
+/*
+ * snic_internal_abort_io
+ * called by : snic_tgt_scsi_abort_io
+ */
+static int
+snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
+{
+ struct snic_req_info *rqi = NULL;
+ spinlock_t *io_lock = NULL;
+ unsigned long flags;
+ u32 sv_state = 0;
+ int ret = 0;
+
+ io_lock = snic_io_lock_hash(snic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ rqi = (struct snic_req_info *) CMD_SP(sc);
+ if (!rqi)
+ goto skip_internal_abts;
+
+ if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
+ goto skip_internal_abts;
+
+ if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {
+
+ SNIC_SCSI_DBG(snic->shost,
+ "internal_abts: dev rst not pending sc 0x%p\n",
+ sc);
+
+ goto skip_internal_abts;
+ }
+
+ if (!(CMD_FLAGS(sc) & SNIC_IO_ISSUED)) {
+ SNIC_SCSI_DBG(snic->shost,
+ "internal_abts: IO not yet issued sc 0x%p tag 0x%x flags 0x%llx state %d\n",
+ sc, snic_cmd_tag(sc), CMD_FLAGS(sc), CMD_STATE(sc));
+
+ goto skip_internal_abts;
+ }
+
+ sv_state = CMD_STATE(sc);
+ CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
+ CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
+ CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_PENDING;
+
+ if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
+ /* stats */
+ rqi->tm_tag = SNIC_TAG_DEV_RST;
+ SNIC_SCSI_DBG(snic->shost, "internal_abts:dev rst sc %p\n", sc);
+ }
+
+ SNIC_SCSI_DBG(snic->shost, "internal_abts: Issuing abts tag %x\n",
+ snic_cmd_tag(sc));
+ SNIC_BUG_ON(rqi->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ ret = snic_queue_abort_req(snic, rqi, sc, tmf);
+ if (ret) {
+ SNIC_HOST_ERR(snic->shost,
+ "internal_abts: Tag = %x , Failed w/ err = %d\n",
+ snic_cmd_tag(sc), ret);
+
+ spin_lock_irqsave(io_lock, flags);
+
+ if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = sv_state;
+
+ goto skip_internal_abts;
+ }
+
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
+
+ ret = SUCCESS;
+
+skip_internal_abts:
+ SNIC_BUG_ON(!spin_is_locked(io_lock));
+ spin_unlock_irqrestore(io_lock, flags);
+
+ return ret;
+} /* end of snic_internal_abort_io */
+
+/*
+ * snic_tgt_scsi_abort_io : called by snic_tgt_del
+ */
+int
+snic_tgt_scsi_abort_io(struct snic_tgt *tgt)
+{
+ struct snic *snic = NULL;
+ struct scsi_cmnd *sc = NULL;
+ struct snic_tgt *sc_tgt = NULL;
+ spinlock_t *io_lock = NULL;
+ unsigned long flags;
+ int ret = 0, tag, abt_cnt = 0, tmf = 0;
+
+ if (!tgt)
+ return -1;
+
+ snic = shost_priv(snic_tgt_to_shost(tgt));
+ SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: Cleaning Pending IOs.\n");
+
+ if (tgt->tdata.typ == SNIC_TGT_DAS)
+ tmf = SNIC_ITMF_ABTS_TASK;
+ else
+ tmf = SNIC_ITMF_ABTS_TASK_TERM;
+
+ for (tag = 0; tag < snic->max_tag_id; tag++) {
+ io_lock = snic_io_lock_tag(snic, tag);
+
+ spin_lock_irqsave(io_lock, flags);
+ sc = scsi_host_find_tag(snic->shost, tag);
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ continue;
+ }
+
+ sc_tgt = starget_to_tgt(scsi_target(sc->device));
+ if (sc_tgt != tgt) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ continue;
+ }
+ spin_unlock_irqrestore(io_lock, flags);
+
+ ret = snic_internal_abort_io(snic, sc, tmf);
+ if (ret < 0) {
+ SNIC_HOST_ERR(snic->shost,
+ "tgt_abt_io: Tag %x, Failed w err = %d\n",
+ tag, ret);
+
+ continue;
+ }
+
+ if (ret == SUCCESS)
+ abt_cnt++;
+ }
+
+ SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: abt_cnt = %d\n", abt_cnt);
+
+ return 0;
+} /* end of snic_tgt_scsi_abort_io */
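
For context, the reset entry points above are reached through the SCSI
midlayer's error-handling escalation (command abort, then LUN reset, then
host reset), via handlers registered in the driver's scsi_host_template. A
minimal sketch of that wiring, assuming the handler names used in this file
(the real template lives in snic_main.c, which is not part of this hunk):

    static struct scsi_host_template snic_host_template_sketch = {
        .module                  = THIS_MODULE,
        .name                    = "snic",
        .queuecommand            = snic_queuecommand,
        .eh_abort_handler        = snic_abort_cmd,
        .eh_device_reset_handler = snic_device_reset, /* LUN reset, above */
        .eh_host_reset_handler   = snic_host_reset,   /* full HBA reset */
    };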
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h
new file mode 100644
index 000000000000..11e614849a82
--- /dev/null
+++ b/drivers/scsi/snic/snic_stats.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __SNIC_STATS_H
+#define __SNIC_STATS_H
+
+struct snic_io_stats {
+ atomic64_t active; /* Active IOs */
+ atomic64_t max_active; /* Max # active IOs */
+ atomic64_t max_sgl; /* Max # SGLs for any IO */
+ atomic64_t max_time; /* Max time to process IO */
+ atomic64_t max_qtime; /* Max time to Queue the IO */
+ atomic64_t max_cmpl_time; /* Max time to complete the IO */
+ atomic64_t sgl_cnt[SNIC_MAX_SG_DESC_CNT]; /* SGL Counters */
+ atomic64_t max_io_sz; /* Max IO Size */
+ atomic64_t compl; /* IO Completions */
+ atomic64_t fail; /* IO Failures */
+ atomic64_t req_null; /* req or req info is NULL */
+ atomic64_t alloc_fail; /* Alloc Failures */
+ atomic64_t sc_null;
+ atomic64_t io_not_found; /* IO Not Found */
+ atomic64_t num_ios; /* Number of IOs */
+};
+
+struct snic_abort_stats {
+ atomic64_t num; /* Abort counter */
+ atomic64_t fail; /* Abort Failure Counter */
+ atomic64_t drv_tmo; /* Abort Driver Timeouts */
+ atomic64_t fw_tmo; /* Abort Firmware Timeouts */
+ atomic64_t io_not_found;/* Abort IO Not Found */
+};
+
+struct snic_reset_stats {
+ atomic64_t dev_resets; /* Device Reset Counter */
+ atomic64_t dev_reset_fail; /* Device Reset Failures */
+ atomic64_t dev_reset_aborts; /* Device Reset Aborts */
+ atomic64_t dev_reset_tmo; /* Device Reset Timeout */
+ atomic64_t dev_reset_terms; /* Device Reset terminate */
+ atomic64_t hba_resets; /* hba/firmware resets */
+ atomic64_t hba_reset_cmpl; /* hba/firmware reset completions */
+ atomic64_t hba_reset_fail; /* hba/firmware failures */
+ atomic64_t snic_resets; /* snic resets */
+ atomic64_t snic_reset_compl; /* snic reset completions */
+ atomic64_t snic_reset_fail; /* snic reset failures */
+};
+
+struct snic_fw_stats {
+ atomic64_t actv_reqs; /* Active Requests */
+ atomic64_t max_actv_reqs; /* Max Active Requests */
+ atomic64_t out_of_res; /* Firmware Out Of Resources */
+ atomic64_t io_errs; /* Firmware IO Firmware Errors */
+ atomic64_t scsi_errs; /* Target hits check condition */
+};
+
+struct snic_misc_stats {
+ u64 last_isr_time;
+ u64 last_ack_time;
+ atomic64_t isr_cnt;
+ atomic64_t max_cq_ents; /* Max CQ Entries */
+ atomic64_t data_cnt_mismat; /* Data Count Mismatch */
+ atomic64_t io_tmo;
+ atomic64_t io_aborted;
+ atomic64_t sgl_inval; /* SGL Invalid */
+ atomic64_t abts_wq_alloc_fail; /* Abort Path WQ desc alloc failure */
+ atomic64_t devrst_wq_alloc_fail;/* Device Reset - WQ desc alloc fail */
+ atomic64_t wq_alloc_fail; /* IO WQ desc alloc failure */
+ atomic64_t no_icmnd_itmf_cmpls;
+ atomic64_t io_under_run;
+ atomic64_t qfull;
+ atomic64_t tgt_not_rdy;
+};
+
+struct snic_stats {
+ struct snic_io_stats io;
+ struct snic_abort_stats abts;
+ struct snic_reset_stats reset;
+ struct snic_fw_stats fw;
+ struct snic_misc_stats misc;
+ atomic64_t io_cmpl_skip;
+};
+
+int snic_stats_debugfs_init(struct snic *);
+void snic_stats_debugfs_remove(struct snic *);
+
+/* Auxiliary function to update the active IO counter */
+static inline void
+snic_stats_update_active_ios(struct snic_stats *s_stats)
+{
+ struct snic_io_stats *io = &s_stats->io;
+ u32 nr_active_ios;
+
+ nr_active_ios = atomic64_inc_return(&io->active);
+ if (atomic64_read(&io->max_active) < nr_active_ios)
+ atomic64_set(&io->max_active, nr_active_ios);
+
+ atomic64_inc(&io->num_ios);
+}
+
+/* Auxiliary function to update the IO completion counter */
+static inline void
+snic_stats_update_io_cmpl(struct snic_stats *s_stats)
+{
+ atomic64_dec(&s_stats->io.active);
+ if (unlikely(atomic64_read(&s_stats->io_cmpl_skip)))
+ atomic64_dec(&s_stats->io_cmpl_skip);
+ else
+ atomic64_inc(&s_stats->io.compl);
+}
+#endif /* __SNIC_STATS_H */
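
The two inline helpers above are intended to bracket every I/O: the submit
path bumps the active count (tracking its high-water mark), and the
completion path decrements it, skipping the completion counter while
io_cmpl_skip is armed. A hedged sketch of the pairing; the function below is
illustrative only, the driver's real call sites are in its I/O path:

    static void example_io_lifecycle(struct snic *snic)
    {
        snic_stats_update_active_ios(&snic->s_stats); /* when queueing an IO */

        /* ... issue the request, later receive its completion ... */

        snic_stats_update_io_cmpl(&snic->s_stats); /* when the IO completes */
    }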
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
new file mode 100644
index 000000000000..28a40a7ade38
--- /dev/null
+++ b/drivers/scsi/snic/snic_trc.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+
+#include "snic_io.h"
+#include "snic.h"
+
+/*
+ * snic_get_trc_buf : Allocates and returns a trace record.
+ */
+struct snic_trc_data *
+snic_get_trc_buf(void)
+{
+ struct snic_trc *trc = &snic_glob->trc;
+ struct snic_trc_data *td = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&trc->lock, flags);
+ td = &trc->buf[trc->wr_idx];
+ trc->wr_idx++;
+
+ if (trc->wr_idx == trc->max_idx)
+ trc->wr_idx = 0;
+
+ if (trc->wr_idx != trc->rd_idx) {
+ spin_unlock_irqrestore(&trc->lock, flags);
+
+ goto end;
+ }
+
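+ /* Ring full: the writer has caught the reader; drop the oldest record. */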
+ trc->rd_idx++;
+ if (trc->rd_idx == trc->max_idx)
+ trc->rd_idx = 0;
+
+ td->ts = 0; /* Mark the record in-progress; readers skip it until ts is set */
+ spin_unlock_irqrestore(&trc->lock, flags);
+
+end:
+
+ return td;
+} /* end of snic_get_trc_buf */
+
+/*
+ * snic_fmt_trc_data : Formats trace data for printing.
+ */
+static int
+snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz)
+{
+ int len = 0;
+ struct timespec tmspec;
+
+ jiffies_to_timespec(td->ts, &tmspec);
+
+ len += snprintf(buf, buf_sz,
+ "%lu.%10lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
+ tmspec.tv_sec,
+ tmspec.tv_nsec,
+ td->fn,
+ td->hno,
+ td->tag,
+ td->data[0], td->data[1], td->data[2], td->data[3],
+ td->data[4]);
+
+ return len;
+} /* end of snic_fmt_trc_data */
+
+/*
+ * snic_get_trc_data : Returns a formatted trace buffer.
+ */
+int
+snic_get_trc_data(char *buf, int buf_sz)
+{
+ struct snic_trc_data *td = NULL;
+ struct snic_trc *trc = &snic_glob->trc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&trc->lock, flags);
+ if (trc->rd_idx == trc->wr_idx) {
+ spin_unlock_irqrestore(&trc->lock, flags);
+
+ return -1;
+ }
+ td = &trc->buf[trc->rd_idx];
+
+ if (td->ts == 0) {
+ /* write in progress. */
+ spin_unlock_irqrestore(&trc->lock, flags);
+
+ return -1;
+ }
+
+ trc->rd_idx++;
+ if (trc->rd_idx == trc->max_idx)
+ trc->rd_idx = 0;
+ spin_unlock_irqrestore(&trc->lock, flags);
+
+ return snic_fmt_trc_data(td, buf, buf_sz);
+} /* end of snic_get_trc_data */
+
+/*
+ * snic_trc_init() : Configures Trace Functionality for snic.
+ */
+int
+snic_trc_init(void)
+{
+ struct snic_trc *trc = &snic_glob->trc;
+ void *tbuf = NULL;
+ int tbuf_sz = 0, ret;
+
+ tbuf_sz = (snic_trace_max_pages * PAGE_SIZE);
+ tbuf = vzalloc(tbuf_sz);
+ if (!tbuf) {
+ SNIC_ERR("Failed to allocate trace buffer of size %d.\n", tbuf_sz);
+ SNIC_ERR("Trace facility not enabled.\n");
+ ret = -ENOMEM;
+
+ return ret;
+ }
+
+ trc->buf = (struct snic_trc_data *) tbuf;
+ spin_lock_init(&trc->lock);
+
+ ret = snic_trc_debugfs_init();
+ if (ret) {
+ SNIC_ERR("Failed to create Debugfs Files.\n");
+
+ goto error;
+ }
+
+ trc->max_idx = (tbuf_sz / SNIC_TRC_ENTRY_SZ);
+ trc->rd_idx = trc->wr_idx = 0;
+ trc->enable = 1;
+ SNIC_INFO("Trace Facility Enabled.\n Trace Buffer SZ %lu Pages.\n",
+ tbuf_sz / PAGE_SIZE);
+ ret = 0;
+
+ return ret;
+
+error:
+ snic_trc_free();
+
+ return ret;
+} /* end of snic_trc_init */
+
+/*
+ * snic_trc_free : Releases the trace buffer and disables the tracing.
+ */
+void
+snic_trc_free(void)
+{
+ struct snic_trc *trc = &snic_glob->trc;
+
+ trc->enable = 0;
+ snic_trc_debugfs_term();
+
+ if (trc->buf) {
+ vfree(trc->buf);
+ trc->buf = NULL;
+ }
+
+ SNIC_INFO("Trace Facility Disabled.\n");
+} /* end of snic_trc_free */
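
snic_get_trc_data() drains one record per call, returning the formatted
length, or -1 when the ring is empty or the head record is still being
written (ts == 0). A hedged sketch of a consumer loop; the real consumer is
the snic debugfs read handler, and the line-buffer size here is an assumption:

    static void example_dump_trace(void)
    {
        char line[256]; /* assumed large enough for one formatted record */
        int len;

        while ((len = snic_get_trc_data(line, sizeof(line))) > 0)
            pr_info("%.*s", len, line);
    }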
diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h
new file mode 100644
index 000000000000..427faee5f97e
--- /dev/null
+++ b/drivers/scsi/snic/snic_trc.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __SNIC_TRC_H
+#define __SNIC_TRC_H
+
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+
+extern ssize_t simple_read_from_buffer(void __user *to,
+ size_t count,
+ loff_t *ppos,
+ const void *from,
+ size_t available);
+
+extern unsigned int snic_trace_max_pages;
+
+/* Per-record trace entry; the global trace state is kept in struct snic_trc below */
+struct snic_trc_data {
+ u64 ts; /* Time Stamp */
+ char *fn; /* Ptr to Function Name */
+ u32 hno; /* SCSI Host ID */
+ u32 tag; /* Command Tag */
+ u64 data[5];
+} __attribute__((__packed__));
+
+#define SNIC_TRC_ENTRY_SZ 64 /* in Bytes */
+
+struct snic_trc {
+ spinlock_t lock;
+ struct snic_trc_data *buf; /* Trace Buffer */
+ u32 max_idx; /* Max Index into trace buffer */
+ u32 rd_idx;
+ u32 wr_idx;
+ u32 enable; /* Control Variable for Tracing */
+
+ struct dentry *trc_enable; /* debugfs file object */
+ struct dentry *trc_file;
+};
+
+int snic_trc_init(void);
+void snic_trc_free(void);
+int snic_trc_debugfs_init(void);
+void snic_trc_debugfs_term(void);
+struct snic_trc_data *snic_get_trc_buf(void);
+int snic_get_trc_data(char *buf, int buf_sz);
+
+int snic_debugfs_init(void);
+void snic_debugfs_term(void);
+
+static inline void
+snic_trace(char *fn, u16 hno, u32 tag, u64 d1, u64 d2, u64 d3, u64 d4, u64 d5)
+{
+ struct snic_trc_data *tr_rec = snic_get_trc_buf();
+
+ if (!tr_rec)
+ return;
+
+ tr_rec->fn = (char *)fn;
+ tr_rec->hno = hno;
+ tr_rec->tag = tag;
+ tr_rec->data[0] = d1;
+ tr_rec->data[1] = d2;
+ tr_rec->data[2] = d3;
+ tr_rec->data[3] = d4;
+ tr_rec->data[4] = d5;
+ tr_rec->ts = jiffies; /* Update time stamp at last */
+}
+
+#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5) \
+ do { \
+ if (unlikely(snic_glob->trc.enable)) \
+ snic_trace((char *)__func__, \
+ (u16)(_hno), \
+ (u32)(_tag), \
+ (u64)(d1), \
+ (u64)(d2), \
+ (u64)(d3), \
+ (u64)(d4), \
+ (u64)(d5)); \
+ } while (0)
+#else
+
+#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5) \
+ do { \
+ if (unlikely(snic_log_level & 0x2)) \
+ SNIC_DBG("SnicTrace: %s %2u %2u %llx %llx %llx %llx %llx", \
+ (char *)__func__, \
+ (u16)(_hno), \
+ (u32)(_tag), \
+ (u64)(d1), \
+ (u64)(d2), \
+ (u64)(d3), \
+ (u64)(d4), \
+ (u64)(d5)); \
+ } while (0)
+#endif /* end of CONFIG_SCSI_SNIC_DEBUG_FS */
+
+#define SNIC_TRC_CMD(sc) \
+ ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | \
+ (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | \
+ (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | \
+ (u64)sc->cmnd[5])
+
+#define SNIC_TRC_CMD_STATE_FLAGS(sc) \
+ ((u64) CMD_FLAGS(sc) << 32 | CMD_STATE(sc))
+
+#endif /* end of __SNIC_TRC_H */
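
SNIC_TRC_CMD() packs seven CDB bytes into one u64, so a single trace word
captures the opcode (cmnd[0]), the big-endian LBA (cmnd[2..5]) and the
transfer length (cmnd[7..8]) of a 10-byte CDB. A hedged decoding sketch that
mirrors that layout; this helper is illustrative and not part of the driver:

    static void example_unpack_trc_cmd(u64 v)
    {
        u8 op = (u8)(v >> 56);     /* cmnd[0]: opcode */
        u32 lba = (u32)v;          /* cmnd[2..5]: big-endian LBA */
        u16 xfer = (u16)(v >> 32); /* cmnd[7..8]: transfer length */

        pr_info("op 0x%02x lba 0x%08x len %u\n", op, lba, xfer);
    }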
diff --git a/drivers/scsi/snic/vnic_cq.c b/drivers/scsi/snic/vnic_cq.c
new file mode 100644
index 000000000000..4c8e64e4fba6
--- /dev/null
+++ b/drivers/scsi/snic/vnic_cq.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+void svnic_cq_free(struct vnic_cq *cq)
+{
+ svnic_dev_free_desc_ring(cq->vdev, &cq->ring);
+
+ cq->ctrl = NULL;
+}
+
+int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
+ unsigned int index, unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ cq->index = index;
+ cq->vdev = vdev;
+
+ cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index);
+ if (!cq->ctrl) {
+ pr_err("Failed to hook CQ[%d] resource\n", index);
+
+ return -EINVAL;
+ }
+
+ err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int cq_message_enable,
+ unsigned int interrupt_offset, u64 cq_message_addr)
+{
+ u64 paddr;
+
+ paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &cq->ctrl->ring_base);
+ iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
+ iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
+ iowrite32(color_enable, &cq->ctrl->color_enable);
+ iowrite32(cq_head, &cq->ctrl->cq_head);
+ iowrite32(cq_tail, &cq->ctrl->cq_tail);
+ iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
+ iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
+ iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
+ iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
+ iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
+ writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+}
+
+void svnic_cq_clean(struct vnic_cq *cq)
+{
+ cq->to_clean = 0;
+ cq->last_color = 0;
+
+ iowrite32(0, &cq->ctrl->cq_head);
+ iowrite32(0, &cq->ctrl->cq_tail);
+ iowrite32(1, &cq->ctrl->cq_tail_color);
+
+ svnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/drivers/scsi/snic/vnic_cq.h b/drivers/scsi/snic/vnic_cq.h
new file mode 100644
index 000000000000..6e651c3e16f7
--- /dev/null
+++ b/drivers/scsi/snic/vnic_cq.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/* Completion queue control */
+struct vnic_cq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 flow_control_enable; /* 0x10 */
+ u32 pad1;
+ u32 color_enable; /* 0x18 */
+ u32 pad2;
+ u32 cq_head; /* 0x20 */
+ u32 pad3;
+ u32 cq_tail; /* 0x28 */
+ u32 pad4;
+ u32 cq_tail_color; /* 0x30 */
+ u32 pad5;
+ u32 interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 cq_entry_enable; /* 0x40 */
+ u32 pad7;
+ u32 cq_message_enable; /* 0x48 */
+ u32 pad8;
+ u32 interrupt_offset; /* 0x50 */
+ u32 pad9;
+ u64 cq_message_addr; /* 0x58 */
+ u32 pad10;
+};
+
+struct vnic_cq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ unsigned int to_clean;
+ unsigned int last_color;
+};
+
+static inline unsigned int svnic_cq_service(struct vnic_cq *cq,
+ unsigned int work_to_do,
+ int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index, void *opaque),
+ void *opaque)
+{
+ struct cq_desc *cq_desc;
+ unsigned int work_done = 0;
+ u16 q_number, completed_index;
+ u8 type, color;
+
+ cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ cq_desc_dec(cq_desc, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+
+ if ((*q_service)(cq->vdev, cq_desc, type,
+ q_number, completed_index, opaque))
+ break;
+
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
+ }
+
+ cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ cq_desc_dec(cq_desc, &type, &color,
+ &q_number, &completed_index);
+
+ work_done++;
+ if (work_done >= work_to_do)
+ break;
+ }
+
+ return work_done;
+}
+
+void svnic_cq_free(struct vnic_cq *cq);
+int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
+ unsigned int index, unsigned int desc_count, unsigned int desc_size);
+void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int message_enable,
+ unsigned int interrupt_offset, u64 message_addr);
+void svnic_cq_clean(struct vnic_cq *cq);
+#endif /* _VNIC_CQ_H_ */
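
svnic_cq_service() walks completion descriptors until the color bit stops
matching cq->last_color, delegating each one to the q_service callback; a
non-zero return from the callback stops the walk early, and work_to_do bounds
the batch. A hedged sketch of a callback and a budgeted call (names are
illustrative; the driver's real callbacks live in its completion path):

    static int example_cq_service(struct vnic_dev *vdev, struct cq_desc *desc,
                                  u8 type, u16 q_number, u16 completed_index,
                                  void *opaque)
    {
        /* handle one completion; return non-zero to stop the walk */
        return 0;
    }

    /* e.g. from an ISR or poll loop, with a budget of 16 descriptors:
     *     work_done = svnic_cq_service(cq, 16, example_cq_service, priv);
     */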
diff --git a/drivers/scsi/snic/vnic_cq_fw.h b/drivers/scsi/snic/vnic_cq_fw.h
new file mode 100644
index 000000000000..c2d1bbd44bd1
--- /dev/null
+++ b/drivers/scsi/snic/vnic_cq_fw.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VNIC_CQ_FW_H_
+#define _VNIC_CQ_FW_H_
+
+#include "snic_fwint.h"
+
+static inline unsigned int
+vnic_cq_fw_service(struct vnic_cq *cq,
+ int (*q_service)(struct vnic_dev *vdev,
+ unsigned int index,
+ struct snic_fw_req *desc),
+ unsigned int work_to_do)
+{
+ struct snic_fw_req *desc;
+ unsigned int work_done = 0;
+ u8 color;
+
+ desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ snic_color_dec(desc, &color);
+
+ while (color != cq->last_color) {
+
+ if ((*q_service)(cq->vdev, cq->index, desc))
+ break;
+
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
+ }
+
+ desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ snic_color_dec(desc, &color);
+
+ work_done++;
+ if (work_done >= work_to_do)
+ break;
+ }
+
+ return work_done;
+}
+
+#endif /* _VNIC_CQ_FW_H_ */
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
new file mode 100644
index 000000000000..e0b5549bc9fb
--- /dev/null
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -0,0 +1,748 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <linux/slab.h>
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_dev.h"
+#include "vnic_stats.h"
+#include "vnic_wq.h"
+
+#define VNIC_DVCMD_TMO 10000 /* Devcmd Timeout value */
+#define VNIC_NOTIFY_INTR_MASK 0x0000ffff00000000ULL
+
+struct devcmd2_controller {
+ struct vnic_wq_ctrl __iomem *wq_ctrl;
+ struct vnic_dev_ring results_ring;
+ struct vnic_wq wq;
+ struct vnic_devcmd2 *cmd_ring;
+ struct devcmd2_result *result;
+ u16 next_result;
+ u16 result_size;
+ int color;
+};
+
+struct vnic_res {
+ void __iomem *vaddr;
+ unsigned int count;
+};
+
+struct vnic_dev {
+ void *priv;
+ struct pci_dev *pdev;
+ struct vnic_res res[RES_TYPE_MAX];
+ enum vnic_dev_intr_mode intr_mode;
+ struct vnic_devcmd __iomem *devcmd;
+ struct vnic_devcmd_notify *notify;
+ struct vnic_devcmd_notify notify_copy;
+ dma_addr_t notify_pa;
+ u32 *linkstatus;
+ dma_addr_t linkstatus_pa;
+ struct vnic_stats *stats;
+ dma_addr_t stats_pa;
+ struct vnic_devcmd_fw_info *fw_info;
+ dma_addr_t fw_info_pa;
+ u64 args[VNIC_DEVCMD_NARGS];
+ struct devcmd2_controller *devcmd2;
+
+ int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait);
+};
+
+#define VNIC_MAX_RES_HDR_SIZE \
+ (sizeof(struct vnic_resource_header) + \
+ sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE 128
+
+void *svnic_dev_priv(struct vnic_dev *vdev)
+{
+ return vdev->priv;
+}
+
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+ struct vnic_dev_bar *bar, unsigned int num_bars)
+{
+ struct vnic_resource_header __iomem *rh;
+ struct vnic_resource __iomem *r;
+ u8 type;
+
+ if (num_bars == 0)
+ return -EINVAL;
+
+ if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
+ pr_err("vNIC BAR0 res hdr length error\n");
+
+ return -EINVAL;
+ }
+
+ rh = bar->vaddr;
+ if (!rh) {
+ pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+
+ return -EINVAL;
+ }
+
+ if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
+ ioread32(&rh->version) != VNIC_RES_VERSION) {
+ pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n",
+ VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ ioread32(&rh->magic), ioread32(&rh->version));
+
+ return -EINVAL;
+ }
+
+ r = (struct vnic_resource __iomem *)(rh + 1);
+
+ while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
+
+ u8 bar_num = ioread8(&r->bar);
+ u32 bar_offset = ioread32(&r->bar_offset);
+ u32 count = ioread32(&r->count);
+ u32 len;
+
+ r++;
+
+ if (bar_num >= num_bars)
+ continue;
+
+ if (!bar[bar_num].len || !bar[bar_num].vaddr)
+ continue;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ /* each count is stride bytes long */
+ len = count * VNIC_RES_STRIDE;
+ if (len + bar_offset > bar->len) {
+ pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
+ type, bar_offset,
+ len,
+ bar->len);
+
+ return -EINVAL;
+ }
+ break;
+
+ case RES_TYPE_INTR_PBA_LEGACY:
+ case RES_TYPE_DEVCMD:
+ case RES_TYPE_DEVCMD2:
+ len = count;
+ break;
+
+ default:
+ continue;
+ }
+
+ vdev->res[type].count = count;
+ vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
+ }
+
+ return 0;
+}
+
+unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type)
+{
+ return vdev->res[type].count;
+}
+
+void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index)
+{
+ if (!vdev->res[type].vaddr)
+ return NULL;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ return (char __iomem *)vdev->res[type].vaddr +
+ index * VNIC_RES_STRIDE;
+
+ default:
+ return (char __iomem *)vdev->res[type].vaddr;
+ }
+}
+
+unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count,
+ unsigned int desc_size)
+{
+ /* The base address of the desc rings must be 512 byte aligned.
+ * Descriptor count is aligned to groups of 32 descriptors. A
+ * count of 0 means the maximum 4096 descriptors. Descriptor
+ * size is aligned to 16 bytes.
+ */
+
+ unsigned int count_align = 32;
+ unsigned int desc_align = 16;
+
+ ring->base_align = 512;
+
+ if (desc_count == 0)
+ desc_count = 4096;
+
+ ring->desc_count = ALIGN(desc_count, count_align);
+
+ ring->desc_size = ALIGN(desc_size, desc_align);
+
+ ring->size = ring->desc_count * ring->desc_size;
+ ring->size_unaligned = ring->size + ring->base_align;
+
+ return ring->size_unaligned;
+}
+
+void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+ memset(ring->descs, 0, ring->size);
+}
+
+int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ svnic_dev_desc_ring_size(ring, desc_count, desc_size);
+
+ ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+ ring->size_unaligned,
+ &ring->base_addr_unaligned);
+
+ if (!ring->descs_unaligned) {
+ pr_err("Failed to allocate ring (size=%d), aborting\n",
+ (int)ring->size);
+
+ return -ENOMEM;
+ }
+
+ ring->base_addr = ALIGN(ring->base_addr_unaligned,
+ ring->base_align);
+ ring->descs = (u8 *)ring->descs_unaligned +
+ (ring->base_addr - ring->base_addr_unaligned);
+
+ svnic_dev_clear_desc_ring(ring);
+
+ ring->desc_avail = ring->desc_count - 1;
+
+ return 0;
+}
+
+void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
+{
+ if (ring->descs) {
+ pci_free_consistent(vdev->pdev,
+ ring->size_unaligned,
+ ring->descs_unaligned,
+ ring->base_addr_unaligned);
+ ring->descs = NULL;
+ }
+}
+
+static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
+{
+ struct devcmd2_controller *dc2c = vdev->devcmd2;
+ struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+ unsigned int i;
+ int delay;
+ int err;
+ u32 posted;
+ u32 new_posted;
+
+ posted = ioread32(&dc2c->wq_ctrl->posted_index);
+
+ if (posted == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: return error */
+ return -ENODEV;
+ }
+
+ new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+ dc2c->cmd_ring[posted].cmd = cmd;
+ dc2c->cmd_ring[posted].flags = 0;
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+ }
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
+
+ if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+ return 0;
+
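+ /*
+ * Poll for a result stamped with the current color; each iteration
+ * busy-waits 100 usecs, so 'wait' bounds the total timeout.
+ */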
+ for (delay = 0; delay < wait; delay++) {
+ udelay(100);
+ if (result->color == dc2c->color) {
+ dc2c->next_result++;
+ if (dc2c->next_result == dc2c->result_size) {
+ dc2c->next_result = 0;
+ dc2c->color = dc2c->color ? 0 : 1;
+ }
+ if (result->error) {
+ err = (int) result->error;
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ pr_err("Error %d devcmd %d\n",
+ err, _CMD_N(cmd));
+
+ return err;
+ }
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+ /*
+ * Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which
+ * would potentially result in reading stale
+ * values.
+ */
+ rmb();
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ vdev->args[i] = result->results[i];
+ }
+
+ return 0;
+ }
+ }
+
+ pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
+
+ return -ETIMEDOUT;
+}
+
+static int svnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+ struct devcmd2_controller *dc2c = NULL;
+ unsigned int fetch_idx;
+ int ret;
+ void __iomem *p;
+
+ if (vdev->devcmd2)
+ return 0;
+
+ p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (!p)
+ return -ENODEV;
+
+ dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC);
+ if (!dc2c)
+ return -ENOMEM;
+
+ vdev->devcmd2 = dc2c;
+
+ dc2c->color = 1;
+ dc2c->result_size = DEVCMD2_RING_SIZE;
+
+ ret = vnic_wq_devcmd2_alloc(vdev,
+ &dc2c->wq,
+ DEVCMD2_RING_SIZE,
+ DEVCMD2_DESC_SIZE);
+ if (ret)
+ goto err_free_devcmd2;
+
+ fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
+ if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: reset fetch_index */
+ fetch_idx = 0;
+ }
+
+ /*
+ * Never change fetch_index; when setting up the WQ for devcmd2,
+ * initialize posted_index to the same value as fetch_index.
+ */
+ vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
+ svnic_wq_enable(&dc2c->wq);
+ ret = svnic_dev_alloc_desc_ring(vdev,
+ &dc2c->results_ring,
+ DEVCMD2_RING_SIZE,
+ DEVCMD2_DESC_SIZE);
+ if (ret)
+ goto err_free_wq;
+
+ dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs;
+ dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
+ dc2c->wq_ctrl = dc2c->wq.ctrl;
+ vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET;
+ vdev->args[1] = DEVCMD2_RING_SIZE;
+
+ ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
+ if (ret < 0)
+ goto err_free_desc_ring;
+
+ vdev->devcmd_rtn = &_svnic_dev_cmd2;
+ pr_info("DEVCMD2 Initialized.\n");
+
+ return ret;
+
+err_free_desc_ring:
+ svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
+
+err_free_wq:
+ svnic_wq_disable(&dc2c->wq);
+ svnic_wq_free(&dc2c->wq);
+
+err_free_devcmd2:
+ kfree(dc2c);
+ vdev->devcmd2 = NULL;
+
+ return ret;
+} /* end of svnic_dev_init_devcmd2 */
+
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+ struct devcmd2_controller *dc2c = vdev->devcmd2;
+
+ vdev->devcmd2 = NULL;
+ vdev->devcmd_rtn = NULL;
+
+ svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
+ svnic_wq_disable(&dc2c->wq);
+ svnic_wq_free(&dc2c->wq);
+ kfree(dc2c);
+}
+
+int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
+{
+ int err;
+
+ memset(vdev->args, 0, sizeof(vdev->args));
+ vdev->args[0] = *a0;
+ vdev->args[1] = *a1;
+
+ err = (*vdev->devcmd_rtn)(vdev, cmd, wait);
+
+ *a0 = vdev->args[0];
+ *a1 = vdev->args[1];
+
+ return err;
+}
+
+int svnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info)
+{
+ u64 a0, a1 = 0;
+ int wait = VNIC_DVCMD_TMO;
+ int err = 0;
+
+ if (!vdev->fw_info) {
+ vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_fw_info),
+ &vdev->fw_info_pa);
+ if (!vdev->fw_info)
+ return -ENOMEM;
+
+ a0 = vdev->fw_info_pa;
+
+ /* only get fw_info once and cache it */
+ err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
+ }
+
+ *fw_info = vdev->fw_info;
+
+ return err;
+}
+
+int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
+ unsigned int size, void *value)
+{
+ u64 a0, a1;
+ int wait = VNIC_DVCMD_TMO;
+ int err;
+
+ a0 = offset;
+ a1 = size;
+
+ err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+ switch (size) {
+ case 1:
+ *(u8 *)value = (u8)a0;
+ break;
+ case 2:
+ *(u16 *)value = (u16)a0;
+ break;
+ case 4:
+ *(u32 *)value = (u32)a0;
+ break;
+ case 8:
+ *(u64 *)value = a0;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return err;
+}
+
+int svnic_dev_stats_clear(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = VNIC_DVCMD_TMO;
+
+ return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
+}
+
+int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+ u64 a0, a1;
+ int wait = VNIC_DVCMD_TMO;
+
+ if (!vdev->stats) {
+ vdev->stats = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_stats), &vdev->stats_pa);
+ if (!vdev->stats)
+ return -ENOMEM;
+ }
+
+ *stats = vdev->stats;
+ a0 = vdev->stats_pa;
+ a1 = sizeof(struct vnic_stats);
+
+ return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+}
+
+int svnic_dev_close(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = VNIC_DVCMD_TMO;
+
+ return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+int svnic_dev_enable_wait(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = VNIC_DVCMD_TMO;
+ int err = 0;
+
+ err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
+ if (err == ERR_ECMDUNKNOWN)
+ return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+
+ return err;
+}
+
+int svnic_dev_disable(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = VNIC_DVCMD_TMO;
+
+ return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+int svnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = VNIC_DVCMD_TMO;
+
+ return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+int svnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = VNIC_DVCMD_TMO;
+ int err;
+
+ *done = 0;
+
+ err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+ u64 a0, a1;
+ int wait = VNIC_DVCMD_TMO;
+
+ if (!vdev->notify) {
+ vdev->notify = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ &vdev->notify_pa);
+ if (!vdev->notify)
+ return -ENOMEM;
+ }
+
+ a0 = vdev->notify_pa;
+ a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK;
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+void svnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+ u64 a0, a1;
+ int wait = VNIC_DVCMD_TMO;
+
+ a0 = 0; /* paddr = 0 to unset notify buffer */
+ a1 = VNIC_NOTIFY_INTR_MASK; /* intr num = -1 to unreg for intr */
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+ u32 *words;
+ unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
+ unsigned int i;
+ u32 csum;
+
+ if (!vdev->notify)
+ return 0;
+
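+ /*
+ * Re-copy until the checksum over words[1..] matches words[0], i.e.
+ * until we capture a snapshot that is consistent against concurrent
+ * firmware DMA updates of the notify area.
+ */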
+ do {
+ csum = 0;
+ memcpy(&vdev->notify_copy, vdev->notify,
+ sizeof(struct vnic_devcmd_notify));
+ words = (u32 *)&vdev->notify_copy;
+ for (i = 1; i < nwords; i++)
+ csum += words[i];
+ } while (csum != words[0]);
+
+ return 1;
+}
+
+int svnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = VNIC_DVCMD_TMO;
+
+ return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+}
+
+int svnic_dev_link_status(struct vnic_dev *vdev)
+{
+ if (vdev->linkstatus)
+ return *vdev->linkstatus;
+
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.link_state;
+}
+
+u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.link_down_cnt;
+}
+
+void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode)
+{
+ vdev->intr_mode = intr_mode;
+}
+
+enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev)
+{
+ return vdev->intr_mode;
+}
+
+void svnic_dev_unregister(struct vnic_dev *vdev)
+{
+ if (vdev) {
+ if (vdev->notify)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ if (vdev->linkstatus)
+ pci_free_consistent(vdev->pdev,
+ sizeof(u32),
+ vdev->linkstatus,
+ vdev->linkstatus_pa);
+ if (vdev->stats)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_stats),
+ vdev->stats, vdev->stats_pa);
+ if (vdev->fw_info)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_fw_info),
+ vdev->fw_info, vdev->fw_info_pa);
+ if (vdev->devcmd2)
+ vnic_dev_deinit_devcmd2(vdev);
+ kfree(vdev);
+ }
+}
+
+struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
+ void *priv,
+ struct pci_dev *pdev,
+ struct vnic_dev_bar *bar,
+ unsigned int num_bars)
+{
+ if (!vdev) {
+ vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
+ if (!vdev)
+ return NULL;
+ }
+
+ vdev->priv = priv;
+ vdev->pdev = pdev;
+
+ if (vnic_dev_discover_res(vdev, bar, num_bars))
+ goto err_out;
+
+ return vdev;
+
+err_out:
+ svnic_dev_unregister(vdev);
+
+ return NULL;
+} /* end of svnic_dev_alloc_discover */
+
+/*
+ * The fallback argument is unused here; it is kept so this interface
+ * stays common with the other Cisco vNIC drivers.
+ */
+int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback)
+{
+ int err = -ENODEV;
+ void __iomem *p;
+
+ p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (p)
+ err = svnic_dev_init_devcmd2(vdev);
+ else
+ pr_err("DEVCMD2 resource not found.\n");
+
+ return err;
+} /* end of svnic_dev_cmd_init */
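
The devcmd wrappers above compose into short bring-up sequences: CMD_OPEN is
posted once, then CMD_OPEN_STATUS is polled until the firmware reports
completion. A hedged sketch, assuming the call site may sleep (error paths
trimmed, polling interval an arbitrary choice):

    static int example_open_and_wait(struct vnic_dev *vdev)
    {
        int done = 0, err;

        err = svnic_dev_open(vdev, 0); /* posts CMD_OPEN */
        if (err)
            return err;

        do {
            err = svnic_dev_open_done(vdev, &done); /* CMD_OPEN_STATUS */
            if (err)
                return err;
            if (!done)
                msleep(100); /* assumed polling interval */
        } while (!done);

        return 0;
    }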
diff --git a/drivers/scsi/snic/vnic_dev.h b/drivers/scsi/snic/vnic_dev.h
new file mode 100644
index 000000000000..e65726da6504
--- /dev/null
+++ b/drivers/scsi/snic/vnic_dev.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET 0x0000000000000000ULL
+#endif
+
+#ifndef readq
+static inline u64 readq(void __iomem *reg)
+{
+ return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
+}
+
+static inline void writeq(u64 val, void __iomem *reg)
+{
+ writel(lower_32_bits(val), reg);
+ writel(upper_32_bits(val), reg + 0x4UL);
+}
+#endif
+
+enum vnic_dev_intr_mode {
+ VNIC_DEV_INTR_MODE_UNKNOWN,
+ VNIC_DEV_INTR_MODE_INTX,
+ VNIC_DEV_INTR_MODE_MSI,
+ VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned long len;
+};
+
+struct vnic_dev_ring {
+ void *descs;
+ size_t size;
+ dma_addr_t base_addr;
+ size_t base_align;
+ void *descs_unaligned;
+ size_t size_unaligned;
+ dma_addr_t base_addr_unaligned;
+ unsigned int desc_size;
+ unsigned int desc_count;
+ unsigned int desc_avail;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *svnic_dev_priv(struct vnic_dev *vdev);
+unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index);
+unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count,
+ unsigned int desc_size);
+void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size);
+void svnic_dev_free_desc_ring(struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring);
+int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait);
+int svnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info);
+int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
+ unsigned int size, void *value);
+int svnic_dev_stats_clear(struct vnic_dev *vdev);
+int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void svnic_dev_notify_unset(struct vnic_dev *vdev);
+int svnic_dev_link_status(struct vnic_dev *vdev);
+u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev);
+int svnic_dev_close(struct vnic_dev *vdev);
+int svnic_dev_enable_wait(struct vnic_dev *vdev);
+int svnic_dev_disable(struct vnic_dev *vdev);
+int svnic_dev_open(struct vnic_dev *vdev, int arg);
+int svnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int svnic_dev_init(struct vnic_dev *vdev, int arg);
+struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
+ void *priv, struct pci_dev *pdev,
+ struct vnic_dev_bar *bar,
+ unsigned int num_bars);
+void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev);
+void svnic_dev_unregister(struct vnic_dev *vdev);
+int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
+#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/scsi/snic/vnic_devcmd.h b/drivers/scsi/snic/vnic_devcmd.h
new file mode 100644
index 000000000000..d81b4f0ceaaa
--- /dev/null
+++ b/drivers/scsi/snic/vnic_devcmd.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS 14
+#define _CMD_VTYPEBITS 10
+#define _CMD_FLAGSBITS 6
+#define _CMD_DIRBITS 2
+
+#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT 0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE 0U
+#define _CMD_DIR_WRITE 1U
+#define _CMD_DIR_READ 2U
+#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE 0U
+#define _CMD_VTYPE_ENET 1U
+#define _CMD_VTYPE_FC 2U
+#define _CMD_VTYPE_SCSI 4U
+#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds.
+ */
+#define _CMDCF(dir, flags, vtype, nr) \
+ (((dir) << _CMD_DIRSHIFT) | \
+ ((flags) << _CMD_FLAGSSHIFT) | \
+ ((vtype) << _CMD_VTYPESHIFT) | \
+ ((nr) << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds.
+ */
+#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+
+enum vnic_devcmd_cmd {
+ CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+ /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
+ CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+ /* dev-specific block member:
+ * in: (u16)a0=offset,(u8)a1=size
+ * out: a0=value */
+ CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+ /* stats clear */
+ CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+ /* stats dump in mem: (u64)a0=paddr to stats area,
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+ /* nic_cfg in (u32)a0 */
+ CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+ /* set struct vnic_devcmd_notify buffer in mem:
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+ /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+ CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+ /* open status:
+ * out: a0=0 open complete, a0=1 open in progress */
+ CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+ /* close vnic */
+ CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+ CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+ /* enable virtual link */
+ CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* enable virtual link, waiting variant. */
+ CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* disable virtual link */
+ CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+ /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
+ CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+ /* init status:
+ * out: a0=0 init complete, a0=1 init in progress
+ * if a0=0, a1=errno */
+ CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+ /* undo initialize of virtual link */
+ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+ /* check fw capability of a cmd:
+ * in: (u32)a0=cmd
+ * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+ CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+ /*
+ * Initialization for the devcmd2 interface.
+ * in: (u64) a0=host result buffer physical address
+ * in: (u16) a1=number of entries in result buffer
+ */
+ CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57)
+};
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED 0x01
+#define CMD_PFILTER_MULTICAST 0x02
+#define CMD_PFILTER_BROADCAST 0x04
+#define CMD_PFILTER_PROMISCUOUS 0x08
+#define CMD_PFILTER_ALL_MULTICAST 0x10
+
+enum vnic_devcmd_status {
+ STAT_NONE = 0,
+ STAT_BUSY = 1 << 0, /* cmd in progress */
+ STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+};
+
+enum vnic_devcmd_error {
+ ERR_SUCCESS = 0,
+ ERR_EINVAL = 1,
+ ERR_EFAULT = 2,
+ ERR_EPERM = 3,
+ ERR_EBUSY = 4,
+ ERR_ECMDUNKNOWN = 5,
+ ERR_EBADSTATE = 6,
+ ERR_ENOMEM = 7,
+ ERR_ETIMEDOUT = 8,
+ ERR_ELINKDOWN = 9,
+};
+
+struct vnic_devcmd_fw_info {
+ char fw_version[32];
+ char fw_build[32];
+ char hw_version[32];
+ char hw_serial_number[32];
+};
+
+struct vnic_devcmd_notify {
+ u32 csum; /* checksum over following words */
+
+ u32 link_state; /* link up == 1 */
+ u32 port_speed; /* effective port speed (rate limit) */
+ u32 mtu; /* MTU */
+ u32 msglvl; /* requested driver msg lvl */
+ u32 uif; /* uplink interface */
+ u32 status; /* status bits (see VNIC_STF_*) */
+ u32 error; /* error code (see ERR_*) for first ERR */
+ u32 link_down_cnt; /* running count of link down transitions */
+};
+#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
+
+struct vnic_devcmd_provinfo {
+ u8 oui[3];
+ u8 type;
+ u8 data[0];
+};
+
+/*
+ * Writing cmd register causes STAT_BUSY to get set in status register.
+ * When cmd completes, STAT_BUSY will be cleared.
+ *
+ * If cmd completed successfully STAT_ERROR will be clear
+ * and args registers contain cmd-specific results.
+ *
+ * If cmd error, STAT_ERROR will be set and args[0] contains error code.
+ *
+ * status register is read-only. While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+ u32 status; /* RO */
+ u32 cmd; /* RW */
+ u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
+};
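
A hedged sketch of the handshake the comment above describes; the driver's real issue path lives in vnic_dev.c, and the function below is purely illustrative (readq/writeq assume a 64-bit build):

	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	/* Illustrative devcmd poll loop: write args, write cmd, spin on status. */
	static int devcmd_poll_sketch(struct vnic_devcmd __iomem *devcmd,
				      u32 cmd, u64 *args, unsigned int wait_us)
	{
		unsigned int i;

		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(args[i], &devcmd->args[i]);
		iowrite32(cmd, &devcmd->cmd);	/* hardware sets STAT_BUSY */

		for (i = 0; i < wait_us; i++) {
			udelay(1);
			if (ioread32(&devcmd->status) & STAT_BUSY)
				continue;
			if (ioread32(&devcmd->status) & STAT_ERROR)
				return (int)readq(&devcmd->args[0]); /* ERR_* code */
			return 0;	/* results now in args[] */
		}

		return -ETIMEDOUT;
	}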
+
+
+/*
+ * Version 2 of the interface.
+ *
+ * Some things are carried over, notably the vnic_devcmd_cmd enum.
+ */
+
+/*
+ * Flags for vnic_devcmd2.flags
+ */
+
+#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+ u16 pad;
+ u16 flags;
+ u32 cmd; /* same command #defines as original */
+ u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+ u64 results[VNIC_DEVCMD2_NRESULTS];
+ u32 pad;
+ u16 completed_index; /* into copy WQ */
+ u8 error; /* same error codes as original */
+ u8 color; /* 0 or 1 as with completion queues */
+};
+
+#define DEVCMD2_RING_SIZE 32
+#define DEVCMD2_DESC_SIZE 128
+
+#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1)
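
The color field in devcmd2_result follows the completion-queue color protocol: firmware flips it each time it wraps the result ring, so the host can spot fresh entries without a producer index. A hypothetical consumer check:

	/* Illustrative: has firmware written a new result at next_idx? */
	static bool devcmd2_result_ready(struct devcmd2_result *ring,
					 unsigned int next_idx, u8 expect_color)
	{
		/* Firmware toggles 'color' on each pass over the ring; the entry
		 * is fresh when its color matches the one expected for this pass. */
		return ring[next_idx].color == expect_color;
	}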
+
+#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/scsi/snic/vnic_intr.c b/drivers/scsi/snic/vnic_intr.c
new file mode 100644
index 000000000000..a7d54806787d
--- /dev/null
+++ b/drivers/scsi/snic/vnic_intr.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+void svnic_intr_free(struct vnic_intr *intr)
+{
+ intr->ctrl = NULL;
+}
+
+int svnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index)
+{
+ intr->index = index;
+ intr->vdev = vdev;
+
+ intr->ctrl = svnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+ if (!intr->ctrl) {
+ pr_err("Failed to hook INTR[%d].ctrl resource\n",
+ index);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void svnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion)
+{
+ iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+ iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
+ iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
+ iowrite32(0, &intr->ctrl->int_credits);
+}
+
+void svnic_intr_clean(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->int_credits);
+}
diff --git a/drivers/scsi/snic/vnic_intr.h b/drivers/scsi/snic/vnic_intr.h
new file mode 100644
index 000000000000..4547f603fe5e
--- /dev/null
+++ b/drivers/scsi/snic/vnic_intr.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+
+#define VNIC_INTR_TIMER_MAX 0xffff
+
+#define VNIC_INTR_TIMER_TYPE_ABS 0
+#define VNIC_INTR_TIMER_TYPE_QUIET 1
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+ u32 coalescing_timer; /* 0x00 */
+ u32 pad0;
+ u32 coalescing_value; /* 0x08 */
+ u32 pad1;
+ u32 coalescing_type; /* 0x10 */
+ u32 pad2;
+ u32 mask_on_assertion; /* 0x18 */
+ u32 pad3;
+ u32 mask; /* 0x20 */
+ u32 pad4;
+ u32 int_credits; /* 0x28 */
+ u32 pad5;
+ u32 int_credit_return; /* 0x30 */
+ u32 pad6;
+};
+
+struct vnic_intr {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
+};
+
+static inline void
+svnic_intr_unmask(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->mask);
+}
+
+static inline void
+svnic_intr_mask(struct vnic_intr *intr)
+{
+ iowrite32(1, &intr->ctrl->mask);
+}
+
+static inline void
+svnic_intr_return_credits(struct vnic_intr *intr,
+ unsigned int credits,
+ int unmask,
+ int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT 16
+#define VNIC_INTR_RESET_TIMER_SHIFT 17
+
+ u32 int_credit_return = (credits & 0xffff) |
+ (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+ (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+ iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
+}
+
+static inline unsigned int
+svnic_intr_credits(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->int_credits);
+}
+
+static inline void
+svnic_intr_return_all_credits(struct vnic_intr *intr)
+{
+ unsigned int credits = svnic_intr_credits(intr);
+ int unmask = 1;
+ int reset_timer = 1;
+
+ svnic_intr_return_credits(intr, credits, unmask, reset_timer);
+}
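
Taken together, the helpers support the usual MSI-X handler shape: service the queues, then acknowledge everything with a single register write. A hypothetical handler (snic_isr_sketch is not a real driver function):

	#include <linux/interrupt.h>

	static irqreturn_t snic_isr_sketch(int irq, void *data)
	{
		struct vnic_intr *intr = data;

		/* ... service the completion queue(s) for this vector ... */

		/* One write to int_credit_return hands back all consumed
		 * credits, resets the coalescing timer and unmasks the
		 * vector (bits 16 and 17). */
		svnic_intr_return_all_credits(intr);

		return IRQ_HANDLED;
	}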
+
+void svnic_intr_free(struct vnic_intr *);
+int svnic_intr_alloc(struct vnic_dev *, struct vnic_intr *, unsigned int);
+void svnic_intr_init(struct vnic_intr *intr,
+ unsigned int coalescing_timer,
+ unsigned int coalescing_type,
+ unsigned int mask_on_assertion);
+void svnic_intr_clean(struct vnic_intr *);
+
+#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/scsi/snic/vnic_resource.h b/drivers/scsi/snic/vnic_resource.h
new file mode 100644
index 000000000000..9713d6835db3
--- /dev/null
+++ b/drivers/scsi/snic/vnic_resource.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
+#define VNIC_RES_VERSION 0x00000000L
+
+/* vNIC resource types */
+enum vnic_res_type {
+ RES_TYPE_EOL, /* End-of-list */
+ RES_TYPE_WQ, /* Work queues */
+ RES_TYPE_RQ, /* Receive queues */
+ RES_TYPE_CQ, /* Completion queues */
+ RES_TYPE_RSVD1,
+ RES_TYPE_NIC_CFG, /* Enet NIC config registers */
+ RES_TYPE_RSVD2,
+ RES_TYPE_RSVD3,
+ RES_TYPE_RSVD4,
+ RES_TYPE_RSVD5,
+ RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
+ RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
+ RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
+ RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
+ RES_TYPE_RSVD6,
+ RES_TYPE_RSVD7,
+ RES_TYPE_DEVCMD, /* Device command region */
+ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+ RES_TYPE_SUBVNIC, /* subvnic resource type */
+ RES_TYPE_MQ_WQ, /* MQ Work queues */
+ RES_TYPE_MQ_RQ, /* MQ Receive queues */
+ RES_TYPE_MQ_CQ, /* MQ Completion queues */
+ RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
+ RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */
+ RES_TYPE_DEVCMD2, /* Device control region */
+
+ RES_TYPE_MAX, /* Count of resource types */
+};
+
+struct vnic_resource_header {
+ u32 magic;
+ u32 version;
+};
+
+struct vnic_resource {
+ u8 type;
+ u8 bar;
+ u8 pad[2];
+ u32 bar_offset;
+ u32 count;
+};
+
+#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/scsi/snic/vnic_snic.h b/drivers/scsi/snic/vnic_snic.h
new file mode 100644
index 000000000000..514d39f5cf00
--- /dev/null
+++ b/drivers/scsi/snic/vnic_snic.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VNIC_SNIC_H_
+#define _VNIC_SNIC_H_
+
+#define VNIC_SNIC_WQ_DESCS_MIN 64
+#define VNIC_SNIC_WQ_DESCS_MAX 1024
+
+#define VNIC_SNIC_MAXDATAFIELDSIZE_MIN 256
+#define VNIC_SNIC_MAXDATAFIELDSIZE_MAX 2112
+
+#define VNIC_SNIC_IO_THROTTLE_COUNT_MIN 1
+#define VNIC_SNIC_IO_THROTTLE_COUNT_MAX 1024
+
+#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MIN 0
+#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX 240000
+
+#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MIN 0
+#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX 255
+
+#define VNIC_SNIC_LUNS_PER_TARGET_MIN 1
+#define VNIC_SNIC_LUNS_PER_TARGET_MAX 1024
+
+/* Device-specific region: scsi configuration */
+struct vnic_snic_config {
+ u32 flags;
+ u32 wq_enet_desc_count;
+ u32 io_throttle_count;
+ u32 port_down_timeout;
+ u32 port_down_io_retries;
+ u32 luns_per_tgt;
+ u16 maxdatafieldsize;
+ u16 intr_timer;
+ u8 intr_timer_type;
+ u8 _resvd2;
+ u8 xpt_type;
+ u8 hid;
+};
+#endif /* _VNIC_SNIC_H_ */
diff --git a/drivers/scsi/snic/vnic_stats.h b/drivers/scsi/snic/vnic_stats.h
new file mode 100644
index 000000000000..370a37c97748
--- /dev/null
+++ b/drivers/scsi/snic/vnic_stats.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+ u64 tx_frames_ok;
+ u64 tx_unicast_frames_ok;
+ u64 tx_multicast_frames_ok;
+ u64 tx_broadcast_frames_ok;
+ u64 tx_bytes_ok;
+ u64 tx_unicast_bytes_ok;
+ u64 tx_multicast_bytes_ok;
+ u64 tx_broadcast_bytes_ok;
+ u64 tx_drops;
+ u64 tx_errors;
+ u64 tx_tso;
+ u64 rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+ u64 rx_frames_ok;
+ u64 rx_frames_total;
+ u64 rx_unicast_frames_ok;
+ u64 rx_multicast_frames_ok;
+ u64 rx_broadcast_frames_ok;
+ u64 rx_bytes_ok;
+ u64 rx_unicast_bytes_ok;
+ u64 rx_multicast_bytes_ok;
+ u64 rx_broadcast_bytes_ok;
+ u64 rx_drop;
+ u64 rx_no_bufs;
+ u64 rx_errors;
+ u64 rx_rss;
+ u64 rx_crc_errors;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_to_max;
+ u64 rsvd[16];
+};
+
+struct vnic_stats {
+ struct vnic_tx_stats tx;
+ struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/scsi/snic/vnic_wq.c b/drivers/scsi/snic/vnic_wq.c
new file mode 100644
index 000000000000..1e91d432089e
--- /dev/null
+++ b/drivers/scsi/snic/vnic_wq.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int index, enum vnic_res_type res_type)
+{
+ wq->ctrl = svnic_dev_get_res(vdev, res_type, index);
+ if (!wq->ctrl)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int index, unsigned int desc_count, unsigned int desc_size)
+{
+ return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,
+ desc_size);
+}
+
+static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
+{
+ struct vnic_wq_buf *buf;
+ unsigned int i, j, count = wq->ring.desc_count;
+ unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
+
+ for (i = 0; i < blks; i++) {
+ wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
+ if (!wq->bufs[i]) {
+ pr_err("Failed to alloc wq_bufs\n");
+
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < blks; i++) {
+ buf = wq->bufs[i];
+ for (j = 0; j < VNIC_WQ_BUF_DFLT_BLK_ENTRIES; j++) {
+ buf->index = i * VNIC_WQ_BUF_DFLT_BLK_ENTRIES + j;
+ buf->desc = (u8 *)wq->ring.descs +
+ wq->ring.desc_size * buf->index;
+ if (buf->index + 1 == count) {
+ buf->next = wq->bufs[0];
+ break;
+ } else if (j + 1 == VNIC_WQ_BUF_DFLT_BLK_ENTRIES) {
+ buf->next = wq->bufs[i + 1];
+ } else {
+ buf->next = buf + 1;
+ buf++;
+ }
+ }
+ }
+
+ wq->to_use = wq->to_clean = wq->bufs[0];
+
+ return 0;
+}
+
+void svnic_wq_free(struct vnic_wq *wq)
+{
+ struct vnic_dev *vdev;
+ unsigned int i;
+
+ vdev = wq->vdev;
+
+ svnic_dev_free_desc_ring(vdev, &wq->ring);
+
+ for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
+ kfree(wq->bufs[i]);
+ wq->bufs[i] = NULL;
+ }
+
+ wq->ctrl = NULL;
+}
+
+int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = 0;
+ wq->vdev = vdev;
+
+ err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
+ if (err) {
+ pr_err("Failed to get devcmd2 resource\n");
+
+ return err;
+ }
+
+ svnic_wq_disable(wq);
+
+ err = vnic_wq_alloc_ring(vdev, wq, 0, desc_count, desc_size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int index, unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = index;
+ wq->vdev = vdev;
+
+ err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ);
+ if (err) {
+ pr_err("Failed to hook WQ[%d] resource\n", index);
+
+ return err;
+ }
+
+ svnic_wq_disable(wq);
+
+ err = vnic_wq_alloc_ring(vdev, wq, index, desc_count, desc_size);
+ if (err)
+ return err;
+
+ err = vnic_wq_alloc_bufs(wq);
+ if (err) {
+ svnic_wq_free(wq);
+
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+ unsigned int count = wq->ring.desc_count;
+
+ paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &wq->ctrl->ring_base);
+ iowrite32(count, &wq->ctrl->ring_size);
+ iowrite32(fetch_index, &wq->ctrl->fetch_index);
+ iowrite32(posted_index, &wq->ctrl->posted_index);
+ iowrite32(cq_index, &wq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ wq->to_use = wq->to_clean =
+ &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
+ [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
+}
+
+void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable,
+ error_interrupt_offset);
+}
+
+unsigned int svnic_wq_error_status(struct vnic_wq *wq)
+{
+ return ioread32(&wq->ctrl->error_status);
+}
+
+void svnic_wq_enable(struct vnic_wq *wq)
+{
+ iowrite32(1, &wq->ctrl->enable);
+}
+
+int svnic_wq_disable(struct vnic_wq *wq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &wq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 100; wait++) {
+ if (!(ioread32(&wq->ctrl->running)))
+ return 0;
+ udelay(1);
+ }
+
+ pr_err("Failed to disable WQ[%d]\n", wq->index);
+
+ return -ETIMEDOUT;
+}
+
+void svnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+{
+ struct vnic_wq_buf *buf;
+
+ BUG_ON(ioread32(&wq->ctrl->enable));
+
+ buf = wq->to_clean;
+
+ while (svnic_wq_desc_used(wq) > 0) {
+
+ (*buf_clean)(wq, buf);
+
+ buf = wq->to_clean = buf->next;
+ wq->ring.desc_avail++;
+ }
+
+ wq->to_use = wq->to_clean = wq->bufs[0];
+
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ svnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/drivers/scsi/snic/vnic_wq.h b/drivers/scsi/snic/vnic_wq.h
new file mode 100644
index 000000000000..7cc031c7ceba
--- /dev/null
+++ b/drivers/scsi/snic/vnic_wq.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Work queue control */
+struct vnic_wq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 dca_value; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_enable; /* 0x40 */
+ u32 pad7;
+ u32 error_interrupt_offset; /* 0x48 */
+ u32 pad8;
+ u32 error_status; /* 0x50 */
+ u32 pad9;
+};
+
+struct vnic_wq_buf {
+ struct vnic_wq_buf *next;
+ dma_addr_t dma_addr;
+ void *os_buf;
+ unsigned int len;
+ unsigned int index;
+ int sop;
+ void *desc;
+};
+
+/* Break the vnic_wq_buf allocations into blocks of 64 entries */
+#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
+ ((unsigned int)(entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
+ VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
+#define VNIC_WQ_BUF_BLK_SZ \
+ (VNIC_WQ_BUF_DFLT_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
+ DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
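
(Worked example: an svnic_wq_alloc() call with 1024 descriptors needs VNIC_WQ_BUF_BLKS_NEEDED(1024) = DIV_ROUND_UP(1024, 64) = 16 blocks of VNIC_WQ_BUF_BLK_SZ bytes each, and VNIC_WQ_BUF_BLKS_MAX = DIV_ROUND_UP(4096, 64) = 64 bounds the bufs[] array below.)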
+
+struct vnic_wq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
+ struct vnic_wq_buf *to_use;
+ struct vnic_wq_buf *to_clean;
+ unsigned int pkts_outstanding;
+};
+
+static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq)
+{
+ /* how many does SW own? */
+ return wq->ring.desc_avail;
+}
+
+static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq)
+{
+ /* how many does HW own? */
+ return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+static inline void *svnic_wq_next_desc(struct vnic_wq *wq)
+{
+ return wq->to_use->desc;
+}
+
+static inline void svnic_wq_post(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr,
+ unsigned int len, int sop, int eop)
+{
+ struct vnic_wq_buf *buf = wq->to_use;
+
+ buf->sop = sop;
+ buf->os_buf = eop ? os_buf : NULL;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+
+ buf = buf->next;
+ if (eop) {
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &wq->ctrl->posted_index);
+ }
+ wq->to_use = buf;
+
+ wq->ring.desc_avail--;
+}
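
A hedged sketch of how a caller pairs svnic_wq_next_desc() with svnic_wq_post() for a single-fragment post (sop = eop = 1); the descriptor encoding itself is queue-type specific, and this helper is illustrative:

	static int wq_post_one_sketch(struct vnic_wq *wq, void *os_buf,
				      dma_addr_t pa, unsigned int len)
	{
		void *desc;

		if (!svnic_wq_desc_avail(wq))
			return -ENOMEM;	/* ring full, try again later */

		desc = svnic_wq_next_desc(wq);
		/* ... encode *desc for this queue type here ... */
		svnic_wq_post(wq, os_buf, pa, len, 1 /* sop */, 1 /* eop */);

		return 0;
	}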
+
+static inline void svnic_wq_service(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, u16 completed_index,
+ void (*buf_service)(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
+ void *opaque)
+{
+ struct vnic_wq_buf *buf;
+
+ buf = wq->to_clean;
+ while (1) {
+
+ (*buf_service)(wq, cq_desc, buf, opaque);
+
+ wq->ring.desc_avail++;
+
+ wq->to_clean = buf->next;
+
+ if (buf->index == completed_index)
+ break;
+
+ buf = wq->to_clean;
+ }
+}
+
+void svnic_wq_free(struct vnic_wq *wq);
+int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int index, unsigned int desc_count, unsigned int desc_size);
+int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int post_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+
+void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+unsigned int svnic_wq_error_status(struct vnic_wq *wq);
+void svnic_wq_enable(struct vnic_wq *wq);
+int svnic_wq_disable(struct vnic_wq *wq);
+void svnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/scsi/snic/wq_enet_desc.h b/drivers/scsi/snic/wq_enet_desc.h
new file mode 100644
index 000000000000..68f62b6d105b
--- /dev/null
+++ b/drivers/scsi/snic/wq_enet_desc.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B */
+struct wq_enet_desc {
+ __le64 address;
+ __le16 length;
+ __le16 mss_loopback;
+ __le16 header_length_flags;
+ __le16 vlan_tag;
+};
+
+#define WQ_ENET_ADDR_BITS 64
+#define WQ_ENET_LEN_BITS 14
+#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
+#define WQ_ENET_MSS_BITS 14
+#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
+#define WQ_ENET_MSS_SHIFT 2
+#define WQ_ENET_LOOPBACK_SHIFT 1
+#define WQ_ENET_HDRLEN_BITS 10
+#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
+#define WQ_ENET_FLAGS_OM_BITS 2
+#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
+#define WQ_ENET_FLAGS_EOP_SHIFT 12
+#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
+#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
+#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
+
+#define WQ_ENET_OFFLOAD_MODE_CSUM 0
+#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
+#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
+#define WQ_ENET_OFFLOAD_MODE_TSO 3
+
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+ u64 address, u16 length, u16 mss, u16 header_length,
+ u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
+ u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+ desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+ WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+ desc->header_length_flags = cpu_to_le16(
+ (header_length & WQ_ENET_HDRLEN_MASK) |
+ (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+ (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+ (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+ (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+ (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+ desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
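
For example, a plain checksum-offload frame occupying a single descriptor, with no TSO, FCoE encapsulation, VLAN insertion or loopback, might be encoded as follows (hypothetical values):

	/* Illustrative encoding of one complete frame. */
	static void wq_enet_desc_enc_demo(struct wq_enet_desc *desc,
					  u64 dma_addr, u16 frame_len)
	{
		wq_enet_desc_enc(desc, dma_addr, frame_len,
				 0,				/* mss: unused without TSO */
				 0,				/* header_length */
				 WQ_ENET_OFFLOAD_MODE_CSUM,	/* offload_mode */
				 1,				/* eop */
				 1,				/* cq_entry */
				 0,				/* fcoe_encap */
				 0,				/* vlan_tag_insert */
				 0,				/* vlan_tag */
				 0);				/* loopback */
	}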
+
+static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
+ u64 *address, u16 *length, u16 *mss, u16 *header_length,
+ u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
+ u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
+ *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+ WQ_ENET_MSS_MASK;
+ *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+ WQ_ENET_LOOPBACK_SHIFT) & 1);
+ *header_length = le16_to_cpu(desc->header_length_flags) &
+ WQ_ENET_HDRLEN_MASK;
+ *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
+ *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_EOP_SHIFT) & 1);
+ *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
+ *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
+ *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
+ *vlan_tag = le16_to_cpu(desc->vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9a1c34205254..3f25b8fa921d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -471,6 +471,47 @@ static void st_release_request(struct st_request *streq)
kfree(streq);
}
+static void st_do_stats(struct scsi_tape *STp, struct request *req)
+{
+ ktime_t now;
+
+ now = ktime_get();
+ if (req->cmd[0] == WRITE_6) {
+ now = ktime_sub(now, STp->stats->write_time);
+ atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time);
+ atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
+ atomic64_inc(&STp->stats->write_cnt);
+ if (req->errors) {
+ atomic64_add(atomic_read(&STp->stats->last_write_size)
+ - STp->buffer->cmdstat.residual,
+ &STp->stats->write_byte_cnt);
+ if (STp->buffer->cmdstat.residual > 0)
+ atomic64_inc(&STp->stats->resid_cnt);
+ } else
+ atomic64_add(atomic_read(&STp->stats->last_write_size),
+ &STp->stats->write_byte_cnt);
+ } else if (req->cmd[0] == READ_6) {
+ now = ktime_sub(now, STp->stats->read_time);
+ atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time);
+ atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
+ atomic64_inc(&STp->stats->read_cnt);
+ if (req->errors) {
+ atomic64_add(atomic_read(&STp->stats->last_read_size)
+ - STp->buffer->cmdstat.residual,
+ &STp->stats->read_byte_cnt);
+ if (STp->buffer->cmdstat.residual > 0)
+ atomic64_inc(&STp->stats->resid_cnt);
+ } else
+ atomic64_add(atomic_read(&STp->stats->last_read_size),
+ &STp->stats->read_byte_cnt);
+ } else {
+ now = ktime_sub(now, STp->stats->other_time);
+ atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
+ atomic64_inc(&STp->stats->other_cnt);
+ }
+ atomic64_dec(&STp->stats->in_flight);
+}
+
static void st_scsi_execute_end(struct request *req, int uptodate)
{
struct st_request *SRpnt = req->end_io_data;
@@ -480,6 +521,8 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
STp->buffer->cmdstat.residual = req->resid_len;
+ st_do_stats(STp, req);
+
tmp = SRpnt->bio;
if (SRpnt->waiting)
complete(SRpnt->waiting);
@@ -496,6 +539,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
int err = 0;
int write = (data_direction == DMA_TO_DEVICE);
+ struct scsi_tape *STp = SRpnt->stp;
req = blk_get_request(SRpnt->stp->device->request_queue, write,
GFP_KERNEL);
@@ -516,6 +560,17 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
}
}
+ atomic64_inc(&STp->stats->in_flight);
+ if (cmd[0] == WRITE_6) {
+ atomic_set(&STp->stats->last_write_size, bufflen);
+ STp->stats->write_time = ktime_get();
+ } else if (cmd[0] == READ_6) {
+ atomic_set(&STp->stats->last_read_size, bufflen);
+ STp->stats->read_time = ktime_get();
+ } else {
+ STp->stats->other_time = ktime_get();
+ }
+
SRpnt->bio = req->bio;
req->cmd_len = COMMAND_SIZE(cmd[0]);
memset(req->cmd, 0, BLK_MAX_CDB);
@@ -4222,6 +4277,12 @@ static int st_probe(struct device *dev)
}
tpnt->index = error;
sprintf(disk->disk_name, "st%d", tpnt->index);
+ tpnt->stats = kzalloc(sizeof(struct scsi_tape_stats), GFP_KERNEL);
+ if (tpnt->stats == NULL) {
+ sdev_printk(KERN_ERR, SDp,
+ "st: Can't allocate statistics.\n");
+ goto out_idr_remove;
+ }
dev_set_drvdata(dev, tpnt);
@@ -4241,6 +4302,8 @@ static int st_probe(struct device *dev)
out_remove_devs:
remove_cdevs(tpnt);
+ kfree(tpnt->stats);
+out_idr_remove:
spin_lock(&st_index_lock);
idr_remove(&st_index_idr, tpnt->index);
spin_unlock(&st_index_lock);
@@ -4298,6 +4361,7 @@ static void scsi_tape_release(struct kref *kref)
disk->private_data = NULL;
put_disk(disk);
+ kfree(tpnt->stats);
kfree(tpnt);
return;
}
@@ -4513,6 +4577,184 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(options);
+/* Support for tape stats */
+
+/**
+ * read_cnt_show - return read count - count of reads made from tape drive
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t read_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld",
+ (long long)atomic64_read(&STm->tape->stats->read_cnt));
+}
+static DEVICE_ATTR_RO(read_cnt);
+
+/**
+ * read_byte_cnt_show - return read byte count - tape drives
+ * may use blocks smaller than 512 bytes; this gives the raw byte
+ * count of data read from the tape drive.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t read_byte_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld",
+ (long long)atomic64_read(&STm->tape->stats->read_byte_cnt));
+}
+static DEVICE_ATTR_RO(read_byte_cnt);
+
+/**
+ * read_ns_show - return read ns - overall time spent waiting on reads in ns.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t read_ns_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld",
+ (long long)atomic64_read(&STm->tape->stats->tot_read_time));
+}
+static DEVICE_ATTR_RO(read_ns);
+
+/**
+ * write_cnt_show - write count - number of user calls
+ * to write(2) that have written data to tape.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t write_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld",
+ (long long)atomic64_read(&STm->tape->stats->write_cnt));
+}
+static DEVICE_ATTR_RO(write_cnt);
+
+/**
+ * write_byte_cnt_show - write byte count - raw count of
+ * bytes written to tape.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t write_byte_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld",
+ (long long)atomic64_read(&STm->tape->stats->write_byte_cnt));
+}
+static DEVICE_ATTR_RO(write_byte_cnt);
+
+/**
+ * write_ns_show - write ns - number of nanoseconds waiting on write
+ * requests to complete.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t write_ns_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld",
+ (long long)atomic64_read(&STm->tape->stats->tot_write_time));
+}
+static DEVICE_ATTR_RO(write_ns);
+
+/**
+ * in_flight_show - number of I/Os currently in flight -
+ * in most cases this will be either 0 or 1. It may be higher if someone
+ * has also issued other SCSI commands such as via an ioctl.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t in_flight_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld",
+ (long long)atomic64_read(&STm->tape->stats->in_flight));
+}
+static DEVICE_ATTR_RO(in_flight);
+
+/**
+ * io_ns_show - io wait ns - this is the number of ns spent
+ * waiting on all I/O to complete. This includes tape movement commands
+ * such as rewinding and seeking to end of file or tape; it also
+ * includes reads and writes. To determine the time spent on tape
+ * movement, subtract the read and write ns from this value.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t io_ns_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld",
+ (long long)atomic64_read(&STm->tape->stats->tot_io_time));
+}
+static DEVICE_ATTR_RO(io_ns);
+
+/**
+ * other_cnt_show - other io count - this is the number of
+ * I/O requests other than read and write requests.
+ * Typically these are tape movement requests, including those the
+ * driver issues itself. Only requests issued by the st driver are counted.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t other_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld",
+ (long long)atomic64_read(&STm->tape->stats->other_cnt));
+}
+static DEVICE_ATTR_RO(other_cnt);
+
+/**
+ * resid_cnt_show - A count of the number of times we get a residual
+ * count - this should indicate someone issuing reads larger than the
+ * block size on tape.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t resid_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld",
+ (long long)atomic64_read(&STm->tape->stats->resid_cnt));
+}
+static DEVICE_ATTR_RO(resid_cnt);
+
static struct attribute *st_dev_attrs[] = {
&dev_attr_defined.attr,
&dev_attr_default_blksize.attr,
@@ -4521,7 +4763,35 @@ static struct attribute *st_dev_attrs[] = {
&dev_attr_options.attr,
NULL,
};
-ATTRIBUTE_GROUPS(st_dev);
+
+static struct attribute *st_stats_attrs[] = {
+ &dev_attr_read_cnt.attr,
+ &dev_attr_read_byte_cnt.attr,
+ &dev_attr_read_ns.attr,
+ &dev_attr_write_cnt.attr,
+ &dev_attr_write_byte_cnt.attr,
+ &dev_attr_write_ns.attr,
+ &dev_attr_in_flight.attr,
+ &dev_attr_io_ns.attr,
+ &dev_attr_other_cnt.attr,
+ &dev_attr_resid_cnt.attr,
+ NULL,
+};
+
+static struct attribute_group stats_group = {
+ .name = "stats",
+ .attrs = st_stats_attrs,
+};
+
+static struct attribute_group st_group = {
+ .attrs = st_dev_attrs,
+};
+
+static const struct attribute_group *st_dev_groups[] = {
+ &st_group,
+ &stats_group,
+ NULL,
+};
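
To exercise the new group from userspace, something like the following derives tape-movement time the way the io_ns comment above describes. This is a hypothetical example; the /sys/class/scsi_tape/st0/stats path and the st0 device name are assumptions:

	#include <stdio.h>

	/* Read one counter from the assumed stats sysfs group. */
	static long long read_stat(const char *name)
	{
		char path[128];
		long long v = -1;
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/class/scsi_tape/st0/stats/%s", name);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%lld", &v) != 1)
			v = -1;
		fclose(f);
		return v;
	}

	int main(void)
	{
		long long io = read_stat("io_ns");
		long long rd = read_stat("read_ns");
		long long wr = read_stat("write_ns");

		if (io >= 0 && rd >= 0 && wr >= 0)
			printf("tape movement: %lld ns\n", io - rd - wr);
		return 0;
	}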
/* The following functions may be useful for a larger audience. */
static int sgl_map_user_pages(struct st_buffer *STbp,
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index f3eee0f9f40c..b6486b5d8681 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -92,6 +92,27 @@ struct st_partstat {
int drv_file;
};
+/* Tape statistics */
+struct scsi_tape_stats {
+ atomic64_t read_byte_cnt; /* bytes read */
+ atomic64_t write_byte_cnt; /* bytes written */
+ atomic64_t in_flight; /* Number of I/Os in flight */
+ atomic64_t read_cnt; /* Count of read requests */
+ atomic64_t write_cnt; /* Count of write requests */
+ atomic64_t other_cnt; /* Count of other requests either
+ * implicit or from user space
+ * ioctl. */
+ atomic64_t resid_cnt; /* Count of resid_len > 0 */
+ atomic64_t tot_read_time; /* ktime spent completing reads */
+ atomic64_t tot_write_time; /* ktime spent completing writes */
+ atomic64_t tot_io_time; /* ktime spent doing any I/O */
+ ktime_t read_time; /* holds ktime request was queued */
+ ktime_t write_time; /* holds ktime request was queued */
+ ktime_t other_time; /* holds ktime request was queued */
+ atomic_t last_read_size; /* Number of bytes issued for last read */
+ atomic_t last_write_size; /* Number of bytes issued for last write */
+};
+
#define ST_NBR_PARTITIONS 4
/* The tape drive descriptor */
@@ -171,6 +192,7 @@ struct scsi_tape {
#endif
struct gendisk *disk;
struct kref kref;
+ struct scsi_tape_stats *stats;
};
/* Bit masks for use_pf */
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 0b7819f3e09b..5bdcbe8fa958 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -838,7 +838,6 @@ static struct scsi_host_template driver_template = {
.can_queue = 1,
.this_id = SYM53C416_SCSI_ID,
.sg_tablesize = 32,
- .cmd_per_lun = 1,
.unchecked_isa_dma = 1,
.use_clustering = ENABLE_CLUSTERING,
};
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 8a1f4b355416..e94538362536 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -73,7 +73,7 @@ config SCSI_UFSHCD_PLATFORM
config SCSI_UFS_QCOM
bool "QCOM specific hooks to UFS controller platform driver"
- depends on SCSI_UFSHCD_PLATFORM && ARCH_MSM
+ depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
select PHY_QCOM_UFS
help
This selects the QCOM specific additions to UFSHCD platform driver.
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 6652a8171de6..4cdffa46d401 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -307,6 +307,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
static unsigned long
ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
{
+ struct ufs_qcom_host *host = hba->priv;
struct ufs_clk_info *clki;
u32 core_clk_period_in_ns;
u32 tx_clk_cycles_per_us = 0;
@@ -330,6 +331,16 @@ ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
{UFS_HS_G2, 0x49},
};
+ /*
+ * The Qunipro controller does not use the following registers:
+ * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
+ * UFS_REG_PA_LINK_STARTUP_TIMER.
+ * The UTP controller, however, uses the SYS1CLK_1US_REG register for
+ * its Interrupt Aggregation logic.
+ */
+ if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
+ goto out;
+
if (gear == 0) {
dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
goto out_error;
@@ -683,6 +694,16 @@ out:
return ret;
}
+static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+
+ if (host->hw_ver.major == 0x1)
+ return UFSHCI_VERSION_11;
+ else
+ return UFSHCI_VERSION_20;
+}
+
/**
* ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
* @hba: host controller instance
@@ -696,13 +717,24 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = hba->priv;
- if (host->hw_ver.major == 0x1)
- hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
+ if (host->hw_ver.major == 0x01) {
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+ | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
+ | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
+
+ if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
+ }
if (host->hw_ver.major >= 0x2) {
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
+
if (!ufs_qcom_cap_qunipro(host))
/* Legacy UniPro mode still needs the following quirks */
- hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
+ hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+ | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
+ | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
}
}
@@ -1005,6 +1037,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.name = "qcom",
.init = ufs_qcom_init,
.exit = ufs_qcom_exit,
+ .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
.clk_scale_notify = ufs_qcom_clk_scale_notify,
.setup_clocks = ufs_qcom_setup_clocks,
.hce_enable_notify = ufs_qcom_hce_enable_notify,
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 648a44675880..b0ade73f8c6a 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -188,6 +188,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode);
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode);
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
@@ -269,6 +271,11 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
*/
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) {
+ if (hba->vops && hba->vops->get_ufs_hci_version)
+ return hba->vops->get_ufs_hci_version(hba);
+ }
+
return ufshcd_readl(hba, REG_UFS_VERSION);
}
@@ -481,6 +488,15 @@ ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
}
/**
+ * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
* ufshcd_enable_run_stop_reg - Enable run-stop registers,
* When run-stop registers are set to 1, it indicates the
* host controller that it can process the requests
@@ -1326,7 +1342,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
- lrbp->intr_cmd = false;
+ lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
lrbp->command_type = UTP_CMD_TYPE_SCSI;
/* form UPIU before issuing the command */
@@ -2147,6 +2163,31 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
};
const char *get = action[!!peer];
int ret;
+ struct ufs_pa_layer_attr orig_pwr_info;
+ struct ufs_pa_layer_attr temp_pwr_info;
+ bool pwr_mode_change = false;
+
+ if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
+ orig_pwr_info = hba->pwr_info;
+ temp_pwr_info = orig_pwr_info;
+
+ if (orig_pwr_info.pwr_tx == FAST_MODE ||
+ orig_pwr_info.pwr_rx == FAST_MODE) {
+ temp_pwr_info.pwr_tx = FASTAUTO_MODE;
+ temp_pwr_info.pwr_rx = FASTAUTO_MODE;
+ pwr_mode_change = true;
+ } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
+ orig_pwr_info.pwr_rx == SLOW_MODE) {
+ temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
+ temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
+ pwr_mode_change = true;
+ }
+ if (pwr_mode_change) {
+ ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
+ if (ret)
+ goto out;
+ }
+ }
uic_cmd.command = peer ?
UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
@@ -2161,6 +2202,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
if (mib_val)
*mib_val = uic_cmd.argument3;
+
+ if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
+ && pwr_mode_change)
+ ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
return ret;
}
@@ -2249,6 +2294,16 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
struct uic_command uic_cmd = {0};
int ret;
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
+ ret = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
+ if (ret) {
+ dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+
uic_cmd.command = UIC_CMD_DME_SET;
uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
uic_cmd.argument3 = mode;
@@ -2256,6 +2311,7 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
ufshcd_release(hba);
+out:
return ret;
}
@@ -2522,7 +2578,10 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
/* Configure interrupt aggregation */
- ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
+ if (ufshcd_is_intr_aggr_allowed(hba))
+ ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
+ else
+ ufshcd_disable_intr_aggr(hba);
/* Configure UTRL and UTMRL base address registers */
ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
@@ -2628,6 +2687,42 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
return 0;
}
+static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
+{
+ int tx_lanes, i, err = 0;
+
+ if (!peer)
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &tx_lanes);
+ else
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &tx_lanes);
+ for (i = 0; i < tx_lanes; i++) {
+ if (!peer)
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
+ 0);
+ else
+ err = ufshcd_dme_peer_set(hba,
+ UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
+ 0);
+ if (err) {
+ dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
+ __func__, peer, i, err);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
+{
+ return ufshcd_disable_tx_lcc(hba, true);
+}
+
/**
* ufshcd_link_startup - Initialize unipro link startup
* @hba: per adapter instance
@@ -2665,6 +2760,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
/* failed to get the link up... retire */
goto out;
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
+ ret = ufshcd_disable_device_tx_lcc(hba);
+ if (ret)
+ goto out;
+ }
+
/* Include any host controller configuration via UIC commands */
if (hba->vops && hba->vops->link_startup_notify) {
ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
@@ -3073,7 +3174,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
* false interrupt if device completes another request after resetting
* aggregation and before reading the DB.
*/
- ufshcd_reset_intr_aggr(hba);
+ if (ufshcd_is_intr_aggr_allowed(hba))
+ ufshcd_reset_intr_aggr(hba);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index b47ff07698e8..c40a0e78a6c4 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -246,6 +246,7 @@ struct ufs_pwr_mode_info {
* @name: variant name
* @init: called when the driver is initialized
* @exit: called to cleanup everything done in init
+ * @get_ufs_hci_version: called to get UFS HCI version
* @clk_scale_notify: notifies that clks are scaled up/down
* @setup_clocks: called before touching any of the controller registers
* @setup_regulators: called before accessing the host controller
@@ -263,6 +264,7 @@ struct ufs_hba_variant_ops {
const char *name;
int (*init)(struct ufs_hba *);
void (*exit)(struct ufs_hba *);
+ u32 (*get_ufs_hci_version)(struct ufs_hba *);
void (*clk_scale_notify)(struct ufs_hba *);
int (*setup_clocks)(struct ufs_hba *, bool);
int (*setup_regulators)(struct ufs_hba *, bool);
@@ -417,11 +419,45 @@ struct ufs_hba {
unsigned int irq;
bool is_irq_enabled;
+ /* Interrupt aggregation support is broken */
+ #define UFSHCD_QUIRK_BROKEN_INTR_AGGR UFS_BIT(0)
+
/*
* delay before each dme command is required as the unipro
* layer has shown instabilities
*/
- #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(0)
+ #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(1)
+
+ /*
+ * Enable this quirk if the UFS host controller has trouble processing
+ * LCC (Line Control Command) frames coming from the device. When the
+ * quirk is enabled, the host controller driver should disable LCC
+ * transmission on the UFS device (by clearing the device's
+ * TX_LCC_ENABLE attribute to 0).
+ */
+ #define UFSHCD_QUIRK_BROKEN_LCC UFS_BIT(2)
+
+ /*
+ * The attribute PA_RXHSUNTERMCAP specifies whether or not the
+ * inbound Link supports unterminated line in HS mode. Setting this
+ * attribute to 1 fixes the transition to HS gear.
+ */
+ #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP UFS_BIT(3)
+
+ /*
+ * This quirk needs to be enabled if the host controller only allows
+ * accessing the peer dme attributes in AUTO mode (FAST AUTO or
+ * SLOW AUTO).
+ */
+ #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE UFS_BIT(4)
+
+ /*
+ * This quirk needs to be enabled if the host controller doesn't
+ * advertise the correct version in UFS_VER register. If this quirk
+ * is enabled, standard UFS host driver will call the vendor specific
+ * ops (get_ufs_hci_version) to get the correct version.
+ */
+ #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
@@ -478,6 +514,12 @@ struct ufs_hba {
#define UFSHCD_CAP_CLK_SCALING (1 << 2)
/* Allow auto bkops to enabled during runtime suspend */
#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
+ /*
+ * This capability allows host controller driver to use the UFS HCI's
+ * interrupt aggregation capability.
+ * CAUTION: Enabling this might reduce overall UFS throughput.
+ */
+#define UFSHCD_CAP_INTR_AGGR (1 << 4)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
@@ -502,6 +544,15 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
+static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
+{
+ return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
+ !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
+}
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index d5721199e9cc..0ae0967aaed8 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -89,8 +89,9 @@ enum {
/* Controller UFSHCI version */
enum {
- UFSHCI_VERSION_10 = 0x00010000,
- UFSHCI_VERSION_11 = 0x00010100,
+ UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
+ UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
+ UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
};
/*
@@ -206,6 +207,9 @@ enum {
#define CONFIG_RESULT_CODE_MASK 0xFF
#define GENERIC_ERROR_CODE_MASK 0xFF
+/* GenSelectorIndex calculation macros for M-PHY attributes */
+#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
+
#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
((sel) & 0xFFFF))
#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
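
For instance, the per-lane TX LCC disable in ufshcd.c above builds its first argument as (hypothetical expansion for lane 1):

	/* UIC_ARG_MIB_SEL(TX_LCC_ENABLE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(1))
	 *   == ((TX_LCC_ENABLE & 0xFFFF) << 16) | (1 & 0xFFFF)
	 * i.e. the attribute ID in the upper 16 bits and the GenSelectorIndex
	 * (here, the lane number) in the lower 16 bits.
	 */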
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index f164f24a4a55..285f77544c36 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -501,6 +501,7 @@ static void virtio_scsi_init_hdr(struct virtio_device *vdev,
cmd->crn = 0;
}
+#ifdef CONFIG_BLK_DEV_INTEGRITY
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
struct virtio_scsi_cmd_req_pi *cmd_pi,
struct scsi_cmnd *sc)
@@ -524,6 +525,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
blk_rq_sectors(rq) *
bi->tuple_size);
}
+#endif
static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
struct virtio_scsi_vq *req_vq,
@@ -546,11 +548,14 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
+#ifdef CONFIG_BLK_DEV_INTEGRITY
if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
req_size = sizeof(cmd->req.cmd_pi);
- } else {
+ } else
+#endif
+ {
virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
req_size = sizeof(cmd->req.cmd);
@@ -1002,6 +1007,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
shost->nr_hw_queues = num_queues;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
@@ -1010,6 +1016,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
scsi_host_set_prot(shost, host_prot);
scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
}
+#endif
err = scsi_add_host(shost, &vdev->dev);
if (err)
@@ -1090,7 +1097,9 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_SCSI_F_HOTPLUG,
VIRTIO_SCSI_F_CHANGE,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
VIRTIO_SCSI_F_T10_PI,
+#endif
};
static struct virtio_driver virtio_scsi_driver = {
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 289ad016d925..61346aa73178 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -882,7 +882,6 @@ static struct scsi_host_template wd719x_template = {
.can_queue = 255,
.this_id = 7,
.sg_tablesize = WD719X_SG,
- .cmd_per_lun = WD719X_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
};
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
index 185e30e4eb93..9c6dd45f95f5 100644
--- a/drivers/scsi/wd719x.h
+++ b/drivers/scsi/wd719x.h
@@ -2,8 +2,6 @@
#define _WD719X_H_
#define WD719X_SG 255 /* Scatter/gather size */
-#define WD719X_CMD_PER_LUN 1 /* We should be able to do linked commands, but
- * this is 1 for now to be safe. */
struct wd719x_sglist {
__le32 ptr;
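This and the similar cmd_per_lun removals below rely on the midlayer's fallback: a scsi_host_template field that is never set stays zero-initialized, and device setup treats zero as a depth of one, so an explicit .cmd_per_lun = 1 adds nothing. A paraphrase of that fallback (not the verbatim midlayer code):

#include <scsi/scsi_host.h>

static int my_initial_queue_depth(const struct Scsi_Host *shost)
{
	/* An unset template field reads back as 0; treat that as 1. */
	return shost->cmd_per_lun ?: 1;
}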
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index d64b6ed9c0c9..aed49bf762b4 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -230,7 +230,6 @@ static struct scsi_host_template rtsx_host_template = {
/* queue commands only, only one command per LUN */
.can_queue = 1,
- .cmd_per_lun = 1,
/* unknown initiator id */
.this_id = -1,
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 74e6114ff18f..4672bb1a24d0 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/idr.h>
#include <asm/unaligned.h>
-#include <scsi/scsi_device.h>
+#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
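From here down the pattern repeats across the target tree (and in the vhost, xen, and USB-gadget fabrics below): target-mode code drops the initiator-side headers (scsi.h, scsi_cmnd.h, scsi_device.h, scsi_host.h) in favour of scsi_proto.h, which carries the shared wire-protocol definitions (CDB opcodes, sense keys, SAM status codes), plus scsi_common.h where shared helpers are needed. After the swap a fabric can match opcodes with no initiator baggage; an illustrative fragment (my_classify_cdb is hypothetical):

#include <linux/errno.h>
#include <scsi/scsi_proto.h>	/* INQUIRY, READ_10, WRITE_10, ... */

static int my_classify_cdb(const unsigned char *cdb)
{
	switch (cdb[0]) {
	case INQUIRY:
	case READ_10:
	case WRITE_10:
		return 0;		/* fast-path these */
	default:
		return -EOPNOTSUPP;	/* leave the rest to generic code */
	}
}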
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 34c3cd1b05ce..5fabcd3d623f 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -17,7 +17,6 @@
* GNU General Public License for more details.
******************************************************************************/
-#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index b0224a77e26d..fe9a582ca6af 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -17,7 +17,7 @@
******************************************************************************/
#include <asm/unaligned.h>
-#include <scsi/scsi_device.h>
+#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 18b0f9703ff2..ce81f17ad1ba 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -30,7 +30,7 @@
#include <linux/ctype.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
-#include <scsi/scsi.h>
+#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 4f8d4d459aa4..8ca373774276 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -28,8 +28,7 @@
#include <linux/configfs.h>
#include <linux/export.h>
#include <linux/file.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ce5f768181ff..417f88b498c7 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -36,8 +36,8 @@
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 35bfe77160d8..41f4f270f919 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -29,8 +29,8 @@
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/export.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
+
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 3f27bfd816d8..d48379a258c7 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -31,8 +31,7 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/falloc.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
+#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8c965683789f..972ed1781ae2 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -35,8 +35,7 @@
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
+#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index a15411c79ae9..7ca642361f9c 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -28,8 +28,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/file.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ecc5eaef13d6..26581e215141 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -36,9 +36,7 @@
#include <linux/module.h>
#include <asm/unaligned.h>
-#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 820d3052b775..6d2007e35df6 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -16,13 +16,13 @@
#define PS_TIMEOUT_OTHER (500*HZ)
#include <linux/device.h>
-#include <scsi/scsi_driver.h>
-#include <scsi/scsi_device.h>
#include <linux/kref.h>
#include <linux/kobject.h>
+struct scsi_device;
+
struct pscsi_plugin_task {
- unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
+ unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER];
int pscsi_direction;
int pscsi_result;
u32 pscsi_resid;
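Two decouplings in one hunk: the sense buffer is now sized by the target core's own TRANSPORT_SENSE_BUFFER instead of the initiator's SCSI_SENSE_BUFFERSIZE, and the full scsi_device definition gives way to a forward declaration, which suffices as long as the header's remaining uses are pointer-only. The pattern in isolation (my_task is hypothetical):

struct scsi_device;			/* opaque: no header required */

struct my_task {
	/*
	 * A pointer member compiles against the bare declaration;
	 * only code that dereferences it must include the real header.
	 */
	struct scsi_device *sd;
};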
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index d16489b6a1a4..b2d8f6f91633 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -29,8 +29,7 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 733824e3825f..43719b393ca9 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -25,7 +25,7 @@
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
-#include <scsi/scsi.h>
+#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 7912aa124385..52ea640274f4 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -24,7 +24,8 @@
#include <linux/module.h>
#include <asm/unaligned.h>
-#include <scsi/scsi.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 03538994d2f7..40f6c1378041 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -33,9 +33,6 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/configfs.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 315ec3458eeb..a5bb0c46e57e 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -27,8 +27,6 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/export.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 47f064415bf6..84de757bd458 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -32,8 +32,7 @@
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 675f2d9d1f14..fdf867230e18 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -37,9 +37,7 @@
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 1738b1646988..e44cc94b12cb 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -25,8 +25,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 07d2996d8c1f..5efef9a2a3d3 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -19,12 +19,13 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
+#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
#include <linux/uio_driver.h>
#include <net/genetlink.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 8fd680ac941b..5ec0d00edaa3 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -25,8 +25,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index edcafa4490c0..1bf78e7c994c 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -30,10 +30,6 @@
#include <linux/hash.h>
#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 65dce1345966..86b699b94c7b 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -34,10 +34,6 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 583e755d8091..fe585d1cce23 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -39,10 +39,6 @@
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index ccee7e332a4d..f2a616d4f2c4 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -32,10 +32,6 @@
#include <linux/rculist.h>
#include <linux/kref.h>
#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index f9b4882fce52..6ce932f90ef8 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -16,7 +16,6 @@
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include <linux/usb/storage.h>
-#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.h b/drivers/usb/gadget/legacy/tcm_usb_gadget.h
index 8289219925b8..9fb3544cc80f 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.h
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.h
@@ -6,7 +6,6 @@
#include <linux/usb/composite.h>
#include <linux/usb/uas.h>
#include <linux/usb/storage.h>
-#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 6431d08c8d9d..a4dbb0cd80da 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -635,7 +635,6 @@ static struct scsi_host_template mts_scsi_host_template = {
.sg_tablesize = SG_ALL,
.can_queue = 1,
.this_id = -1,
- .cmd_per_lun = 1,
.use_clustering = 1,
.emulated = 1,
.slave_alloc = mts_slave_alloc,
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 0e400f382f3a..996ef1e882d3 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -558,7 +558,6 @@ struct scsi_host_template usb_stor_host_template = {
/* queue commands only, only one command per LUN */
.can_queue = 1,
- .cmd_per_lun = 1,
/* unknown initiator id */
.this_id = -1,
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 6d3122afeed3..f68921909552 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -811,7 +811,6 @@ static struct scsi_host_template uas_host_template = {
.can_queue = 65536, /* Is there a limit on the _host_ ? */
.this_id = -1,
.sg_tablesize = SG_NONE,
- .cmd_per_lun = 1, /* until we override it */
.skip_settle_delay = 1,
.use_blk_tags = 1,
};
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ea32b386797f..5b30d27dab50 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -37,7 +37,8 @@
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
-#include <scsi/scsi.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index b7f51504f85a..39223c3e99ad 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -49,10 +49,7 @@
#include <generated/utsrelease.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h> /* SG_ALL */
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>