author     Linus Torvalds <torvalds@linux-foundation.org>  2016-12-23 10:36:19 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-23 10:36:19 -0800
commit     f290cbacb697b7bc8fc67d3988e330bec0e502ea
tree       ef17237c0625c5265bb2739a7402c9bacd52e981 /drivers/scsi
parent     Merge tag 'arc-4.10-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
parent     Merge branch 'misc' into for-linus
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull late SCSI updates from James Bottomley:
 "This is mostly stuff which missed the initial pull. There's a new
  driver: qedi, and some ufs, ibmvscsis and ncr5380 updates plus some
  assorted driver fixes and also a fix for the bug where if a device
  goes into a blocked state between configuration and sysfs device add
  (which can be a long time under async probing) it would become
  permanently blocked"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (30 commits)
  scsi: avoid a permanent stop of the scsi device's request queue
  scsi: mpt3sas: Recognize and act on iopriority info
  scsi: qla2xxx: Fix Target mode handling with Multiqueue changes.
  scsi: qla2xxx: Add Block Multi Queue functionality.
  scsi: qla2xxx: Add multiple queue pair functionality.
  scsi: qla2xxx: Utilize pci_alloc_irq_vectors/pci_free_irq_vectors calls.
  scsi: qla2xxx: Only allow operational MBX to proceed during RESET.
  scsi: hpsa: remove memory allocate failure message
  scsi: Update 3ware driver email addresses
  scsi: zfcp: fix rport unblock race with LUN recovery
  scsi: zfcp: do not trace pure benign residual HBA responses at default level
  scsi: zfcp: fix use-after-"free" in FC ingress path after TMF
  scsi: libcxgbi: return error if interface is not up
  scsi: cxgb4i: libcxgbi: add missing module_put()
  scsi: cxgb4i: libcxgbi: cxgb4: add T6 iSCSI completion feature
  scsi: cxgb4i: libcxgbi: add active open cmd for T6 adapters
  scsi: cxgb4i: use cxgb4_tp_smt_idx() to get smt_idx
  scsi: qedi: Add QLogic FastLinQ offload iSCSI driver framework.
  scsi: aacraid: remove wildcard for series 9 controllers
  scsi: ibmvscsi: add write memory barrier to CRQ processing
  ...
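The "permanently blocked" bug called out above deserves a sketch. This is an illustration of the race only, inferred from the pull message -- the actual scsi_sysfs.c hunk is not reproduced in this excerpt:

    /*
     * async scan/probe path              transport event (e.g. rport loss)
     * ---------------------              ---------------------------------
     * device configured
     *                                    scsi_internal_device_block()
     *                                      -> state = SDEV_BLOCK,
     *                                         request queue stopped
     * sysfs device add
     *   -> state forced back to running
     *      without restarting the queue  => queue stays stopped for good
     */

Per the commit title ("scsi: avoid a permanent stop of the scsi device's request queue"), the fix keeps this interleaving from leaving the queue stopped permanently.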
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c              |    9
-rw-r--r--  drivers/scsi/3w-9xxx.h              |    9
-rw-r--r--  drivers/scsi/3w-sas.c               |    7
-rw-r--r--  drivers/scsi/3w-sas.h               |    7
-rw-r--r--  drivers/scsi/3w-xxxx.c              |    7
-rw-r--r--  drivers/scsi/3w-xxxx.h              |    5
-rw-r--r--  drivers/scsi/Kconfig                |    1
-rw-r--r--  drivers/scsi/Makefile               |    1
-rw-r--r--  drivers/scsi/NCR5380.c              |   77
-rw-r--r--  drivers/scsi/NCR5380.h              |   11
-rw-r--r--  drivers/scsi/aacraid/linit.c        |    2
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c  |  320
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c       |   40
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h       |    2
-rw-r--r--  drivers/scsi/g_NCR5380.c            |  153
-rw-r--r--  drivers/scsi/g_NCR5380.h            |    2
-rw-r--r--  drivers/scsi/hpsa.c                 |   37
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c    |    7
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h    |    1
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h |    6
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c  |   43
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c |  34
-rw-r--r--  drivers/scsi/qedi/Kconfig           |   10
-rw-r--r--  drivers/scsi/qedi/Makefile          |    5
-rw-r--r--  drivers/scsi/qedi/qedi.h            |  364
-rw-r--r--  drivers/scsi/qedi/qedi_dbg.c        |  143
-rw-r--r--  drivers/scsi/qedi/qedi_dbg.h        |  144
-rw-r--r--  drivers/scsi/qedi/qedi_debugfs.c    |  244
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c         | 2378
-rw-r--r--  drivers/scsi/qedi/qedi_gbl.h        |   73
-rw-r--r--  drivers/scsi/qedi/qedi_hsi.h        |   52
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c      | 1624
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.h      |  232
-rw-r--r--  drivers/scsi/qedi/qedi_main.c       | 2127
-rw-r--r--  drivers/scsi/qedi/qedi_sysfs.c      |   52
-rw-r--r--  drivers/scsi/qedi/qedi_version.h    |   14
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c     |   36
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c      |    4
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h      |  108
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h      |   28
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c     |  173
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h   |   30
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c     |  407
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c      |  223
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c      |   85
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c      |  116
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c       |  475
-rw-r--r--  drivers/scsi/scsi_sysfs.c           |    4
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c         |   44
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h         |    1
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h       |   30
-rw-r--r--  drivers/scsi/ufs/ufshcd.c           |   55
-rw-r--r--  drivers/scsi/ufs/ufshcd.h           |   12
-rw-r--r--  drivers/scsi/ufs/ufshci.h           |    7
54 files changed, 9425 insertions(+), 656 deletions(-)
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a56a7b243e91..316f87fe3299 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1,8 +1,8 @@
/*
3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
- Modifications By: Tom Couch <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
+ Modifications By: Tom Couch
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
Copyright (C) 2010 LSI Corporation.
@@ -41,10 +41,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
Note: This version of the driver does not contain a bundled firmware
image.
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 0fdc83cfa0e1..b6c208cc474f 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -1,8 +1,8 @@
/*
3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
- Modifications By: Tom Couch <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
+ Modifications By: Tom Couch
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
Copyright (C) 2010 LSI Corporation.
@@ -41,10 +41,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
*/
#ifndef _3W_9XXX_H
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index f8374850f714..970d8fa6bd53 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1,7 +1,7 @@
/*
3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Copyright (C) 2009 LSI Corporation.
@@ -43,10 +43,7 @@
LSI 3ware 9750 6Gb/s SAS/SATA-RAID
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
History
-------
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index fec6449c7595..05e77d84c16d 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -1,7 +1,7 @@
/*
3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Copyright (C) 2009 LSI Corporation.
@@ -39,10 +39,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
*/
#ifndef _3W_SAS_H
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 25aba1613e21..aa412ab02765 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,7 +1,7 @@
/*
3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
@@ -47,10 +47,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
+
History
-------
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 6f65e663d393..69e80c1ed1ca 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,7 +1,7 @@
/*
3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
@@ -45,7 +45,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
+
+ aradford@gmail.com
For more information, goto:
http://www.lsi.com
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index dfa93347c752..a4f6b0d95515 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1233,6 +1233,7 @@ config SCSI_QLOGICPTI
source "drivers/scsi/qla2xxx/Kconfig"
source "drivers/scsi/qla4xxx/Kconfig"
+source "drivers/scsi/qedi/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a2d03957cbe2..736b77414a4b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -131,6 +131,7 @@ obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+obj-$(CONFIG_QEDI) += libiscsi.o qedi/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d849ffa378b1..4f5ca794bb71 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -97,9 +97,6 @@
* and macros and include this file in your driver.
*
* These macros control options :
- * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
- * defined.
- *
* AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
* for commands that return with a CHECK CONDITION status.
*
@@ -127,9 +124,7 @@
* NCR5380_dma_residual - residual byte count
*
* The generic driver is initialized by calling NCR5380_init(instance),
- * after setting the appropriate host specific fields and ID. If the
- * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
- * possible) function may be used.
+ * after setting the appropriate host specific fields and ID.
*/
#ifndef NCR5380_io_delay
@@ -351,76 +346,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
}
#endif
-
-static int probe_irq;
-
-/**
- * probe_intr - helper for IRQ autoprobe
- * @irq: interrupt number
- * @dev_id: unused
- * @regs: unused
- *
- * Set a flag to indicate the IRQ in question was received. This is
- * used by the IRQ probe code.
- */
-
-static irqreturn_t probe_intr(int irq, void *dev_id)
-{
- probe_irq = irq;
- return IRQ_HANDLED;
-}
-
-/**
- * NCR5380_probe_irq - find the IRQ of an NCR5380
- * @instance: NCR5380 controller
- * @possible: bitmask of ISA IRQ lines
- *
- * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ
- * and then looking to see what interrupt actually turned up.
- */
-
-static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
- int possible)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned long timeout;
- int trying_irqs, i, mask;
-
- for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1)
- if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
- trying_irqs |= mask;
-
- timeout = jiffies + msecs_to_jiffies(250);
- probe_irq = NO_IRQ;
-
- /*
- * A interrupt is triggered whenever BSY = false, SEL = true
- * and a bit set in the SELECT_ENABLE_REG is asserted on the
- * SCSI bus.
- *
- * Note that the bus is only driven when the phase control signals
- * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
- * to zero.
- */
-
- NCR5380_write(TARGET_COMMAND_REG, 0);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
-
- while (probe_irq == NO_IRQ && time_before(jiffies, timeout))
- schedule_timeout_uninterruptible(1);
-
- NCR5380_write(SELECT_ENABLE_REG, 0);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
- for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
- if (trying_irqs & mask)
- free_irq(i, NULL);
-
- return probe_irq;
-}
-
/**
* NCR58380_info - report driver and host information
* @instance: relevant scsi host instance
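The homegrown autoprobe removed here is replaced later in this series (see the g_NCR5380.c diff below) by a version built on the kernel's generic probe_irq_on()/probe_irq_off() helpers. A minimal sketch of that pattern, where trigger_card_irq() is a hypothetical stand-in for the hardware-specific code that makes the card raise an interrupt:

    #include <linux/interrupt.h>

    static int autoprobe_card_irq(void)
    {
    	unsigned long mask;
    	int irq;

    	mask = probe_irq_on();      /* arm all currently unassigned lines */
    	trigger_card_irq();         /* hypothetical: force the card to IRQ */
    	irq = probe_irq_off(mask);  /* >0: line found; 0: none; <0: many */

    	return irq > 0 ? irq : 0;   /* treat "none" and "ambiguous" as none */
    }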
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 3c6ce5434449..51a3567a6fb2 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -199,16 +199,6 @@
#define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
-/*
- * These are "special" values for the irq and dma_channel fields of the
- * Scsi_Host structure
- */
-
-#define DMA_NONE 255
-#define IRQ_AUTO 254
-#define DMA_AUTO 254
-#define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */
-
#ifndef NO_IRQ
#define NO_IRQ 0
#endif
@@ -290,7 +280,6 @@ static void NCR5380_print(struct Scsi_Host *instance);
#define NCR5380_dprint_phase(flg, arg) do {} while (0)
#endif
-static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
static int NCR5380_init(struct Scsi_Host *instance, int flags);
static int NCR5380_maybe_reset_bus(struct Scsi_Host *);
static void NCR5380_exit(struct Scsi_Host *instance);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e4f3e22fcbd9..3ecbf20ca29f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -160,7 +160,6 @@ static const struct pci_device_id aac_pci_tbl[] = {
{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
- { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -239,7 +238,6 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
};
/**
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 9e6f647ff1c1..9a2fdc305cf2 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -189,7 +189,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
- int t4 = is_t4(lldi->adapter_type);
int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
unsigned long long opt0;
unsigned int opt2;
@@ -232,7 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
csk, &req->local_ip, ntohs(req->local_port),
&req->peer_ip, ntohs(req->peer_port),
csk->atid, csk->rss_qid);
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -260,12 +259,45 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
csk, &req->local_ip, ntohs(req->local_port),
&req->peer_ip, ntohs(req->peer_port),
csk->atid, csk->rss_qid);
+ } else {
+ struct cpl_t6_act_open_req *req =
+ (struct cpl_t6_act_open_req *)skb->head;
+ u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_atid));
+ req->local_port = csk->saddr.sin_port;
+ req->peer_port = csk->daddr.sin_port;
+ req->local_ip = csk->saddr.sin_addr.s_addr;
+ req->peer_ip = csk->daddr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = cpu_to_be64(FILTER_TUPLE_V(
+ cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
+ req->rsvd = cpu_to_be32(isn);
+
+ opt2 |= T5_ISS_VALID;
+ opt2 |= RX_FC_DISABLE_F;
+ opt2 |= T5_OPT_2_VALID_F;
+
+ req->opt2 = cpu_to_be32(opt2);
+ req->rsvd2 = cpu_to_be32(0);
+ req->opt3 = cpu_to_be32(0);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+ csk, &req->local_ip, ntohs(req->local_port),
+ &req->peer_ip, ntohs(req->peer_port),
+ csk->atid, csk->rss_qid);
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
- (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+ (&csk->saddr), (&csk->daddr),
+ CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
csk->state, csk->flags, csk->atid, csk->rss_qid);
cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
@@ -276,7 +308,6 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
- int t4 = is_t4(lldi->adapter_type);
int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
unsigned long long opt0;
unsigned int opt2;
@@ -294,10 +325,9 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F |
- RX_FC_DISABLE_F |
RSS_QUEUE_V(csk->rss_qid);
- if (t4) {
+ if (is_t4(lldi->adapter_type)) {
struct cpl_act_open_req6 *req =
(struct cpl_act_open_req6 *)skb->head;
@@ -322,7 +352,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
req->params = cpu_to_be32(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t));
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req6 *req =
(struct cpl_t5_act_open_req6 *)skb->head;
@@ -345,12 +375,41 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t)));
+ } else {
+ struct cpl_t6_act_open_req6 *req =
+ (struct cpl_t6_act_open_req6 *)skb->head;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_atid));
+ req->local_port = csk->saddr6.sin6_port;
+ req->peer_port = csk->daddr6.sin6_port;
+ req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+ req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+ 8);
+ req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+ req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+ 8);
+ req->opt0 = cpu_to_be64(opt0);
+
+ opt2 |= RX_FC_DISABLE_F;
+ opt2 |= T5_OPT_2_VALID_F;
+
+ req->opt2 = cpu_to_be32(opt2);
+
+ req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
+
+ req->rsvd2 = cpu_to_be32(0);
+ req->opt3 = cpu_to_be32(0);
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
- t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+ CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
+ csk->flags, csk->atid,
&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
csk->rss_qid);
@@ -742,7 +801,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
(&csk->saddr), (&csk->daddr),
atid, tid, csk, csk->state, csk->flags, rcv_isn);
- module_put(THIS_MODULE);
+ module_put(cdev->owner);
cxgbi_sock_get(csk);
csk->tid = tid;
@@ -891,7 +950,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
if (is_neg_adv(status))
goto rel_skb;
- module_put(THIS_MODULE);
+ module_put(cdev->owner);
if (status && status != CPL_ERR_TCAM_FULL &&
status != CPL_ERR_CONN_EXIST &&
@@ -1173,6 +1232,101 @@ rel_skb:
__kfree_skb(skb);
}
+static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ struct sk_buff *lskb;
+ u32 tid = GET_TID(cpl);
+ u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
+ csk, csk->state, csk->flags, csk->tid, skb,
+ skb->len, pdu_len_ddp);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
+ cxgbi_skcb_flags(skb) = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*cpl));
+ __pskb_trim(skb, ntohs(cpl->len));
+
+ if (!csk->skb_ulp_lhdr)
+ csk->skb_ulp_lhdr = skb;
+
+ lskb = csk->skb_ulp_lhdr;
+ cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
+ csk, csk->state, csk->flags, skb, lskb);
+
+ __skb_queue_tail(&csk->receive_queue, skb);
+ spin_unlock_bh(&csk->lock);
+ return;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
+ struct sk_buff *skb, u32 ddpvld)
+{
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
+ pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
+ csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
+ }
+
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
+ pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
+ csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
+ }
+
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
+ csk, skb, ddpvld);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
+ }
+
+ if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
+ !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
+ csk, skb, ddpvld);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
+ }
+}
+
static void do_rx_data_ddp(struct cxgbi_device *cdev,
struct sk_buff *skb)
{
@@ -1182,7 +1336,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
unsigned int tid = GET_TID(rpl);
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct tid_info *t = lldi->tids;
- unsigned int status = ntohl(rpl->ddpvld);
+ u32 ddpvld = be32_to_cpu(rpl->ddpvld);
csk = lookup_tid(t, tid);
if (unlikely(!csk)) {
@@ -1192,7 +1346,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
"csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
- csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
+ csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
spin_lock_bh(&csk->lock);
@@ -1220,29 +1374,8 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
- if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
- pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
- csk, lskb, status, cxgbi_skcb_flags(lskb));
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
- }
- if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
- pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
- csk, lskb, status, cxgbi_skcb_flags(lskb));
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
- }
- if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
- log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
- csk, lskb, status);
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
- }
- if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
- !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
- log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
- csk, lskb, status);
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
- }
+ cxgb4i_process_ddpvld(csk, lskb, ddpvld);
+
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, lskb 0x%p, f 0x%lx.\n",
csk, lskb, cxgbi_skcb_flags(lskb));
@@ -1260,6 +1393,98 @@ rel_skb:
__kfree_skb(skb);
}
+static void
+do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ struct sk_buff *data_skb = NULL;
+ u32 tid = GET_TID(rpl);
+ u32 ddpvld = be32_to_cpu(rpl->ddpvld);
+ u32 seq = be32_to_cpu(rpl->seq);
+ u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
+ "pdu_len_ddp %u, status %u.\n",
+ csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
+ ntohs(rpl->len), pdu_len_ddp, rpl->status);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ cxgbi_skcb_tcp_seq(skb) = seq;
+ cxgbi_skcb_flags(skb) = 0;
+ cxgbi_skcb_rx_pdulen(skb) = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*rpl));
+ __pskb_trim(skb, be16_to_cpu(rpl->len));
+
+ csk->rcv_nxt = seq + pdu_len_ddp;
+
+ if (csk->skb_ulp_lhdr) {
+ data_skb = skb_peek(&csk->receive_queue);
+ if (!data_skb ||
+ !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
+ pr_err("Error! freelist data not found 0x%p, tid %u\n",
+ data_skb, tid);
+
+ goto abort_conn;
+ }
+ __skb_unlink(data_skb, &csk->receive_queue);
+
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
+
+ __skb_queue_tail(&csk->receive_queue, skb);
+ __skb_queue_tail(&csk->receive_queue, data_skb);
+ } else {
+ __skb_queue_tail(&csk->receive_queue, skb);
+ }
+
+ csk->skb_ulp_lhdr = NULL;
+
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
+ cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
+
+ cxgb4i_process_ddpvld(csk, skb, ddpvld);
+
+ log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
+ csk, skb, cxgbi_skcb_flags(skb));
+
+ cxgbi_conn_pdu_ready(csk);
+ spin_unlock_bh(&csk->lock);
+
+ return;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+rel_skb:
+ __kfree_skb(skb);
+}
+
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
struct cxgbi_sock *csk;
@@ -1382,7 +1607,6 @@ static int init_act_open(struct cxgbi_sock *csk)
void *daddr;
unsigned int step;
unsigned int size, size6;
- int t4 = is_t4(lldi->adapter_type);
unsigned int linkspeed;
unsigned int rcv_winf, snd_winf;
@@ -1428,12 +1652,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
- if (t4) {
+ if (is_t4(lldi->adapter_type)) {
size = sizeof(struct cpl_act_open_req);
size6 = sizeof(struct cpl_act_open_req6);
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
size = sizeof(struct cpl_t5_act_open_req);
size6 = sizeof(struct cpl_t5_act_open_req6);
+ } else {
+ size = sizeof(struct cpl_t6_act_open_req);
+ size6 = sizeof(struct cpl_t6_act_open_req6);
}
if (csk->csk_family == AF_INET)
@@ -1452,8 +1679,8 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->mtu = dst_mtu(csk->dst);
cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
csk->tx_chan = cxgb4_port_chan(ndev);
- /* SMT two entries per row */
- csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
+ csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
+ cxgb4_port_viid(ndev));
step = lldi->ntxq / lldi->nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
step = lldi->nrxq / lldi->nchan;
@@ -1486,7 +1713,11 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->mtu, csk->mss_idx, csk->smac_idx);
/* must wait for either a act_open_rpl or act_open_establish */
- try_module_get(THIS_MODULE);
+ if (!try_module_get(cdev->owner)) {
+ pr_err("%s, try_module_get failed.\n", ndev->name);
+ goto rel_resource;
+ }
+
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
if (csk->csk_family == AF_INET)
send_act_open_req(csk, skb, csk->l2t);
@@ -1521,10 +1752,11 @@ static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
[CPL_FW4_ACK] = do_fw4_ack,
[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
- [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
+ [CPL_ISCSI_DATA] = do_rx_iscsi_data,
[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
[CPL_RX_DATA_DDP] = do_rx_data_ddp,
[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+ [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
[CPL_RX_DATA] = do_rx_data,
};
@@ -1794,10 +2026,12 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
cdev->nports = lldi->nports;
cdev->mtus = lldi->mtus;
cdev->nmtus = NMTUS;
- cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
+ cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
+ CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
cdev->itp = &cxgb4i_iscsi_transport;
+ cdev->owner = THIS_MODULE;
cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
<< FW_VIID_PFN_S;
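The two module_put() conversions and the new try_module_get() above pivot on the cdev->owner field added to struct cxgbi_device in the libcxgbi.h hunk below: libcxgbi executes the open, but the module that must not unload mid-flight is the provider that owns the cxgbi_device. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/module.h>

    struct provider_dev {
    	struct module *owner;    /* provider stores THIS_MODULE here */
    };

    static int start_async_open(struct provider_dev *dev)
    {
    	/* Pin the providing module (not the shared core's own
    	 * THIS_MODULE) for the lifetime of the request.
    	 */
    	if (!try_module_get(dev->owner))
    		return -ENODEV;   /* provider is unloading */

    	/* ... issue the request; exactly one completion path --
    	 * established, open-reply error, or ARP failure -- must
    	 * call module_put(dev->owner) ...
    	 */
    	return 0;
    }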
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 2ffe029ff2b6..9167bcd9fffe 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -642,6 +642,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
n->dev->name, ndev->name, mtu);
}
+ if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+ pr_info("%s interface not up.\n", ndev->name);
+ err = -ENETDOWN;
+ goto rel_neigh;
+ }
+
cdev = cxgbi_device_find_by_netdev(ndev, &port);
if (!cdev) {
pr_info("dst %pI4, %s, NOT cxgbi device.\n",
@@ -736,6 +742,12 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
}
ndev = n->dev;
+ if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+ pr_info("%s interface not up.\n", ndev->name);
+ err = -ENETDOWN;
+ goto rel_rt;
+ }
+
if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
pr_info("multi-cast route %pI6 port %u, dev %s.\n",
daddr6->sin6_addr.s6_addr,
@@ -896,6 +908,7 @@ EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
+ struct module *owner = csk->cdev->owner;
log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
csk, (csk)->state, (csk)->flags, (csk)->tid);
@@ -906,6 +919,8 @@ void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
spin_unlock_bh(&csk->lock);
cxgbi_sock_put(csk);
__kfree_skb(skb);
+
+ module_put(owner);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
@@ -1574,6 +1589,25 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
return -EIO;
}
+ if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
+ cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
+ /* If completion flag is set and data is directly
+ * placed in to the host memory then update
+ * task->exp_datasn to the datasn in completion
+ * iSCSI hdr as T6 adapter generates completion only
+ * for the last pdu of a sequence.
+ */
+ itt_t itt = ((struct iscsi_data *)skb->data)->itt;
+ struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
+ u32 data_sn = be32_to_cpu(((struct iscsi_data *)
+ skb->data)->datasn);
+ if (task && task->sc) {
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ tcp_task->exp_datasn = data_sn;
+ }
+ }
+
return read_pdu_skb(conn, skb, 0, 0);
}
@@ -1627,15 +1661,15 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
csk->rcv_wup, cdev->rx_credit_thres,
csk->rcv_win);
+ if (!cdev->rx_credit_thres)
+ return;
+
if (csk->state != CTP_ESTABLISHED)
return;
credits = csk->copied_seq - csk->rcv_wup;
if (unlikely(!credits))
return;
- if (unlikely(cdev->rx_credit_thres == 0))
- return;
-
must_send = credits + 16384 >= csk->rcv_win;
if (must_send || credits >= cdev->rx_credit_thres)
csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
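Note how this hunk pairs with the t4_uld_add() change in cxgb4i.c above: T6 devices now register with rx_credit_thres = 0, so the new up-front return turns the software RX-credit path into a no-op for them, while T4/T5 behaviour is unchanged; the old thres == 0 test further down is dropped because the check now happens first.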
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index e7802738f5d2..95ba99044c3e 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -207,6 +207,7 @@ enum cxgbi_skcb_flags {
SKCBF_RX_HDR, /* received pdu header */
SKCBF_RX_DATA, /* received pdu payload */
SKCBF_RX_STATUS, /* received ddp status */
+ SKCBF_RX_ISCSI_COMPL, /* received iscsi completion */
SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */
SKCBF_RX_HCRC_ERR, /* header digest error */
SKCBF_RX_DCRC_ERR, /* data digest error */
@@ -467,6 +468,7 @@ struct cxgbi_device {
struct pci_dev *pdev;
struct dentry *debugfs_root;
struct iscsi_transport *itp;
+ struct module *owner;
unsigned int pfvf;
unsigned int rx_credit_thres;
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index de5147a8c959..6f9665d50d84 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -37,7 +37,7 @@
#define MAX_CARDS 8
/* old-style parameters for compatibility */
-static int ncr_irq;
+static int ncr_irq = -1;
static int ncr_addr;
static int ncr_5380;
static int ncr_53c400;
@@ -52,9 +52,9 @@ module_param(ncr_53c400a, int, 0);
module_param(dtc_3181e, int, 0);
module_param(hp_c2502, int, 0);
-static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])");
static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
module_param_array(base, int, NULL, 0);
@@ -67,6 +67,56 @@ MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC31
MODULE_ALIAS("g_NCR5380_mmio");
MODULE_LICENSE("GPL");
+static void g_NCR5380_trigger_irq(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ /*
+ * An interrupt is triggered whenever BSY = false, SEL = true
+ * and a bit set in the SELECT_ENABLE_REG is asserted on the
+ * SCSI bus.
+ *
+ * Note that the bus is only driven when the phase control signals
+ * (I/O, C/D, and MSG) match those in the TCR.
+ */
+ NCR5380_write(TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(INITIATOR_COMMAND_REG,
+ ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
+
+ msleep(1);
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+}
+
+/**
+ * g_NCR5380_probe_irq - find the IRQ of a NCR5380 or equivalent
+ * @instance: SCSI host instance
+ *
+ * Autoprobe for the IRQ line used by the card by triggering an IRQ
+ * and then looking to see what interrupt actually turned up.
+ */
+
+static int g_NCR5380_probe_irq(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ int irq_mask, irq;
+
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ irq_mask = probe_irq_on();
+ g_NCR5380_trigger_irq(instance);
+ irq = probe_irq_off(irq_mask);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+ if (irq <= 0)
+ return NO_IRQ;
+ return irq;
+}
+
/*
* Configure I/O address of 53C400A or DTC436 by writing magic numbers
* to ports 0x779 and 0x379.
@@ -81,14 +131,33 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
outb(magic[3], 0x379);
outb(magic[4], 0x379);
- /* allowed IRQs for HP C2502 */
- if (irq != 2 && irq != 3 && irq != 4 && irq != 5 && irq != 7)
- irq = 0;
+ if (irq == 9)
+ irq = 2;
+
if (idx >= 0 && idx <= 7)
cfg = 0x80 | idx | (irq << 4);
outb(cfg, 0x379);
}
+static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static int legacy_find_free_irq(int *irq_table)
+{
+ while (*irq_table != -1) {
+ if (!request_irq(*irq_table, legacy_empty_irq_handler,
+ IRQF_PROBE_SHARED, "Test IRQ",
+ (void *)irq_table)) {
+ free_irq(*irq_table, (void *) irq_table);
+ return *irq_table;
+ }
+ irq_table++;
+ }
+ return -1;
+}
+
static unsigned int ncr_53c400a_ports[] = {
0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
};
@@ -101,6 +170,9 @@ static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
static u8 hp_c2502_magic[] = { /* HP C2502 */
0x0f, 0x22, 0xf0, 0x20, 0x80
};
+static int hp_c2502_irqs[] = {
+ 9, 5, 7, 3, 4, -1
+};
static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
struct device *pdev, int base, int irq, int board)
@@ -248,6 +320,13 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
}
}
+ /* Check for vacant slot */
+ NCR5380_write(MODE_REG, 0);
+ if (NCR5380_read(MODE_REG) != 0) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+
ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
if (ret)
goto out_unregister;
@@ -262,31 +341,59 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
NCR5380_maybe_reset_bus(instance);
- if (irq != IRQ_AUTO)
- instance->irq = irq;
- else
- instance->irq = NCR5380_probe_irq(instance, 0xffff);
-
/* Compatibility with documented NCR5380 kernel parameters */
- if (instance->irq == 255)
- instance->irq = NO_IRQ;
+ if (irq == 255 || irq == 0)
+ irq = NO_IRQ;
+ else if (irq == -1)
+ irq = IRQ_AUTO;
+
+ if (board == BOARD_HP_C2502) {
+ int *irq_table = hp_c2502_irqs;
+ int board_irq = -1;
+
+ switch (irq) {
+ case NO_IRQ:
+ board_irq = 0;
+ break;
+ case IRQ_AUTO:
+ board_irq = legacy_find_free_irq(irq_table);
+ break;
+ default:
+ while (*irq_table != -1)
+ if (*irq_table++ == irq)
+ board_irq = irq;
+ }
+
+ if (board_irq <= 0) {
+ board_irq = 0;
+ irq = NO_IRQ;
+ }
+
+ magic_configure(port_idx, board_irq, magic);
+ }
+
+ if (irq == IRQ_AUTO) {
+ instance->irq = g_NCR5380_probe_irq(instance);
+ if (instance->irq == NO_IRQ)
+ shost_printk(KERN_INFO, instance, "no irq detected\n");
+ } else {
+ instance->irq = irq;
+ if (instance->irq == NO_IRQ)
+ shost_printk(KERN_INFO, instance, "no irq provided\n");
+ }
if (instance->irq != NO_IRQ) {
- /* set IRQ for HP C2502 */
- if (board == BOARD_HP_C2502)
- magic_configure(port_idx, instance->irq, magic);
if (request_irq(instance->irq, generic_NCR5380_intr,
0, "NCR5380", instance)) {
- printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
instance->irq = NO_IRQ;
+ shost_printk(KERN_INFO, instance,
+ "irq %d denied\n", instance->irq);
+ } else {
+ shost_printk(KERN_INFO, instance,
+ "irq %d acquired\n", instance->irq);
}
}
- if (instance->irq == NO_IRQ) {
- printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
- printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
- }
-
ret = scsi_add_host(instance, pdev);
if (ret)
goto out_free_irq;
@@ -597,7 +704,7 @@ static int __init generic_NCR5380_init(void)
int ret = 0;
/* compatibility with old-style parameters */
- if (irq[0] == 0 && base[0] == 0 && card[0] == -1) {
+ if (irq[0] == -1 && base[0] == 0 && card[0] == -1) {
irq[0] = ncr_irq;
base[0] = ncr_addr;
if (ncr_5380)
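One subtlety in the magic_configure() hunk above: the new irq == 9 -> irq = 2 remap reflects the classic AT interrupt cascade, in which ISA IRQ 2 is rerouted to IRQ 9, so a board jumpered for "IRQ 2" is seen by the kernel as IRQ 9 while the C2502 config register still expects the value 2. Consistently, the new hp_c2502_irqs[] table lists 9 (not 2) among the probeable lines.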
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 3ce5b65ccb00..81b22d989648 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -51,4 +51,6 @@
#define BOARD_DTC3181E 3
#define BOARD_HP_C2502 4
+#define IRQ_AUTO 254
+
#endif /* GENERIC_NCR5380_H */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 691a09316952..cbc0c5fe5a60 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1557,10 +1557,9 @@ static void hpsa_monitor_offline_device(struct ctlr_info *h,
/* Device is not on the list, add it. */
device = kmalloc(sizeof(*device), GFP_KERNEL);
- if (!device) {
- dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+ if (!device)
return;
- }
+
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
spin_lock_irqsave(&h->offline_device_lock, flags);
list_add_tail(&device->offline_list, &h->offline_device_list);
@@ -2142,17 +2141,15 @@ static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
GFP_KERNEL);
- if (!h->cmd_sg_list) {
- dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
+ if (!h->cmd_sg_list)
return -ENOMEM;
- }
+
for (i = 0; i < h->nr_cmds; i++) {
h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
h->chainsize, GFP_KERNEL);
- if (!h->cmd_sg_list[i]) {
- dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
+ if (!h->cmd_sg_list[i])
goto clean;
- }
+
}
return 0;
@@ -3454,11 +3451,8 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
struct bmic_sense_subsystem_info *ssi;
ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
- if (ssi == NULL) {
- dev_warn(&h->pdev->dev,
- "%s: out of memory\n", __func__);
+ if (!ssi)
return;
- }
rc = hpsa_bmic_sense_subsystem_information(h,
scsi3addr, 0, ssi, sizeof(*ssi));
@@ -4335,8 +4329,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
if (!currentsd[i]) {
- dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
- __FILE__, __LINE__);
h->drv_req_rescan = 1;
goto out;
}
@@ -8597,14 +8589,12 @@ static int hpsa_luns_changed(struct ctlr_info *h)
*/
if (!h->lastlogicals)
- goto out;
+ return rc;
logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
- if (!logdev) {
- dev_warn(&h->pdev->dev,
- "Out of memory, can't track lun changes.\n");
- goto out;
- }
+ if (!logdev)
+ return rc;
+
if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
dev_warn(&h->pdev->dev,
"report luns failed, can't track lun changes.\n");
@@ -8998,11 +8988,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
return;
options = kzalloc(sizeof(*options), GFP_KERNEL);
- if (!options) {
- dev_err(&h->pdev->dev,
- "Error: failed to disable rld caching, during alloc.\n");
+ if (!options)
return;
- }
c = cmd_alloc(h);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9534ee6ef52..50cd01165e35 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -95,6 +95,7 @@ static int fast_fail = 1;
static int client_reserve = 1;
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
+static LIST_HEAD(ibmvscsi_head);
static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -232,6 +233,7 @@ static void ibmvscsi_task(void *data)
while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = VIOSRP_CRQ_FREE;
+ wmb();
}
vio_enable_interrupts(vdev);
@@ -240,6 +242,7 @@ static void ibmvscsi_task(void *data)
vio_disable_interrupts(vdev);
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = VIOSRP_CRQ_FREE;
+ wmb();
} else {
done = 1;
}
@@ -992,7 +995,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (unlikely(rsp->opcode != SRP_RSP)) {
if (printk_ratelimit())
dev_warn(evt_struct->hostdata->dev,
- "bad SRP RSP type %d\n", rsp->opcode);
+ "bad SRP RSP type %#02x\n", rsp->opcode);
}
if (cmnd) {
@@ -2270,6 +2273,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
dev_set_drvdata(&vdev->dev, hostdata);
+ list_add_tail(&hostdata->host_list, &ibmvscsi_head);
return 0;
add_srp_port_failed:
@@ -2291,6 +2295,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
+ list_del(&hostdata->host_list);
unmap_persist_bufs(hostdata);
release_event_pool(&hostdata->pool, hostdata);
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index e0f6c3aeb4ee..3a7875575616 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
/* all driver data associated with a host adapter */
struct ibmvscsi_host_data {
+ struct list_head host_list;
atomic_t request_limit;
int client_migrated;
int reset_crq;
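On the two wmb() calls added to ibmvscsi_task(): crq->valid = VIOSRP_CRQ_FREE hands the queue element back for reuse, and the barrier orders that store ahead of the stores and hypervisor traffic that follow (vio_enable_interrupts(), processing of the next entry), so a weakly ordered Power core cannot hand the slot back out of order with the processing that preceded it. This is a hedged reading; the one-line commit ("add write memory barrier to CRQ processing") gives no further rationale in this excerpt.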
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 8de0eda8cd00..394fe1338d09 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -402,6 +402,9 @@ struct MPT3SAS_DEVICE {
u8 block;
u8 tlr_snoop_check;
u8 ignore_delay_remove;
+ /* Iopriority Command Handling */
+ u8 ncq_prio_enable;
+
};
#define MPT3_CMD_NOT_USED 0x8000 /* free */
@@ -1458,4 +1461,7 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
u16 smid);
+/* NCQ Prio Handling Check */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+
#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 050bd788ad02..95f0f24bac05 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3325,8 +3325,6 @@ static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
/*********** diagnostic trigger suppport *** END ****************************/
-
-
/*****************************************/
struct device_attribute *mpt3sas_host_attrs[] = {
@@ -3402,9 +3400,50 @@ _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+/**
+ * _ctl_device_ncq_prio_enable_show - send prioritized io commands to device
+ * @dev: pointer to embedded device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA
+ */
+static ssize_t
+_ctl_device_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ sas_device_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+_ctl_device_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+ bool ncq_prio_enable = 0;
+
+ if (kstrtobool(buf, &ncq_prio_enable))
+ return -EINVAL;
+
+ if (!scsih_ncq_prio_supp(sdev))
+ return -EINVAL;
+
+ sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
+ return strlen(buf);
+}
+static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
+ _ctl_device_ncq_prio_enable_show,
+ _ctl_device_ncq_prio_enable_store);
+
struct device_attribute *mpt3sas_dev_attrs[] = {
&dev_attr_sas_address,
&dev_attr_sas_device_handle,
+ &dev_attr_sas_ncq_prio_enable,
NULL,
};
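A usage sketch for the new attribute (the sysfs path follows the standard scsi_device layout; the H:C:T:L tuple is system-specific): writing "echo 1 > /sys/class/scsi_device/<H:C:T:L>/device/sas_ncq_prio_enable" enables the feature, and the store handler rejects the write with -EINVAL unless scsih_ncq_prio_supp() reports the (SATA) device capable. Once enabled, I/O submitted in the realtime I/O-priority class -- e.g. under "ionice -c 1" -- picks up the high-priority bit in the scsih_qcmd() hunk below.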
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5c8f75247d73..b5c966e319d3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4053,6 +4053,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _raid_device *raid_device;
+ struct request *rq = scmd->request;
+ int class;
Mpi2SCSIIORequest_t *mpi_request;
u32 mpi_control;
u16 smid;
@@ -4115,7 +4117,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* set tags */
mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
-
+ /* NCQ Prio supported, make sure control indicated high priority */
+ if (sas_device_priv_data->ncq_prio_enable) {
+ class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ if (class == IOPRIO_CLASS_RT)
+ mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
+ }
/* Make sure Device is not raid volume.
* We do not expose raid functionality to upper layer for warpdrive.
*/
@@ -9099,6 +9106,31 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
+/**
+ * scsih_ncq_prio_supp - Check for NCQ command priority support
+ * @sdev: scsi device struct
+ *
+ * This is called when a user indicates they would like to enable
+ * ncq command priorities. This works only on SATA devices.
+ */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev)
+{
+ unsigned char *buf;
+ bool ncq_prio_supp = false;
+
+ if (!scsi_device_supports_vpd(sdev))
+ return ncq_prio_supp;
+
+ buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
+ if (!buf)
+ return ncq_prio_supp;
+
+ if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
+ ncq_prio_supp = (buf[213] >> 4) & 1;
+
+ kfree(buf);
+ return ncq_prio_supp;
+}
/*
* The pci device ids are defined in mpi/mpi2_cnfg.h.
*/
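The magic numbers in scsih_ncq_prio_supp() decode as follows (hedged against the SAT and ATA specs, which the patch does not quote): VPD page 0x89 is the ATA Information page, which embeds the device's IDENTIFY DEVICE data starting at byte offset 60, so byte 213 bit 4 corresponds to IDENTIFY word 76 bit 12 -- the SATA "NCQ priority information supported" capability bit.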
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
new file mode 100644
index 000000000000..23ca8a274586
--- /dev/null
+++ b/drivers/scsi/qedi/Kconfig
@@ -0,0 +1,10 @@
+config QEDI
+ tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
+ depends on PCI && SCSI
+ depends on QED
+ select SCSI_ISCSI_ATTRS
+ select QED_LL2
+ select QED_ISCSI
+ ---help---
+ This driver supports iSCSI offload for the QLogic FastLinQ
+ 41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
new file mode 100644
index 000000000000..2b3e16b24299
--- /dev/null
+++ b/drivers/scsi/qedi/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_QEDI) := qedi.o
+qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
+ qedi_dbg.o
+
+qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
new file mode 100644
index 000000000000..5ca3e8c28a3f
--- /dev/null
+++ b/drivers/scsi/qedi/qedi.h
@@ -0,0 +1,364 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_H_
+#define _QEDI_H_
+
+#define __PREVENT_QED_HSI__
+
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_host.h>
+#include <linux/uio_driver.h>
+
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedi_dbg.h"
+#include <linux/qed/qed_iscsi_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qedi_version.h"
+
+#define QEDI_MODULE_NAME "qedi"
+
+struct qedi_endpoint;
+
+/*
+ * PCI function probe defines
+ */
+#define QEDI_MODE_NORMAL 0
+#define QEDI_MODE_RECOVERY 1
+
+#define ISCSI_WQE_SET_PTU_INVALIDATE 1
+#define QEDI_MAX_ISCSI_TASK 4096
+#define QEDI_MAX_TASK_NUM 0x0FFF
+#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024
+#define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */
+#define MAX_OUSTANDING_TASKS_PER_CON 1024
+
+#define QEDI_MAX_BD_LEN 0xffff
+#define QEDI_BD_SPLIT_SZ 0x1000
+#define QEDI_PAGE_SIZE 4096
+#define QEDI_FAST_SGE_COUNT 4
+/* MAX Length for cached SGL */
+#define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
+
+#define MAX_NUM_MSIX_PF 8
+#define MIN_NUM_CPUS_MSIX(x) min((x)->msix_count, num_online_cpus())
+
+#define QEDI_LOCAL_PORT_MIN 60000
+#define QEDI_LOCAL_PORT_MAX 61024
+#define QEDI_LOCAL_PORT_RANGE (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
+#define QEDI_LOCAL_PORT_INVALID 0xffff
+#define TX_RX_RING 16
+#define RX_RING (TX_RX_RING - 1)
+#define LL2_SINGLE_BUF_SIZE 0x400
+#define QEDI_PAGE_SIZE 4096
+#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE)
+#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))
+
+#define QEDI_PAGE_SIZE 4096
+#define QEDI_PATH_HANDLE 0xFE0000000UL
+
+struct qedi_uio_ctrl {
+ /* meta data */
+ u32 uio_hsi_version;
+
+ /* user writes */
+ u32 host_tx_prod;
+ u32 host_rx_cons;
+ u32 host_rx_bd_cons;
+ u32 host_tx_pkt_len;
+ u32 host_rx_cons_cnt;
+
+ /* driver writes */
+ u32 hw_tx_cons;
+ u32 hw_rx_prod;
+ u32 hw_rx_bd_prod;
+ u32 hw_rx_prod_cnt;
+
+ /* other */
+ u8 mac_addr[6];
+ u8 reserve[2];
+};
+
+struct qedi_rx_bd {
+ u32 rx_pkt_index;
+ u32 rx_pkt_len;
+ u16 vlan_id;
+};
+
+#define QEDI_RX_DESC_CNT (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
+#define QEDI_MAX_RX_DESC_CNT (QEDI_RX_DESC_CNT - 1)
+#define QEDI_NUM_RX_BD (QEDI_RX_DESC_CNT * 1)
+#define QEDI_MAX_RX_BD (QEDI_NUM_RX_BD - 1)
+
+#define QEDI_NEXT_RX_IDX(x) ((((x) & (QEDI_MAX_RX_DESC_CNT)) == \
+ (QEDI_MAX_RX_DESC_CNT - 1)) ? \
+ (x) + 2 : (x) + 1)
+
+struct qedi_uio_dev {
+ struct uio_info qedi_uinfo;
+ u32 uio_dev;
+ struct list_head list;
+
+ u32 ll2_ring_size;
+ void *ll2_ring;
+
+ u32 ll2_buf_size;
+ void *ll2_buf;
+
+ void *rx_pkt;
+ void *tx_pkt;
+
+ struct qedi_ctx *qedi;
+ struct pci_dev *pdev;
+ void *uctrl;
+};
+
+/* List to maintain the skb pointers */
+struct skb_work_list {
+ struct list_head list;
+ struct sk_buff *skb;
+ u16 vlan_id;
+};
+
+/* Queue sizes in number of elements */
+#define QEDI_SQ_SIZE MAX_OUSTANDING_TASKS_PER_CON
+#define QEDI_CQ_SIZE 2048
+#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK
+#define QEDI_PROTO_CQ_PROD_IDX 0
+
+struct qedi_glbl_q_params {
+ u64 hw_p_cq; /* Completion queue PBL */
+ u64 hw_p_rq; /* Request queue PBL */
+ u64 hw_p_cmdq; /* Command queue PBL */
+};
+
+struct global_queue {
+ union iscsi_cqe *cq;
+ dma_addr_t cq_dma;
+ u32 cq_mem_size;
+ u32 cq_cons_idx; /* Completion queue consumer index */
+
+ void *cq_pbl;
+ dma_addr_t cq_pbl_dma;
+ u32 cq_pbl_size;
+
+};
+
+struct qedi_fastpath {
+ struct qed_sb_info *sb_info;
+ u16 sb_id;
+#define QEDI_NAME_SIZE 16
+ char name[QEDI_NAME_SIZE];
+ struct qedi_ctx *qedi;
+};
+
+/* Used to pass fastpath information needed to process CQEs */
+struct qedi_io_work {
+ struct list_head list;
+ struct iscsi_cqe_solicited cqe;
+ u16 que_idx;
+};
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base: queue base memory
+ * @cid_que: queue memory pointer
+ * @cid_q_prod_idx: produce index
+ * @cid_q_cons_idx: consumer index
+ * @cid_q_max_idx: max index. used to detect wrap around condition
+ * @cid_free_cnt: queue size
+ * @conn_cid_tbl: iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+ void *cid_que_base;
+ u32 *cid_que;
+ u32 cid_q_prod_idx;
+ u32 cid_q_cons_idx;
+ u32 cid_q_max_idx;
+ u32 cid_free_cnt;
+ struct qedi_conn **conn_cid_tbl;
+};
+
+struct qedi_portid_tbl {
+ spinlock_t lock; /* Port id lock */
+ u16 start;
+ u16 max;
+ u16 next;
+ unsigned long *table;
+};
+
+struct qedi_itt_map {
+ __le32 itt;
+ struct qedi_cmd *p_cmd;
+};
+
+/* I/O tracing entry */
+#define QEDI_IO_TRACE_SIZE 2048
+struct qedi_io_log {
+#define QEDI_IO_TRACE_REQ 0
+#define QEDI_IO_TRACE_RSP 1
+ u8 direction;
+ u16 task_id;
+ u32 cid;
+ u32 port_id; /* Remote port fabric ID */
+ int lun;
+ u8 op; /* SCSI CDB */
+ u8 lba[4];
+ unsigned int bufflen; /* SCSI buffer length */
+ unsigned int sg_count; /* Number of SG elements */
+ u8 fast_sgs; /* number of fast sgls */
+ u8 slow_sgs; /* number of slow sgls */
+ u8 cached_sgs; /* number of cached sgls */
+ int result; /* Result passed back to mid-layer */
+ unsigned long jiffies; /* Time stamp when I/O logged */
+ int refcount; /* Reference count for task id */
+ unsigned int blk_req_cpu; /* CPU that the task is queued on by
+ * blk layer
+ */
+ unsigned int req_cpu; /* CPU that the task is queued on */
+ unsigned int intr_cpu; /* Interrupt CPU that the task is received on */
+ unsigned int blk_rsp_cpu;/* CPU that task is actually processed and
+ * returned to blk layer
+ */
+ bool cached_sge;
+ bool slow_sge;
+ bool fast_sge;
+};
+
+/* Number of entries in BDQ */
+#define QEDI_BDQ_NUM 256
+#define QEDI_BDQ_BUF_SIZE 256
+
+/* DMA coherent buffers for BDQ */
+struct qedi_bdq_buf {
+ void *buf_addr;
+ dma_addr_t buf_dma;
+};
+
+/* Main port level struct */
+struct qedi_ctx {
+ struct qedi_dbg_ctx dbg_ctx;
+ struct Scsi_Host *shost;
+ struct pci_dev *pdev;
+ struct qed_dev *cdev;
+ struct qed_dev_iscsi_info dev_info;
+ struct qed_int_info int_info;
+ struct qedi_glbl_q_params *p_cpuq;
+ struct global_queue **global_queues;
+ /* uio declaration */
+ struct qedi_uio_dev *udev;
+ struct list_head ll2_skb_list;
+ spinlock_t ll2_lock; /* Light L2 lock */
+ spinlock_t hba_lock; /* per port lock */
+ struct task_struct *ll2_recv_thread;
+ unsigned long flags;
+#define UIO_DEV_OPENED 1
+#define QEDI_IOTHREAD_WAKE 2
+#define QEDI_IN_RECOVERY 5
+#define QEDI_IN_OFFLINE 6
+
+ u8 mac[ETH_ALEN];
+ u32 src_ip[4];
+ u8 ip_type;
+
+ /* DMA address of the p_cpuq array above */
+ dma_addr_t hw_p_cpuq;
+
+ struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
+ void *bdq_pbl;
+ dma_addr_t bdq_pbl_dma;
+ size_t bdq_pbl_mem_size;
+ void *bdq_pbl_list;
+ dma_addr_t bdq_pbl_list_dma;
+ u8 bdq_pbl_list_num_entries;
+ void __iomem *bdq_primary_prod;
+ void __iomem *bdq_secondary_prod;
+ u16 bdq_prod_idx;
+ u16 rq_num_entries;
+
+ u32 msix_count;
+ u32 max_sqes;
+ u8 num_queues;
+ u32 max_active_conns;
+
+ struct iscsi_cid_queue cid_que;
+ struct qedi_endpoint **ep_tbl;
+ struct qedi_portid_tbl lcl_port_tbl;
+
+ /* Rx fast path intr context */
+ struct qed_sb_info *sb_array;
+ struct qedi_fastpath *fp_array;
+ struct qed_iscsi_tid tasks;
+
+#define QEDI_LINK_DOWN 0
+#define QEDI_LINK_UP 1
+ atomic_t link_state;
+
+#define QEDI_RESERVE_TASK_ID 0
+#define MAX_ISCSI_TASK_ENTRIES 4096
+#define QEDI_INVALID_TASK_ID (MAX_ISCSI_TASK_ENTRIES + 1)
+ unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
+ struct qedi_itt_map *itt_map;
+ u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
+ struct qed_pf_params pf_params;
+
+ struct workqueue_struct *tmf_thread;
+ struct workqueue_struct *offload_thread;
+
+ u16 ll2_mtu;
+
+ struct workqueue_struct *dpc_wq;
+
+ spinlock_t task_idx_lock; /* To protect gbl context */
+ s32 last_tidx_alloc;
+ s32 last_tidx_clear;
+
+ struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
+ spinlock_t io_trace_lock; /* protect trace log buf */
+ u16 io_trace_idx;
+ unsigned int intr_cpu;
+ u32 cached_sgls;
+ bool use_cached_sge;
+ u32 slow_sgls;
+ bool use_slow_sge;
+ u32 fast_sgls;
+ bool use_fast_sge;
+
+ atomic_t num_offloads;
+};
+
+struct qedi_work {
+ struct list_head list;
+ struct qedi_ctx *qedi;
+ union iscsi_cqe cqe;
+ u16 que_idx;
+ bool is_solicited;
+};
+
+struct qedi_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t p_work_lock; /* Per cpu worker lock */
+};
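+
+/*
+ * Illustrative sketch (not part of this patch): each per-CPU iothread
+ * drains work_list under p_work_lock and feeds the entries to
+ * qedi_fp_process_cqes(), roughly:
+ *
+ *   spin_lock_irqsave(&p->p_work_lock, flags);
+ *   while (!list_empty(&p->work_list)) {
+ *           work = list_first_entry(&p->work_list, struct qedi_work, list);
+ *           list_del_init(&work->list);
+ *           spin_unlock_irqrestore(&p->p_work_lock, flags);
+ *           qedi_fp_process_cqes(work);
+ *           spin_lock_irqsave(&p->p_work_lock, flags);
+ *   }
+ *   spin_unlock_irqrestore(&p->p_work_lock, flags);
+ */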
+
+static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid)
+{
+ return (info->blocks[tid / info->num_tids_per_block] +
+ (tid % info->num_tids_per_block) * info->size);
+}
+
+#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
+#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
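+
+/*
+ * Example: these macros split a dma_addr_t into the 32-bit halves expected
+ * by firmware PBL entries, as done in qedi_fw.c:
+ *
+ *   pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
+ *   pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
+ */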
+
+#endif /* _QEDI_H_ */
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
new file mode 100644
index 000000000000..2bdedb9c39bc
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -0,0 +1,143 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi_dbg.h"
+#include <linux/vmalloc.h>
+
+void
+qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ /* Check the level mask before va_start() so the early return does
+  * not skip the matching va_end().
+  */
+ if (!(qedi_dbg_log & QEDI_LOG_WARN))
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_notice("[%s]:[%s:%d]:%d: %pV",
+ dev_name(&qedi->pdev->dev), nfunc, line,
+ qedi->host_no, &vaf);
+ else
+ pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ u32 level, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ if (!(qedi_dbg_log & level))
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+int
+qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+ int ret = 0;
+
+ for (; iter->name; iter++) {
+ ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
+ iter->attr);
+ if (ret) {
+ pr_err("Unable to create sysfs %s attr, err(%d).\n",
+ iter->name, ret);
+ break;
+ }
+ }
+ return ret;
+}
+
+void
+qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+ for (; iter->name; iter++)
+ sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
+}
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
new file mode 100644
index 000000000000..c55572badfb0
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -0,0 +1,144 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_DBG_H_
+#define _QEDI_DBG_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <linux/fs.h>
+
+#define __PREVENT_QED_HSI__
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_if.h>
+
+extern uint qedi_dbg_log;
+
+/* Debug print level definitions */
+#define QEDI_LOG_DEFAULT 0x1 /* Set default logging mask */
+#define QEDI_LOG_INFO 0x2 /* Informational logs,
+ * MAC address, WWPN, WWNN
+ */
+#define QEDI_LOG_DISC 0x4 /* Init, discovery, rport */
+#define QEDI_LOG_LL2 0x8 /* LL2, VLAN logs */
+#define QEDI_LOG_CONN 0x10 /* Connection setup, cleanup */
+#define QEDI_LOG_EVT 0x20 /* Events, link, mtu */
+#define QEDI_LOG_TIMER 0x40 /* Timer events */
+#define QEDI_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */
+#define QEDI_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */
+#define QEDI_LOG_UNSOL 0x200 /* unsolicited event logs */
+#define QEDI_LOG_IO 0x400 /* scsi cmd, completion */
+#define QEDI_LOG_MQ 0x800 /* Multi Queue logs */
+#define QEDI_LOG_BSG 0x1000 /* BSG logs */
+#define QEDI_LOG_DEBUGFS 0x2000 /* debugFS logs */
+#define QEDI_LOG_LPORT 0x4000 /* lport logs */
+#define QEDI_LOG_ELS 0x8000 /* ELS logs */
+#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */
+#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */
+#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */
+#define QEDI_LOG_TID 0x80000 /* FW TID context acquire,
+ * free
+ */
+#define QEDI_TRACK_TID 0x100000 /* Track TID state. To be
+ * enabled only at module load
+ * and not run-time.
+ */
+#define QEDI_TRACK_CMD_LIST 0x300000 /* Track active cmd list nodes,
+ * keyed by TID, so TRACK_TID is
+ * also enabled.
+ */
+#define QEDI_LOG_NOTICE 0x40000000 /* Notice logs */
+#define QEDI_LOG_WARN 0x80000000 /* Warning logs */
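+
+/*
+ * The masks above may be OR-ed together, e.g. setting qedi_dbg_log to
+ * (QEDI_LOG_DEFAULT | QEDI_LOG_CONN | QEDI_LOG_IO) enables default,
+ * connection and I/O logging. How the mask is set (e.g. via a module
+ * parameter) is up to the driver core and outside this header.
+ */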
+
+/* Debug context structure */
+struct qedi_dbg_ctx {
+ unsigned int host_no;
+ struct pci_dev *pdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *bdf_dentry;
+#endif
+};
+
+#define QEDI_ERR(pdev, fmt, ...) \
+ qedi_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_WARN(pdev, fmt, ...) \
+ qedi_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_NOTICE(pdev, fmt, ...) \
+ qedi_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_INFO(pdev, level, fmt, ...) \
+ qedi_dbg_info(pdev, __func__, __LINE__, level, fmt, \
+ ## __VA_ARGS__)
+
+void qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ u32 info, const char *fmt, ...);
+
+struct Scsi_Host;
+
+struct sysfs_bin_attrs {
+ char *name;
+ struct bin_attribute *attr;
+};
+
+int qedi_create_sysfs_attr(struct Scsi_Host *shost,
+ struct sysfs_bin_attrs *iter);
+void qedi_remove_sysfs_attr(struct Scsi_Host *shost,
+ struct sysfs_bin_attrs *iter);
+
+#ifdef CONFIG_DEBUG_FS
+/* DebugFS related code */
+struct qedi_list_of_funcs {
+ char *oper_str;
+ ssize_t (*oper_func)(struct qedi_dbg_ctx *qedi);
+};
+
+struct qedi_debugfs_ops {
+ char *name;
+ struct qedi_list_of_funcs *qedi_funcs;
+};
+
+#define qedi_dbg_fileops(drv, ops) \
+{ \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = drv##_dbg_##ops##_cmd_read, \
+ .write = drv##_dbg_##ops##_cmd_write \
+}
+
+/* Used for debugfs sequential files */
+#define qedi_dbg_fileops_seq(drv, ops) \
+{ \
+ .owner = THIS_MODULE, \
+ .open = drv##_dbg_##ops##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
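+
+/*
+ * Example: qedi_dbg_fileops_seq(qedi, gbl_ctx) expands to a file_operations
+ * initializer wiring .open to qedi_dbg_gbl_ctx_open and .read to seq_read,
+ * matching the qedi_dbg_fops[] table in qedi_debugfs.c.
+ */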
+
+void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+ struct qedi_debugfs_ops *dops,
+ const struct file_operations *fops);
+void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi);
+void qedi_dbg_init(char *drv_name);
+void qedi_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _QEDI_DBG_H_ */
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
new file mode 100644
index 000000000000..955936274241
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -0,0 +1,244 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_dbg.h"
+
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+int do_not_recover;
+static struct dentry *qedi_dbg_root;
+
+void
+qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+ struct qedi_debugfs_ops *dops,
+ const struct file_operations *fops)
+{
+ char host_dirname[32];
+ struct dentry *file_dentry = NULL;
+
+ sprintf(host_dirname, "host%u", qedi->host_no);
+ qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root);
+ if (!qedi->bdf_dentry)
+ return;
+
+ while (dops) {
+ if (!(dops->name))
+ break;
+
+ file_dentry = debugfs_create_file(dops->name, 0600,
+ qedi->bdf_dentry, qedi,
+ fops);
+ if (!file_dentry) {
+ QEDI_INFO(qedi, QEDI_LOG_DEBUGFS,
+ "Debugfs entry %s creation failed\n",
+ dops->name);
+ debugfs_remove_recursive(qedi->bdf_dentry);
+ return;
+ }
+ dops++;
+ fops++;
+ }
+}
+
+void
+qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi)
+{
+ debugfs_remove_recursive(qedi->bdf_dentry);
+ qedi->bdf_dentry = NULL;
+}
+
+void
+qedi_dbg_init(char *drv_name)
+{
+ qedi_dbg_root = debugfs_create_dir(drv_name, NULL);
+ if (!qedi_dbg_root)
+ QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n");
+}
+
+void
+qedi_dbg_exit(void)
+{
+ debugfs_remove_recursive(qedi_dbg_root);
+ qedi_dbg_root = NULL;
+}
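+
+/*
+ * Note: qedi_dbg_init()/qedi_dbg_exit() are expected to be called once at
+ * module load/unload, and qedi_dbg_host_init()/qedi_dbg_host_exit() once
+ * per adapter; the callers live outside this file.
+ */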
+
+static ssize_t
+qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
+{
+ if (!do_not_recover)
+ do_not_recover = 1;
+
+ QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+ do_not_recover);
+ return 0;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
+{
+ if (do_not_recover)
+ do_not_recover = 0;
+
+ QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+ do_not_recover);
+ return 0;
+}
+
+static struct qedi_list_of_funcs qedi_dbg_do_not_recover_ops[] = {
+ { "enable", qedi_dbg_do_not_recover_enable },
+ { "disable", qedi_dbg_do_not_recover_disable },
+ { NULL, NULL }
+};
+
+struct qedi_debugfs_ops qedi_debugfs_ops[] = {
+ { "gbl_ctx", NULL },
+ { "do_not_recover", qedi_dbg_do_not_recover_ops},
+ { "io_trace", NULL },
+ { NULL, NULL }
+};
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ size_t cnt = 0;
+ size_t len;
+ char tmp[16];
+ struct qedi_dbg_ctx *qedi_dbg =
+ (struct qedi_dbg_ctx *)filp->private_data;
+ struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops;
+
+ if (*ppos)
+ return 0;
+
+ /* Copy the user data into a bounded kernel buffer; comparing against
+ * a __user pointer directly is not valid.
+ */
+ len = min(count, sizeof(tmp) - 1);
+ if (copy_from_user(tmp, buffer, len))
+ return -EFAULT;
+ tmp[len] = '\0';
+
+ while (lof) {
+ if (!(lof->oper_str))
+ break;
+
+ if (!strncmp(lof->oper_str, tmp, strlen(lof->oper_str))) {
+ cnt = lof->oper_func(qedi_dbg);
+ break;
+ }
+
+ lof++;
+ }
+ return (count - cnt);
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char tmp[32];
+ size_t cnt;
+
+ if (*ppos)
+ return 0;
+
+ /* Format into a kernel buffer, then copy out to the user buffer;
+ * sprintf() directly into a __user pointer is not valid.
+ */
+ cnt = scnprintf(tmp, sizeof(tmp), "do_not_recover=%d\n", do_not_recover);
+ return simple_read_from_buffer(buffer, count, ppos, tmp, cnt);
+}
+
+static int
+qedi_gbl_ctx_show(struct seq_file *s, void *unused)
+{
+ struct qedi_fastpath *fp = NULL;
+ struct qed_sb_info *sb_info = NULL;
+ struct status_block *sb = NULL;
+ struct global_queue *que = NULL;
+ int id;
+ u16 prod_idx;
+ struct qedi_ctx *qedi = s->private;
+ unsigned long flags;
+
+ seq_puts(s, " DUMP CQ CONTEXT:\n");
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id);
+ fp = &qedi->fp_array[id];
+ sb_info = fp->sb_info;
+ sb = sb_info->sb_virt;
+ prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
+ STATUS_BLOCK_PROD_INDEX_MASK);
+ seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
+ que = qedi->global_queues[fp->sb_id];
+ seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
+ seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id);
+ seq_puts(s, "=========== END ==================\n\n\n");
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+ return 0;
+}
+
+static int
+qedi_dbg_gbl_ctx_open(struct inode *inode, struct file *file)
+{
+ struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+ struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+ dbg_ctx);
+
+ return single_open(file, qedi_gbl_ctx_show, qedi);
+}
+
+static int
+qedi_io_trace_show(struct seq_file *s, void *unused)
+{
+ int id, idx = 0;
+ struct qedi_ctx *qedi = s->private;
+ struct qedi_io_log *io_log;
+ unsigned long flags;
+
+ seq_puts(s, " DUMP IO LOGS:\n");
+ spin_lock_irqsave(&qedi->io_trace_lock, flags);
+ idx = qedi->io_trace_idx;
+ for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) {
+ io_log = &qedi->io_trace_buf[idx];
+ seq_printf(s, "iodir-%d:", io_log->direction);
+ seq_printf(s, "tid-0x%x:", io_log->task_id);
+ seq_printf(s, "cid-0x%x:", io_log->cid);
+ seq_printf(s, "lun-%d:", io_log->lun);
+ seq_printf(s, "op-0x%02x:", io_log->op);
+ seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
+ io_log->lba[1], io_log->lba[2], io_log->lba[3]);
+ seq_printf(s, "buflen-%d:", io_log->bufflen);
+ seq_printf(s, "sgcnt-%d:", io_log->sg_count);
+ seq_printf(s, "res-0x%08x:", io_log->result);
+ seq_printf(s, "jif-%lu:", io_log->jiffies);
+ seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu);
+ seq_printf(s, "req_cpu-%d:", io_log->req_cpu);
+ seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu);
+ seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu);
+
+ idx++;
+ if (idx == QEDI_IO_TRACE_SIZE)
+ idx = 0;
+ }
+ spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+ return 0;
+}
+
+static int
+qedi_dbg_io_trace_open(struct inode *inode, struct file *file)
+{
+ struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+ struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+ dbg_ctx);
+
+ return single_open(file, qedi_io_trace_show, qedi);
+}
+
+const struct file_operations qedi_dbg_fops[] = {
+ qedi_dbg_fileops_seq(qedi, gbl_ctx),
+ qedi_dbg_fileops(qedi, do_not_recover),
+ qedi_dbg_fileops_seq(qedi, io_trace),
+ { NULL, NULL },
+};
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
new file mode 100644
index 000000000000..b1d3904ae8fd
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -0,0 +1,2378 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/delay.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask);
+
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+ if (cmd->io_tbl.sge_valid && sc) {
+ cmd->io_tbl.sge_valid = 0;
+ scsi_dma_unmap(sc);
+ }
+}
+
+static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_logout_rsp *resp_hdr;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_logout_response_hdr *cqe_logout_response;
+ struct qedi_cmd *cmd;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+ cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
+ spin_lock(&session->back_lock);
+ resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = cqe_logout_response->opcode;
+ resp_hdr->flags = cqe_logout_response->flags;
+ resp_hdr->hlength = 0;
+
+ resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+ resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
+ resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
+
+ resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
+ resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id,
+ &cmd->io_cmd);
+ }
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_text_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_task_context *task_ctx;
+ struct iscsi_text_rsp *resp_hdr_ptr;
+ struct iscsi_text_response_hdr *cqe_text_response;
+ struct qedi_cmd *cmd;
+ int pld_len;
+ u32 *tmp;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+ task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+ cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr_ptr->opcode = cqe_text_response->opcode;
+ resp_hdr_ptr->flags = cqe_text_response->flags;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_text_response->hdr_second_dword &
+ ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->ttt = cqe_text_response->ttt;
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);
+
+ pld_len = cqe_text_response->hdr_second_dword &
+ ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+ memset(task_ctx, '\0', sizeof(*task_ctx));
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id,
+ &cmd->io_cmd);
+ }
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+ qedi_conn->gen_pdu.resp_buf,
+ (qedi_conn->gen_pdu.resp_wr_ptr -
+ qedi_conn->gen_pdu.resp_buf));
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_tmf_resp_work(struct work_struct *work)
+{
+ struct qedi_cmd *qedi_cmd =
+ container_of(work, struct qedi_cmd, tmf_work);
+ struct qedi_conn *qedi_conn = qedi_cmd->conn;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tm_rsp *resp_hdr_ptr;
+ struct iscsi_cls_session *cls_sess;
+ int rval = 0;
+
+ set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+ cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
+
+ iscsi_block_session(session->cls_session);
+ rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
+ if (rval) {
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+ iscsi_unblock_session(session->cls_session);
+ return;
+ }
+
+ iscsi_unblock_session(session->cls_session);
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+ spin_lock(&session->back_lock);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+ spin_unlock(&session->back_lock);
+ kfree(resp_hdr_ptr);
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
+static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tmf_response_hdr *cqe_tmp_response;
+ struct iscsi_tm_rsp *resp_hdr_ptr;
+ struct iscsi_tm *tmf_hdr;
+ struct qedi_cmd *qedi_cmd = NULL;
+ u32 *tmp;
+
+ cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
+
+ qedi_cmd = task->dd_data;
+ qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+ if (!qedi_cmd->tmf_resp_buf) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate resp buf, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
+
+ /* Fill up the header */
+ resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
+ resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
+ resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_tmp_response->hdr_second_dword &
+ ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
+
+ tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+
+ if (likely(qedi_cmd->io_cmd_in_list)) {
+ qedi_cmd->io_cmd_in_list = false;
+ list_del_init(&qedi_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
+ queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+ goto unblock_sess;
+ }
+
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+ kfree(resp_hdr_ptr);
+
+unblock_sess:
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_login_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_task_context *task_ctx;
+ struct iscsi_login_rsp *resp_hdr_ptr;
+ struct iscsi_login_response_hdr *cqe_login_response;
+ struct qedi_cmd *cmd;
+ int pld_len;
+ u32 *tmp;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+
+ cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
+ task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
+ resp_hdr_ptr->opcode = cqe_login_response->opcode;
+ resp_hdr_ptr->flags = cqe_login_response->flags_attr;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_login_response->hdr_second_dword &
+ ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->tsih = cqe_login_response->tsih;
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
+ resp_hdr_ptr->status_class = cqe_login_response->status_class;
+ resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
+ pld_len = cqe_login_response->hdr_second_dword &
+ ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ memset(task_ctx, '\0', sizeof(*task_ctx));
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+ qedi_conn->gen_pdu.resp_buf,
+ (qedi_conn->gen_pdu.resp_wr_ptr -
+ qedi_conn->gen_pdu.resp_buf));
+
+ spin_unlock(&session->back_lock);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+}
+
+static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ char *ptr, int len)
+{
+ u16 idx = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
+ len, qedi->bdq_prod_idx,
+ (qedi->bdq_prod_idx % qedi->rq_num_entries));
+
+ /* Obtain buffer address from rqe_opaque */
+ idx = cqe->rqe_opaque.lo;
+ if (idx >= QEDI_BDQ_NUM) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+ idx);
+ return;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
+ cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
+ switch (cqe->unsol_cqe_type) {
+ case ISCSI_CQE_UNSOLICITED_SINGLE:
+ case ISCSI_CQE_UNSOLICITED_FIRST:
+ if (len)
+ memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
+ break;
+ case ISCSI_CQE_UNSOLICITED_MIDDLE:
+ case ISCSI_CQE_UNSOLICITED_LAST:
+ break;
+ default:
+ break;
+ }
+}
+
+static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ int count)
+{
+ u16 tmp;
+ u16 idx = 0;
+ struct scsi_bd *pbl;
+
+ /* Obtain buffer address from rqe_opaque */
+ idx = cqe->rqe_opaque.lo;
+ if (idx >= QEDI_BDQ_NUM) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+ idx);
+ return;
+ }
+
+ pbl = (struct scsi_bd *)qedi->bdq_pbl;
+ pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
+ pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
+ pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
+ pbl, pbl->address.hi, pbl->address.lo, idx);
+ pbl->opaque.hi = 0;
+ pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+
+ /* Increment producer to let f/w know we've handled the frame */
+ qedi->bdq_prod_idx += count;
+
+ writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+ tmp = readw(qedi->bdq_primary_prod);
+
+ writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+ tmp = readw(qedi->bdq_secondary_prod);
+}
+
+static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ u32 pdu_len, u32 num_bdqs,
+ char *bdq_data)
+{
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "num_bdqs [%d]\n", num_bdqs);
+
+ qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
+ qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
+}
+
+static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn, u16 que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_nop_in_hdr *cqe_nop_in;
+ struct iscsi_nopin *hdr;
+ struct qedi_cmd *cmd;
+ int tgt_async_nop = 0;
+ u32 lun[2];
+ u32 pdu_len, num_bdqs;
+ char bdq_data[QEDI_BDQ_BUF_SIZE];
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+ cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
+
+ pdu_len = cqe_nop_in->hdr_second_dword &
+ ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+ hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = cqe_nop_in->opcode;
+ hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
+ hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
+ hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pdu_len, num_bdqs, bdq_data);
+ hdr->itt = RESERVED_ITT;
+ tgt_async_nop = 1;
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ goto done;
+ }
+
+ /* Response to one of our nop-outs */
+ if (task) {
+ cmd = task->dd_data;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ hdr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ lun[0] = 0xffffffff;
+ lun[1] = 0xffffffff;
+ memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ }
+
+done:
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);
+
+ spin_unlock_bh(&session->back_lock);
+ return tgt_async_nop;
+}
+
+static void qedi_process_async_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn,
+ u16 que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_async_msg_hdr *cqe_async_msg;
+ struct iscsi_async *resp_hdr;
+ u32 lun[2];
+ u32 pdu_len, num_bdqs;
+ char bdq_data[QEDI_BDQ_BUF_SIZE];
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+
+ cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
+ pdu_len = cqe_async_msg->hdr_second_dword &
+ ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pdu_len, num_bdqs, bdq_data);
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+
+ resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = cqe_async_msg->opcode;
+ resp_hdr->flags = 0x80;
+
+ lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
+ lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
+ memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
+ resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
+ resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
+
+ resp_hdr->async_event = cqe_async_msg->async_event;
+ resp_hdr->async_vcode = cqe_async_msg->async_vcode;
+
+ resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
+ resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
+ resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
+ pdu_len);
+
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn,
+ uint16_t que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_reject_hdr *cqe_reject;
+ struct iscsi_reject *hdr;
+ u32 pld_len, num_bdqs;
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+ cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
+ pld_len = cqe_reject->hdr_second_dword &
+ ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pld_len, num_bdqs, conn->data);
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+ hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = cqe_reject->opcode;
+ hdr->reason = cqe_reject->hdr_reason;
+ hdr->flags = cqe_reject->hdr_flags;
+ hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
+ ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
+ hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
+ hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
+ hdr->ffffffff = cpu_to_be32(0xffffffff);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ conn->data, pld_len);
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_scsi_completion(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct iscsi_conn *conn)
+{
+ struct scsi_cmnd *sc_cmd;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_scsi_rsp *hdr;
+ struct iscsi_data_in_hdr *cqe_data_in;
+ int datalen = 0;
+ struct qedi_conn *qedi_conn;
+ u32 iscsi_cid;
+ bool mark_cmd_node_deleted = false;
+ u8 cqe_err_bits = 0;
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+ cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
+ cqe_err_bits =
+ cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+ spin_lock_bh(&session->back_lock);
+ /* get the scsi command */
+ sc_cmd = cmd->scsi_cmd;
+
+ if (!sc_cmd) {
+ QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
+ goto error;
+ }
+
+ if (!sc_cmd->SCp.ptr) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "SCp.ptr is NULL, returned in another context.\n");
+ goto error;
+ }
+
+ if (!sc_cmd->request) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "sc_cmd->request is NULL, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ if (!sc_cmd->request->special) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "request->special is NULL so request not valid, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ if (!sc_cmd->request->q) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "request->q is NULL so request is not valid, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ qedi_iscsi_unmap_sg_list(cmd);
+
+ hdr = (struct iscsi_scsi_rsp *)task->hdr;
+ hdr->opcode = cqe_data_in->opcode;
+ hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
+ hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+ hdr->response = cqe_data_in->reserved1;
+ hdr->cmd_status = cqe_data_in->status_rsvd;
+ hdr->flags = cqe_data_in->flags;
+ hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);
+
+ if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
+ datalen = cqe_data_in->reserved2 &
+ ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
+ memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
+ }
+
+ /* If f/w reports data underrun err then set residual to IO transfer
+ * length, set Underrun flag and clear Overrun flag explicitly
+ */
+ if (unlikely(cqe_err_bits &&
+ GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
+ hdr->itt, cqe_data_in->flags, cmd->task_id,
+ qedi_conn->iscsi_conn_id, hdr->residual_count,
+ scsi_bufflen(sc_cmd));
+ hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
+ hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+ hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
+ }
+
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ mark_cmd_node_deleted = true;
+ }
+ spin_unlock(&qedi_conn->list_lock);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ if (qedi_io_tracing)
+ qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
+
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ conn->data, datalen);
+error:
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_mtask_completion(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *conn, uint16_t que_idx)
+{
+ struct iscsi_conn *iscsi_conn;
+ u32 hdr_opcode;
+
+ hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+ iscsi_conn = conn->cls_conn->dd_data;
+
+ switch (hdr_opcode) {
+ case ISCSI_OPCODE_SCSI_RESPONSE:
+ case ISCSI_OPCODE_DATA_IN:
+ qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
+ break;
+ case ISCSI_OPCODE_LOGIN_RESPONSE:
+ qedi_process_login_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_TMF_RESPONSE:
+ qedi_process_tmf_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_TEXT_RESPONSE:
+ qedi_process_text_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_LOGOUT_RESPONSE:
+ qedi_process_logout_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_NOP_IN:
+ qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
+ break;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
+ }
+}
+
+static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
+ struct iscsi_cqe_solicited *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
+ "itid=0x%x, cmd task id=0x%x\n",
+ cqe->itid, cmd->task_id);
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+
+ spin_lock_bh(&session->back_lock);
+ __iscsi_put_task(task);
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+ struct iscsi_cqe_solicited *cqe,
+ struct iscsi_task *task,
+ struct iscsi_conn *conn)
+{
+ struct qedi_work_map *work, *work_tmp;
+ u32 proto_itt = cqe->itid;
+ u32 ptmp_itt = 0;
+ itt_t protoitt = 0;
+ int found = 0;
+ struct qedi_cmd *qedi_cmd = NULL;
+ u32 rtid = 0;
+ u32 iscsi_cid;
+ struct qedi_conn *qedi_conn;
+ struct qedi_cmd *cmd_new, *dbg_cmd;
+ struct iscsi_task *mtask;
+ struct iscsi_tm *tmf_hdr = NULL;
+
+ iscsi_cid = cqe->conn_id;
+ qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+ /* Based on this itt get the corresponding qedi_cmd */
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
+ list) {
+ if (work->rtid == proto_itt) {
+ /* We found the command */
+ qedi_cmd = work->qedi_cmd;
+ if (!qedi_cmd->list_tmf_work) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
+ proto_itt, qedi_conn->iscsi_conn_id);
+ WARN_ON(1);
+ }
+ found = 1;
+ mtask = qedi_cmd->task;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ rtid = work->rtid;
+
+ list_del_init(&work->list);
+ kfree(work);
+ qedi_cmd->list_tmf_work = NULL;
+ }
+ }
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ if (found) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
+ proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ spin_lock_bh(&conn->session->back_lock);
+
+ protoitt = build_itt(get_itt(tmf_hdr->rtt),
+ conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+
+ spin_unlock_bh(&conn->session->back_lock);
+
+ if (!task) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt),
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ dbg_cmd = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(task->itt),
+ dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
+ qedi_cmd->state = CLEANUP_RECV;
+
+ qedi_clear_task_idx(qedi_conn->qedi, rtid);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_del_init(&dbg_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_cmd->state = CLEANUP_RECV;
+ wake_up_interruptible(&qedi_conn->wait_queue);
+ }
+ } else if (qedi_conn->cmd_cleanup_req > 0) {
+ spin_lock_bh(&conn->session->back_lock);
+ qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+ protoitt = build_itt(ptmp_itt, conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ spin_unlock_bh(&conn->session->back_lock);
+ if (!task) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "task is null, itid=0x%x, cid=0x%x\n",
+ cqe->itid, qedi_conn->iscsi_conn_id);
+ return;
+ }
+ qedi_conn->cmd_cleanup_cmpl++;
+ wake_up(&qedi_conn->wait_queue);
+ cmd_new = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cqe->itid, qedi_conn->iscsi_conn_id);
+ qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
+
+ } else {
+ qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+ protoitt = build_itt(ptmp_itt, conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
+ protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
+ WARN_ON(1);
+ }
+}
+
+void qedi_fp_process_cqes(struct qedi_work *work)
+{
+ struct qedi_ctx *qedi = work->qedi;
+ union iscsi_cqe *cqe = &work->cqe;
+ struct iscsi_task *task = NULL;
+ struct iscsi_nopout *nopout_hdr;
+ struct qedi_conn *q_conn;
+ struct iscsi_conn *conn;
+ struct qedi_cmd *qedi_cmd;
+ u32 comp_type;
+ u32 iscsi_cid;
+ u32 hdr_opcode;
+ u16 que_idx = work->que_idx;
+ u8 cqe_err_bits = 0;
+
+ comp_type = cqe->cqe_common.cqe_type;
+ hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+ cqe_err_bits =
+ cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
+ cqe->cqe_common.conn_id, comp_type, hdr_opcode);
+
+ if (comp_type >= MAX_ISCSI_CQES_TYPE) {
+ QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
+ return;
+ }
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+ if (!q_conn) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Session no longer exists for cid=0x%x!!\n",
+ iscsi_cid);
+ return;
+ }
+
+ conn = q_conn->cls_conn->dd_data;
+
+ if (unlikely(cqe_err_bits &&
+ GET_FIELD(cqe_err_bits,
+ CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
+ iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+ return;
+ }
+
+ switch (comp_type) {
+ case ISCSI_CQE_TYPE_SOLICITED:
+ case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+ qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
+ task = qedi_cmd->task;
+ if (!task) {
+ QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
+ return;
+ }
+
+ /* Process NOPIN local completion */
+ nopout_hdr = (struct iscsi_nopout *)task->hdr;
+ if ((nopout_hdr->itt == RESERVED_ITT) &&
+ (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
+ qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
+ task, q_conn);
+ } else {
+ cqe->cqe_solicited.itid =
+ qedi_get_itt(cqe->cqe_solicited);
+ /* Process other solicited responses */
+ qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
+ }
+ break;
+ case ISCSI_CQE_TYPE_UNSOLICITED:
+ switch (hdr_opcode) {
+ case ISCSI_OPCODE_NOP_IN:
+ qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ case ISCSI_OPCODE_ASYNC_MSG:
+ qedi_process_async_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ case ISCSI_OPCODE_REJECT:
+ qedi_process_reject_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ }
+ goto exit_fp_process;
+ case ISCSI_CQE_TYPE_DUMMY:
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
+ goto exit_fp_process;
+ case ISCSI_CQE_TYPE_TASK_CLEANUP:
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
+ qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
+ conn);
+ goto exit_fp_process;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
+ break;
+ }
+
+exit_fp_process:
+ return;
+}
+
+static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
+ u16 tid, uint16_t ptu_invalidate, int is_cleanup)
+{
+ struct iscsi_wqe *wqe;
+ struct iscsi_wqe_field *cont_field;
+ struct qedi_endpoint *ep;
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_login_req *login_hdr;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ login_hdr = (struct iscsi_login_req *)task->hdr;
+ ep = qedi_conn->ep;
+ wqe = &ep->sq[ep->sq_prod_idx];
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ ep->sq_prod_idx++;
+ ep->fw_sq_prod_idx++;
+ if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+ ep->sq_prod_idx = 0;
+
+ if (is_cleanup) {
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_TASK_CLEANUP);
+ wqe->task_id = tid;
+ return;
+ }
+
+ if (ptu_invalidate) {
+ SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
+ ISCSI_WQE_SET_PTU_INVALIDATE);
+ }
+
+ cont_field = &wqe->cont_prevtid_union.cont_field;
+
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ case ISCSI_OP_TEXT:
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_MIDDLE_PATH);
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+ 1);
+ cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
+ break;
+ case ISCSI_OP_LOGOUT:
+ case ISCSI_OP_NOOP_OUT:
+ case ISCSI_OP_SCSI_TMFUNC:
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+ break;
+ default:
+ if (!sc)
+ break;
+
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+ cont_field->contlen_cdbsize_field =
+ (sc->sc_data_direction == DMA_TO_DEVICE) ?
+ scsi_bufflen(sc) : 0;
+ if (cmd->use_slowpath)
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
+ else
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+ (sc->sc_data_direction ==
+ DMA_TO_DEVICE) ?
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid) : 0);
+ break;
+ }
+
+ wqe->task_id = tid;
+ /* Make sure SQ data is coherent */
+ wmb();
+}
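+
+/*
+ * Note: sq_prod_idx above wraps at QEDI_SQ_SIZE to index the SQ ring,
+ * while fw_sq_prod_idx is left free-running; qedi_ring_doorbell() below
+ * hands the free-running value to the firmware (which is assumed to apply
+ * its own modulo).
+ */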
+
+static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
+{
+ struct iscsi_db_data dbell = { 0 };
+
+ dbell.agg_flags = 0;
+
+ dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
+ dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
+ dbell.params |=
+ DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
+
+ dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
+ writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
+
+ /* Make sure fw write idx is coherent, and include both memory barriers
+ * as a failsafe as for some architectures the call is the same but on
+ * others they are two different assembly operations.
+ */
+ wmb();
+ mmiowb();
+ QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
+ "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
+ qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
+ qedi_conn->iscsi_conn_id);
+}
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_login_req *login_hdr;
+ struct iscsi_login_req_hdr *fw_login_req = NULL;
+ struct iscsi_cached_sge_ctx *cached_sge = NULL;
+ struct iscsi_sge *single_sge = NULL;
+ struct iscsi_sge *req_sge = NULL;
+ struct iscsi_sge *resp_sge = NULL;
+ struct qedi_cmd *qedi_cmd;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ login_hdr = (struct iscsi_login_req *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
+ fw_login_req->opcode = login_hdr->opcode;
+ fw_login_req->version_min = login_hdr->min_version;
+ fw_login_req->version_max = login_hdr->max_version;
+ fw_login_req->flags_attr = login_hdr->flags;
+ fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
+ fw_login_req->isid_d = *((u32 *)login_hdr->isid);
+ fw_login_req->tsih = login_hdr->tsih;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_login_req->cid = qedi_conn->iscsi_conn_id;
+ fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+ fw_login_req->exp_stat_sn = 0;
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = 0x2;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+ ntoh24(login_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+ fw_task_ctx->ustorm_st_context.task_type = 0x2;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ ntoh24(login_hdr->dlength);
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_logout_req_hdr *fw_logout_req = NULL;
+ struct iscsi_task_context *fw_task_ctx = NULL;
+ struct iscsi_logout *logout_hdr = NULL;
+ struct qedi_cmd *qedi_cmd = NULL;
+ s16 tid = 0;
+ s16 ptu_invalidate = 0;
+
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ logout_hdr = (struct iscsi_logout *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
+ fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
+ fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+ fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ fw_logout_req->cid = qedi_conn->iscsi_conn_id;
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
+
+int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+ struct iscsi_task *task, bool in_recovery)
+{
+ int rval;
+ struct iscsi_task *ctask;
+ struct qedi_cmd *cmd, *cmd_tmp;
+ struct iscsi_tm *tmf_hdr;
+ unsigned int lun = 0;
+ bool lun_reset = false;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+
+	/*
+	 * task is NULL when called from recovery; otherwise it is the
+	 * valid reference task from the TMF response.
+	 */
+ if (task) {
+ tmf_hdr = (struct iscsi_tm *)task->hdr;
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
+ lun_reset = true;
+ lun = scsilun_to_int(&tmf_hdr->lun);
+ }
+ }
+
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
+ qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
+ in_recovery, lun_reset);
+
+ if (lun_reset)
+ spin_lock_bh(&session->back_lock);
+
+ spin_lock(&qedi_conn->list_lock);
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+ io_cmd) {
+ ctask = cmd->task;
+ if (ctask == task)
+ continue;
+
+ if (lun_reset) {
+ if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+					  "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0x%x cid=0x%x\n",
+ cmd->task_id, get_itt(ctask->itt),
+ cmd->scsi_cmd, cmd->scsi_cmd->device,
+ ctask->state, cmd->state,
+ qedi_conn->iscsi_conn_id);
+ if (cmd->scsi_cmd->device->lun != lun)
+ continue;
+ }
+ }
+ qedi_conn->cmd_cleanup_req++;
+ qedi_iscsi_cleanup_task(ctask, true);
+
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
+ &cmd->io_cmd, qedi_conn->iscsi_conn_id);
+ }
+
+ spin_unlock(&qedi_conn->list_lock);
+
+ if (lun_reset)
+ spin_unlock_bh(&session->back_lock);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "cmd_cleanup_req=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->iscsi_conn_id);
+
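+	/*
+	 * Give the firmware five seconds to finish the cleanups; on
+	 * timeout, block every session, drain the device and retry once
+	 * before reporting failure to the caller.
+	 */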
+ rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ ((qedi_conn->cmd_cleanup_req ==
+ qedi_conn->cmd_cleanup_cmpl) ||
+ qedi_conn->ep),
+ 5 * HZ);
+ if (rval) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ return 0;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_missing);
+ qedi_ops->common->drain(qedi->cdev);
+
+	/* Enable I/O for all other sessions except the current one. */
+ if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ (qedi_conn->cmd_cleanup_req ==
+ qedi_conn->cmd_cleanup_cmpl),
+ 5 * HZ)) {
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_available);
+ return -1;
+ }
+
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_available);
+
+ return 0;
+}
+
+void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_endpoint *qedi_ep;
+ int rval;
+
+ qedi_ep = qedi_conn->ep;
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ if (!qedi_ep) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Cannot proceed, ep already disconnected, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
+ qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
+
+ qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
+
+ rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
+ if (rval) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fatal error, need hard reset, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ WARN_ON(1);
+ }
+}
+
+static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ struct qedi_cmd *qedi_cmd,
+ struct qedi_work_map *list_work)
+{
+ struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
+ int wait;
+
+ wait = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ ((qedi_cmd->state ==
+ CLEANUP_RECV) ||
+ ((qedi_cmd->type == TYPEIO) &&
+ (cmd->state ==
+ RESPONSE_RECEIVED))),
+ 5 * HZ);
+ if (!wait) {
+ qedi_cmd->state = CLEANUP_WAIT_FAILED;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+			  "Cleanup timed out tid=0x%x, issue connection recovery, cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ return -1;
+ }
+ return 0;
+}
+
+static void qedi_tmf_work(struct work_struct *work)
+{
+ struct qedi_cmd *qedi_cmd =
+ container_of(work, struct qedi_cmd, tmf_work);
+ struct qedi_conn *qedi_conn = qedi_cmd->conn;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_cls_session *cls_sess;
+ struct qedi_work_map *list_work = NULL;
+ struct iscsi_task *mtask;
+ struct qedi_cmd *cmd;
+ struct iscsi_task *ctask;
+ struct iscsi_tm *tmf_hdr;
+ s16 rval = 0;
+ s16 tid = 0;
+
+ mtask = qedi_cmd->task;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
+ set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+
+ ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+ if (!ctask || !ctask->sc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
+ goto abort_ret;
+ }
+
+ cmd = (struct qedi_cmd *)ctask->dd_data;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
+ qedi_conn->iscsi_conn_id);
+
+ if (do_not_recover) {
+ QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
+ do_not_recover);
+ goto abort_ret;
+ }
+
+ list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
+ if (!list_work) {
+		QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
+ goto abort_ret;
+ }
+
+ qedi_cmd->type = TYPEIO;
+ list_work->qedi_cmd = qedi_cmd;
+ list_work->rtid = cmd->task_id;
+ list_work->state = QEDI_WORK_SCHEDULED;
+ qedi_cmd->list_tmf_work = list_work;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
+ list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
+ tmf_hdr->flags);
+
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ qedi_iscsi_cleanup_task(ctask, false);
+
+ rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
+ list_work);
+ if (rval == -1) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "FW cleanup got escalated, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ goto ldel_exit;
+ }
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ goto ldel_exit;
+ }
+
+ qedi_cmd->task_id = tid;
+ qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+abort_ret:
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ return;
+
+ldel_exit:
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ if (!qedi_cmd->list_tmf_work) {
+ list_del_init(&list_work->list);
+ qedi_cmd->list_tmf_work = NULL;
+ kfree(list_work);
+ }
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ spin_unlock(&qedi_conn->list_lock);
+
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_tmf_request_hdr *fw_tmf_request;
+ struct iscsi_sge *single_sge;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_cmd *cmd;
+ struct iscsi_task *ctask;
+ struct iscsi_tm *tmf_hdr;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ u32 lun[2];
+ s16 tid = 0, ptu_invalidate = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+
+ tid = qedi_cmd->task_id;
+ qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
+ fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
+ fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
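+	/*
+	 * The iSCSI LUN is an 8-byte big-endian field; split it into two
+	 * 32-bit halves and byte-swap each for the firmware context.
+	 */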
+ memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+ fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
+ fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+ if (!ctask || !ctask->sc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not get reference task\n");
+ return 0;
+ }
+ cmd = (struct qedi_cmd *)ctask->dd_data;
+ fw_tmf_request->rtt =
+ qedi_set_itt(cmd->task_id,
+ get_itt(tmf_hdr->rtt));
+ } else {
+ fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+ }
+
+ fw_tmf_request->opcode = tmf_hdr->opcode;
+ fw_tmf_request->function = tmf_hdr->flags;
+ fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
+ fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
+
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
+ tid, mtask->itt, qedi_conn->iscsi_conn_id);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_tm *tmf_hdr;
+ struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ s16 tid = 0;
+
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ qedi_cmd->task = mtask;
+
+ /* If abort task then schedule the work and return */
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ qedi_cmd->state = CLEANUP_WAIT;
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
+ queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+
+ } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return -1;
+ }
+ qedi_cmd->task_id = tid;
+
+ qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+ } else {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_text_request_hdr *fw_text_request;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_sge *single_sge;
+ struct qedi_cmd *qedi_cmd;
+ /* For 6.5 hdr iscsi_hdr */
+ struct iscsi_text *text_hdr;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ text_hdr = (struct iscsi_text *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_text_request =
+ &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
+ fw_text_request->opcode = text_hdr->opcode;
+ fw_text_request->flags_attr = text_hdr->flags;
+
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_text_request->ttt = text_hdr->ttt;
+ fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+ fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+ fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = 0x2;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ ntoh24(text_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+ ntoh24(text_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.exp_data_sn =
+ be32_to_cpu(text_hdr->exp_statsn);
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+ fw_task_ctx->ustorm_st_context.task_type = 0x2;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ /* Add command in active command list */
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
+
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ char *datap, int data_len, int unsol)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_nop_out_hdr *fw_nop_out;
+ struct qedi_cmd *qedi_cmd;
+ /* For 6.5 hdr iscsi_hdr */
+ struct iscsi_nopout *nopout_hdr;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_sge *single_sge;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ u32 lun[2];
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ nopout_hdr = (struct iscsi_nopout *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+ return -ENOMEM;
+ }
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
+ SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+ SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+
+ memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+ fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
+ fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+
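+	/*
+	 * A valid (non-0xffffffff) TTT means this NOP-Out answers a
+	 * target-initiated NOP-In: echo back ITT/TTT and let the firmware
+	 * complete it locally.  Otherwise this is an initiator ping, so a
+	 * driver ITT is assigned and the command is tracked on the active
+	 * list until the NOP-In response arrives.
+	 */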
+ if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
+ fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
+ fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+ fw_task_ctx->ystorm_st_context.state.local_comp = 1;
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+ } else {
+ fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+ }
+
+ fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
+ fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+ fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
+
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
+static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
+ int bd_index)
+{
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ int frag_size, sg_frags;
+
+ sg_frags = 0;
+
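+	/*
+	 * Emit one SGE per fragment: first align up to the next
+	 * QEDI_PAGE_SIZE boundary, then carve QEDI_BD_SPLIT_SZ chunks,
+	 * leaving any remainder as the final SGE.
+	 */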
+ while (sg_len) {
+ if (addr % QEDI_PAGE_SIZE)
+ frag_size =
+ (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
+ else
+ frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
+ (sg_len % QEDI_BD_SPLIT_SZ);
+
+ if (frag_size == 0)
+ frag_size = QEDI_BD_SPLIT_SZ;
+
+ bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
+ bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
+ bd[bd_index + sg_frags].sge_len = (u16)frag_size;
+ QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
+ "split sge %d: addr=%llx, len=%x",
+ (bd_index + sg_frags), addr, frag_size);
+
+ addr += (u64)frag_size;
+ sg_frags++;
+ sg_len -= frag_size;
+ }
+ return sg_frags;
+}
+
+static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int bd_count = 0;
+ int sg_count;
+ int sg_len;
+ int sg_frags;
+ u64 addr, end_addr;
+ int i;
+
+ WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);
+
+ sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
+ scsi_sg_count(sc), sc->sc_data_direction);
+
+	/*
+	 * A request that maps to a single SGE no longer than
+	 * MAX_SGLEN_FOR_CACHESGL (64K) is sent as a cached SGL.
+	 */
+ sg = scsi_sglist(sc);
+ if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+
+ bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
+ bd[bd_count].sge_addr.hi = (addr >> 32);
+ bd[bd_count].sge_len = (u16)sg_len;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+			  "single-cached-sgl: bd_count:%d addr=%llx, len=%x",
+ sg_count, addr, sg_len);
+
+ return ++bd_count;
+ }
+
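+	/*
+	 * Scan for SGEs that break the fast-path rules: any boundary
+	 * interior to the list that is not QEDI_PAGE_SIZE aligned forces
+	 * the slow SGL path.
+	 */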
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+ end_addr = (addr + sg_len);
+
+ /*
+ * first sg elem in the 'list',
+ * check if end addr is page-aligned.
+ */
+ if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
+ cmd->use_slowpath = true;
+
+ /*
+ * last sg elem in the 'list',
+ * check if start addr is page-aligned.
+ */
+ else if ((i == (sg_count - 1)) &&
+ (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
+ cmd->use_slowpath = true;
+
+ /*
+ * middle sg elements in list,
+ * check if start and end addr is page-aligned
+ */
+ else if ((i != 0) && (i != (sg_count - 1)) &&
+ ((addr % QEDI_PAGE_SIZE) ||
+ (end_addr % QEDI_PAGE_SIZE)))
+ cmd->use_slowpath = true;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
+ i, sg_len);
+
+ if (sg_len > QEDI_BD_SPLIT_SZ) {
+ sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
+ } else {
+ sg_frags = 1;
+ bd[bd_count].sge_addr.lo = addr & 0xffffffff;
+ bd[bd_count].sge_addr.hi = addr >> 32;
+ bd[bd_count].sge_len = sg_len;
+ }
+ byte_count += sg_len;
+ bd_count += sg_frags;
+ }
+
+ if (byte_count != scsi_bufflen(sc))
+ QEDI_ERR(&qedi->dbg_ctx,
+ "byte_count = %d != scsi_bufflen = %d\n", byte_count,
+ scsi_bufflen(sc));
+ else
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
+ byte_count);
+
+ WARN_ON(byte_count != scsi_bufflen(sc));
+
+ return bd_count;
+}
+
+static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
+{
+ int bd_count;
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+ if (scsi_sg_count(sc)) {
+ bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
+ if (bd_count == 0)
+ return;
+ } else {
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+
+ bd[0].sge_addr.lo = 0;
+ bd[0].sge_addr.hi = 0;
+ bd[0].sge_len = 0;
+ bd_count = 0;
+ }
+ cmd->io_tbl.sge_valid = bd_count;
+}
+
+static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
+{
+ u32 dword;
+ int lpcnt;
+ u8 *srcp;
+
+ lpcnt = sc->cmd_len / sizeof(dword);
+ srcp = (u8 *)sc->cmnd;
+ while (lpcnt--) {
+ memcpy(&dword, (const void *)srcp, 4);
+ *dstp = cpu_to_be32(dword);
+ srcp += 4;
+ dstp++;
+ }
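+	/*
+	 * Handle the remaining tail bytes; standard 6- and 10-byte CDBs
+	 * leave a 2-byte remainder.
+	 */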
+ if (sc->cmd_len & 0x3) {
+ dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
+ *dstp = cpu_to_be32(dword);
+ }
+}
+
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+ u16 tid, int8_t direction)
+{
+ struct qedi_io_log *io_log;
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct scsi_cmnd *sc_cmd = task->sc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qedi->io_trace_lock, flags);
+
+ io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
+ io_log->direction = direction;
+ io_log->task_id = tid;
+ io_log->cid = qedi_conn->iscsi_conn_id;
+ io_log->lun = sc_cmd->device->lun;
+ io_log->op = sc_cmd->cmnd[0];
+ io_log->lba[0] = sc_cmd->cmnd[2];
+ io_log->lba[1] = sc_cmd->cmnd[3];
+ io_log->lba[2] = sc_cmd->cmnd[4];
+ io_log->lba[3] = sc_cmd->cmnd[5];
+ io_log->bufflen = scsi_bufflen(sc_cmd);
+ io_log->sg_count = scsi_sg_count(sc_cmd);
+ io_log->fast_sgs = qedi->fast_sgls;
+ io_log->cached_sgs = qedi->cached_sgls;
+ io_log->slow_sgs = qedi->slow_sgls;
+ io_log->cached_sge = qedi->use_cached_sge;
+ io_log->slow_sge = qedi->use_slow_sge;
+ io_log->fast_sge = qedi->use_fast_sge;
+ io_log->result = sc_cmd->result;
+ io_log->jiffies = jiffies;
+ io_log->blk_req_cpu = smp_processor_id();
+
+ if (direction == QEDI_IO_TRACE_REQ) {
+ /* For requests we only care about the submission CPU */
+ io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+ io_log->intr_cpu = 0;
+ io_log->blk_rsp_cpu = 0;
+ } else if (direction == QEDI_IO_TRACE_RSP) {
+ io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+ io_log->intr_cpu = qedi->intr_cpu;
+ io_log->blk_rsp_cpu = smp_processor_id();
+ }
+
+ qedi->io_trace_idx++;
+ if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
+ qedi->io_trace_idx = 0;
+
+ qedi->use_cached_sge = false;
+ qedi->use_slow_sge = false;
+ qedi->use_fast_sge = false;
+
+ spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+}
+
+int qedi_iscsi_send_ioreq(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_phys_sgl_ctx *phys_sgl;
+ struct iscsi_virt_sgl_ctx *virt_sgl;
+ struct ystorm_iscsi_task_st_ctx *yst_cxt;
+ struct mstorm_iscsi_task_st_ctx *mst_cxt;
+ struct iscsi_sgl *sgl_struct;
+ struct iscsi_sge *single_sge;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ enum iscsi_task_type task_type;
+ struct iscsi_cmd_hdr *fw_cmd;
+ u32 lun[2];
+ u32 exp_data;
+ u16 cq_idx = smp_processor_id() % qedi->num_queues;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+ u8 num_fast_sgs;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ qedi_iscsi_map_sg_list(cmd);
+
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+
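+	/*
+	 * For writes, pre-ack the data the target already allows
+	 * unsolicited: with InitialR2T only immediate data (one PDU,
+	 * capped by the first burst), otherwise up to FirstBurstLength.
+	 */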
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ if (conn->session->initial_r2t_en) {
+ exp_data = min((conn->session->imm_data_en *
+ conn->max_xmit_dlength),
+ conn->session->first_burst);
+ exp_data = min(exp_data, scsi_bufflen(sc));
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ cpu_to_le32(exp_data);
+ } else {
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ min(conn->session->first_burst, scsi_bufflen(sc));
+ }
+
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
+ } else {
+ if (scsi_bufflen(sc))
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+ }
+
+ fw_cmd->lun.lo = be32_to_cpu(lun[0]);
+ fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_update_itt_map(qedi, tid, task->itt, cmd);
+ fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_cmd->expected_transfer_length = scsi_bufflen(sc);
+ fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+ fw_cmd->opcode = hdr->opcode;
+ qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
+ fw_task_ctx->mstorm_st_context.sense_db.hi =
+ (u32)((u64)cmd->sense_buffer_dma >> 32);
+ fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
+ fw_task_ctx->mstorm_st_context.task_type = task_type;
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
+ fw_task_ctx->ustorm_st_context.exp_data_sn =
+ be32_to_cpu(hdr->exp_statsn);
+ fw_task_ctx->ustorm_st_context.task_type = task_type;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+ num_fast_sgs = (cmd->io_tbl.sge_valid ?
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid) : 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
+
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
+ cmd->io_tbl.sge_valid);
+
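+	/*
+	 * Three SGL strategies below: a lone SGE is passed as a cached or
+	 * single SGE, unaligned multi-SGE lists take the slow firmware
+	 * path, and page-aligned lists are handed off as fast SGEs.
+	 */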
+ yst_cxt = &fw_task_ctx->ystorm_st_context;
+ mst_cxt = &fw_task_ctx->mstorm_st_context;
+ /* Tx path */
+ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+ /* not considering superIO or FastIO */
+ if (cmd->io_tbl.sge_valid == 1) {
+ cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
+ cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
+ cached_sge->sge.sge_len = bd[0].sge_len;
+ qedi->cached_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
+ phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+ phys_sgl->sgl_base.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
+ qedi->slow_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES,
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid));
+ virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
+ virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+ virt_sgl->sgl_base.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ virt_sgl->sgl_initial_offset =
+ (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+ qedi->fast_sgls++;
+ }
+ fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+ fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+ } else {
+ /* Rx path */
+ if (cmd->io_tbl.sge_valid == 1) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ single_sge = &mst_cxt->sgl_union.single_sge;
+ single_sge->sge_addr.lo = bd[0].sge_addr.lo;
+ single_sge->sge_addr.hi = bd[0].sge_addr.hi;
+ single_sge->sge_len = bd[0].sge_len;
+ qedi->cached_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+ sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+ sgl_struct->sgl_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ sgl_struct->sgl_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ sgl_struct->updated_sge_size = 0;
+ sgl_struct->updated_sge_offset = 0;
+ qedi->slow_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+ sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+ sgl_struct->sgl_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ sgl_struct->sgl_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ sgl_struct->byte_offset =
+ (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ sgl_struct->updated_sge_size = 0;
+ sgl_struct->updated_sge_offset = 0;
+ qedi->fast_sgls++;
+ }
+ fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+ fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+ }
+
+ if (cmd->io_tbl.sge_valid == 1)
+		/* Single-SGL */
+ qedi->use_cached_sge = true;
+ else {
+ if (cmd->use_slowpath)
+ qedi->use_slow_sge = true;
+ else
+ qedi->use_fast_sge = true;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+ (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
+ "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
+ "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
+ (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
+
+ /* Add command in active command list */
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
+ cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ if (qedi_io_tracing)
+ qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
+
+ return 0;
+}
+
+int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ s16 ptu_invalidate = 0;
+
+ QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+		  "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0x%x cid=0x%x\n",
+ cmd->task_id, get_itt(task->itt), task->state,
+ cmd->state, qedi_conn->iscsi_conn_id);
+
+ qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
new file mode 100644
index 000000000000..8e488de88ece
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -0,0 +1,73 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_GBL_H_
+#define _QEDI_GBL_H_
+
+#include "qedi_iscsi.h"
+
+extern uint qedi_io_tracing;
+extern int do_not_recover;
+extern struct scsi_host_template qedi_host_template;
+extern struct iscsi_transport qedi_iscsi_transport;
+extern const struct qed_iscsi_ops *qedi_ops;
+extern struct qedi_debugfs_ops qedi_debugfs_ops;
+extern const struct file_operations qedi_dbg_fops;
+extern struct device_attribute *qedi_shost_attrs[];
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask);
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ char *datap, int data_len, int unsol);
+int qedi_iscsi_send_ioreq(struct iscsi_task *task);
+int qedi_get_task_idx(struct qedi_ctx *qedi);
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx);
+int qedi_iscsi_cleanup_task(struct iscsi_task *task,
+ bool mark_cmd_node_deleted);
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd);
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+ struct qedi_cmd *qedi_cmd);
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+ struct async_data *data);
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn);
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
+int qedi_recover_all_conns(struct qedi_ctx *qedi);
+void qedi_fp_process_cqes(struct qedi_work *work);
+int qedi_cleanup_all_io(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task, bool in_recovery);
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+ u16 tid, int8_t direction);
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
+int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_clearsq(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+
+#endif
diff --git a/drivers/scsi/qedi/qedi_hsi.h b/drivers/scsi/qedi/qedi_hsi.h
new file mode 100644
index 000000000000..8ca44c78f093
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_hsi.h
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef __QEDI_HSI__
+#define __QEDI_HSI__
+/*
+ * Add include to common target
+ */
+#include <linux/qed/common_hsi.h>
+
+/*
+ * Add include to common storage target
+ */
+#include <linux/qed/storage_common.h>
+
+/*
+ * Add include to common TCP target
+ */
+#include <linux/qed/tcp_common.h>
+
+/*
+ * Add include to common iSCSI target for both eCore and protocol driver
+ */
+#include <linux/qed/iscsi_common.h>
+
+/*
+ * iSCSI CMDQ element
+ */
+struct iscsi_cmdqe {
+ __le16 conn_id;
+ u8 invalid_command;
+ u8 cmd_hdr_type;
+ __le32 reserved1[2];
+ __le32 cmd_payload[13];
+};
+
+/*
+ * iSCSI CMD header type
+ */
+enum iscsi_cmd_hdr_type {
+ ISCSI_CMD_HDR_TYPE_BHS_ONLY /* iSCSI BHS with no expected AHS */,
+ ISCSI_CMD_HDR_TYPE_BHS_W_AHS /* iSCSI BHS with expected AHS */,
+ ISCSI_CMD_HDR_TYPE_AHS /* iSCSI AHS */,
+ MAX_ISCSI_CMD_HDR_TYPE
+};
+
+#endif /* __QEDI_HSI__ */
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
new file mode 100644
index 000000000000..d6a205433b66
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -0,0 +1,1624 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <scsi/scsi_tcq.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+int qedi_recover_all_conns(struct qedi_ctx *qedi)
+{
+ struct qedi_conn *qedi_conn;
+ int i;
+
+ for (i = 0; i < qedi->max_active_conns; i++) {
+ qedi_conn = qedi_get_conn_from_id(qedi, i);
+ if (!qedi_conn)
+ continue;
+
+ qedi_start_conn_recovery(qedi, qedi_conn);
+ }
+
+ return SUCCESS;
+}
+
+static int qedi_eh_host_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *shost = cmd->device->host;
+ struct qedi_ctx *qedi;
+
+ qedi = iscsi_host_priv(shost);
+
+ return qedi_recover_all_conns(qedi);
+}
+
+struct scsi_host_template qedi_host_template = {
+ .module = THIS_MODULE,
+ .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
+ .proc_name = QEDI_MODULE_NAME,
+ .queuecommand = iscsi_queuecommand,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler = iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
+ .eh_host_reset_handler = qedi_eh_host_reset,
+ .target_alloc = iscsi_target_alloc,
+ .change_queue_depth = scsi_change_queue_depth,
+ .can_queue = QEDI_MAX_ISCSI_TASK,
+ .this_id = -1,
+ .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
+ .max_sectors = 0xffff,
+ .cmd_per_lun = 128,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = qedi_shost_attrs,
+};
+
+static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ if (qedi_conn->gen_pdu.resp_bd_tbl) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.resp_bd_tbl,
+ qedi_conn->gen_pdu.resp_bd_dma);
+ qedi_conn->gen_pdu.resp_bd_tbl = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.req_bd_tbl) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.req_bd_tbl,
+ qedi_conn->gen_pdu.req_bd_dma);
+ qedi_conn->gen_pdu.req_bd_tbl = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.resp_buf) {
+ dma_free_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.resp_buf,
+ qedi_conn->gen_pdu.resp_dma_addr);
+ qedi_conn->gen_pdu.resp_buf = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.req_buf) {
+ dma_free_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.req_buf,
+ qedi_conn->gen_pdu.req_dma_addr);
+ qedi_conn->gen_pdu.req_buf = NULL;
+ }
+}
+
+static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ qedi_conn->gen_pdu.req_buf =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &qedi_conn->gen_pdu.req_dma_addr,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.req_buf)
+ goto login_req_buf_failure;
+
+ qedi_conn->gen_pdu.req_buf_size = 0;
+ qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf;
+
+ qedi_conn->gen_pdu.resp_buf =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &qedi_conn->gen_pdu.resp_dma_addr,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.resp_buf)
+ goto login_resp_buf_failure;
+
+ qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf;
+
+ qedi_conn->gen_pdu.req_bd_tbl =
+ dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.req_bd_tbl)
+ goto login_req_bd_tbl_failure;
+
+ qedi_conn->gen_pdu.resp_bd_tbl =
+ dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ &qedi_conn->gen_pdu.resp_bd_dma,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.resp_bd_tbl)
+ goto login_resp_bd_tbl_failure;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS,
+ "Allocation successful, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return 0;
+
+login_resp_bd_tbl_failure:
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.req_bd_tbl,
+ qedi_conn->gen_pdu.req_bd_dma);
+ qedi_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+ dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.resp_buf,
+ qedi_conn->gen_pdu.resp_dma_addr);
+ qedi_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+ dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.req_buf,
+ qedi_conn->gen_pdu.req_dma_addr);
+ qedi_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+ iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data,
+ "login resource alloc failed!!\n");
+ return -ENOMEM;
+}
+
+static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct qedi_cmd *cmd = task->dd_data;
+
+ if (cmd->io_tbl.sge_tbl)
+ dma_free_coherent(&qedi->pdev->dev,
+ QEDI_ISCSI_MAX_BDS_PER_CMD *
+ sizeof(struct iscsi_sge),
+ cmd->io_tbl.sge_tbl,
+ cmd->io_tbl.sge_tbl_dma);
+
+ if (cmd->sense_buffer)
+ dma_free_coherent(&qedi->pdev->dev,
+ SCSI_SENSE_BUFFERSIZE,
+ cmd->sense_buffer,
+ cmd->sense_buffer_dma);
+ }
+}
+
+static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
+ struct qedi_cmd *cmd)
+{
+ struct qedi_io_bdt *io = &cmd->io_tbl;
+ struct iscsi_sge *sge;
+
+ io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_ISCSI_MAX_BDS_PER_CMD *
+ sizeof(*sge),
+ &io->sge_tbl_dma, GFP_KERNEL);
+ if (!io->sge_tbl) {
+ iscsi_session_printk(KERN_ERR, session,
+ "Could not allocate BD table.\n");
+ return -ENOMEM;
+ }
+
+ io->sge_valid = 0;
+ return 0;
+}
+
+static int qedi_setup_cmd_pool(struct qedi_ctx *qedi,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct qedi_cmd *cmd = task->dd_data;
+
+ task->hdr = &cmd->hdr;
+ task->hdr_max = sizeof(struct iscsi_hdr);
+
+ if (qedi_alloc_sget(qedi, session, cmd))
+ goto free_sgets;
+
+ cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev,
+ SCSI_SENSE_BUFFERSIZE,
+ &cmd->sense_buffer_dma,
+ GFP_KERNEL);
+ if (!cmd->sense_buffer)
+ goto free_sgets;
+ }
+
+ return 0;
+
+free_sgets:
+ qedi_destroy_cmd_pool(qedi, session);
+ return -ENOMEM;
+}
+
+static struct iscsi_cls_session *
+qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max,
+ u16 qdepth, uint32_t initial_cmdsn)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_cls_session *cls_session;
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+
+ if (!ep)
+ return NULL;
+
+ qedi_ep = ep->dd_data;
+ shost = qedi_ep->qedi->shost;
+ qedi = iscsi_host_priv(shost);
+
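+	/* Clamp the requested command count to the SQ's supported range. */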
+ if (cmds_max > qedi->max_sqes)
+ cmds_max = qedi->max_sqes;
+ else if (cmds_max < QEDI_SQ_WQES_MIN)
+ cmds_max = QEDI_SQ_WQES_MIN;
+
+ cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost,
+ cmds_max, 0, sizeof(struct qedi_cmd),
+ initial_cmdsn, ISCSI_MAX_TARGET);
+ if (!cls_session) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to setup session for ep=%p\n", qedi_ep);
+ return NULL;
+ }
+
+ if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to setup cmd pool for ep=%p\n", qedi_ep);
+ goto session_teardown;
+ }
+
+ return cls_session;
+
+session_teardown:
+ iscsi_session_teardown(cls_session);
+ return NULL;
+}
+
+static void qedi_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+
+ qedi_destroy_cmd_pool(qedi, session);
+ iscsi_session_teardown(cls_session);
+}
+
+static struct iscsi_cls_conn *
+qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct iscsi_cls_conn *cls_conn;
+ struct qedi_conn *qedi_conn;
+ struct iscsi_conn *conn;
+
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn),
+ cid);
+ if (!cls_conn) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n",
+ cid, cls_session);
+ return NULL;
+ }
+
+ conn = cls_conn->dd_data;
+ qedi_conn = conn->dd_data;
+ qedi_conn->cls_conn = cls_conn;
+ qedi_conn->qedi = qedi;
+ qedi_conn->ep = NULL;
+ qedi_conn->active_cmd_count = 0;
+ INIT_LIST_HEAD(&qedi_conn->active_cmd_list);
+ spin_lock_init(&qedi_conn->list_lock);
+
+ if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n",
+ cid, cls_session);
+ goto free_conn;
+ }
+
+ return cls_conn;
+
+free_conn:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+}
+
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
+{
+ iscsi_block_session(cls_session);
+}
+
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
+{
+ iscsi_unblock_session(cls_session);
+}
+
+static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ u32 iscsi_cid = qedi_conn->iscsi_conn_id;
+
+ if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) {
+ iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+ "conn bind - entry #%d not free\n",
+ iscsi_cid);
+ return -EBUSY;
+ }
+
+ qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn;
+ return 0;
+}
+
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid)
+{
+ if (!qedi->cid_que.conn_cid_tbl) {
+ QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n");
+ return NULL;
+
+ } else if (iscsi_cid >= qedi->max_active_conns) {
+ QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid);
+ return NULL;
+ }
+ return qedi->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ u64 transport_fd, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct qedi_endpoint *qedi_ep;
+ struct iscsi_endpoint *ep;
+
+ ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
+
+ qedi_ep = ep->dd_data;
+ if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+ (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
+ return -EINVAL;
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+ return -EINVAL;
+
+ qedi_ep->conn = qedi_conn;
+ qedi_conn->ep = qedi_ep;
+ qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
+ qedi_conn->fw_cid = qedi_ep->fw_cid;
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
+ return -EINVAL;
+
+ spin_lock_init(&qedi_conn->tmf_work_lock);
+ INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
+ init_waitqueue_head(&qedi_conn->wait_queue);
+ return 0;
+}
+
+static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ struct qed_iscsi_params_update *conn_info;
+ struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn;
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_endpoint *qedi_ep;
+ int rval;
+
+ qedi_ep = qedi_conn->ep;
+
+ conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+ if (!conn_info) {
+ QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ conn_info->update_flag = 0;
+
+ if (conn->hdrdgst_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true);
+ if (conn->datadgst_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true);
+ if (conn->session->initial_r2t_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T,
+ true);
+ if (conn->session->imm_data_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA,
+ true);
+
+ conn_info->max_seq_size = conn->session->max_burst;
+ conn_info->max_recv_pdu_length = conn->max_recv_dlength;
+ conn_info->max_send_pdu_length = conn->max_xmit_dlength;
+ conn_info->first_seq_length = conn->session->first_burst;
+ conn_info->exp_stat_sn = conn->exp_statsn;
+
+ rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle,
+ conn_info);
+ if (rval) {
+ rval = -ENXIO;
+ QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
+ goto update_conn_err;
+ }
+
+ kfree(conn_info);
+ rval = 0;
+
+update_conn_err:
+ return rval;
+}
+
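+/*
+ * Derive the TCP MSS from the path MTU by subtracting fixed header
+ * overhead.  Assuming the usual 20-byte IPv4 and TCP headers and a
+ * 12-byte timestamp option, a 1500-byte MTU yields an MSS of 1448.
+ */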
+static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
+{
+ u16 mss = 0;
+ u16 hdrs = TCP_HDR_LEN;
+
+ if (is_ipv6)
+ hdrs += IPV6_HDR_LEN;
+ else
+ hdrs += IPV4_HDR_LEN;
+
+ if (vlan_en)
+ hdrs += VLAN_LEN;
+
+ mss = pmtu - hdrs;
+
+ if (tcp_ts_en)
+ mss -= TCP_OPTION_LEN;
+
+ if (!mss)
+ mss = DEF_MSS;
+
+ return mss;
+}
+
+static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
+{
+ struct qedi_ctx *qedi = qedi_ep->qedi;
+ struct qed_iscsi_params_offload *conn_info;
+ int rval;
+ int i;
+
+ conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+ if (!conn_info) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate memory ep=%p\n", qedi_ep);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac);
+ ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac);
+
+ conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]);
+ conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]);
+
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ conn_info->ip_version = 0;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "After ntohl: src_addr=%pI4, dst_addr=%pI4\n",
+ qedi_ep->src_addr, qedi_ep->dst_addr);
+ } else {
+ for (i = 1; i < 4; i++) {
+ conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]);
+ conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]);
+ }
+
+ conn_info->ip_version = 1;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "After ntohl: src_addr=%pI6, dst_addr=%pI6\n",
+ qedi_ep->src_addr, qedi_ep->dst_addr);
+ }
+
+ conn_info->src.port = qedi_ep->src_port;
+ conn_info->dst.port = qedi_ep->dst_port;
+
+ conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE;
+ conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma;
+ conn_info->vlan_id = qedi_ep->vlan_id;
+
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1);
+
+ conn_info->default_cq = (qedi_ep->fw_cid % 8);
+
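+	/*
+	 * Conservative defaults for the offloaded TCP state: window,
+	 * congestion and keepalive/retransmit tunables.  The srtt and
+	 * rtt_var seeds are presumably in milliseconds.
+	 */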
+ conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
+ conn_info->dup_ack_theshold = 3;
+ conn_info->rcv_wnd = 65535;
+ conn_info->cwnd = DEF_MAX_CWND;
+
+ conn_info->ss_thresh = 65535;
+ conn_info->srtt = 300;
+ conn_info->rtt_var = 150;
+ conn_info->flow_label = 0;
+ conn_info->ka_timeout = DEF_KA_TIMEOUT;
+ conn_info->ka_interval = DEF_KA_INTERVAL;
+ conn_info->max_rt_time = DEF_MAX_RT_TIME;
+ conn_info->ttl = DEF_TTL;
+ conn_info->tos_or_tc = DEF_TOS;
+ conn_info->remote_port = qedi_ep->dst_port;
+ conn_info->local_port = qedi_ep->src_port;
+
+ conn_info->mss = qedi_calc_mss(qedi_ep->pmtu,
+ (qedi_ep->ip_type == TCP_IPV6),
+ 1, (qedi_ep->vlan_id != 0));
+
+ conn_info->rcv_wnd_scale = 4;
+ conn_info->ts_ticks_per_second = 1000;
+ conn_info->da_timeout_value = 200;
+ conn_info->ack_frequency = 2;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Default cq index [%d], mss [%d]\n",
+ conn_info->default_cq, conn_info->mss);
+
+ rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
+ if (rval)
+ QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
+ rval, qedi_ep);
+
+ kfree(conn_info);
+ return rval;
+}
+
+static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_ctx *qedi;
+ int rval;
+
+ qedi = qedi_conn->qedi;
+
+ rval = qedi_iscsi_update_conn(qedi, qedi_conn);
+ if (rval) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+				  "conn_start: FW offload conn failed.\n");
+ rval = -EINVAL;
+ goto start_err;
+ }
+
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ qedi_conn->abrt_conn = 0;
+
+ rval = iscsi_conn_start(cls_conn);
+ if (rval) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "iscsi_conn_start: FW oflload conn failed!!\n");
+ }
+
+start_err:
+ return rval;
+}
+
+static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi;
+
+ shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+ qedi = iscsi_host_priv(shost);
+
+ qedi_conn_free_login_resources(qedi, qedi_conn);
+ iscsi_conn_teardown(cls_conn);
+}
+
+static int qedi_ep_get_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct qedi_endpoint *qedi_ep = ep->dd_data;
+ int len;
+
+ if (!qedi_ep)
+ return -ENOTCONN;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ len = sprintf(buf, "%hu\n", qedi_ep->dst_port);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (qedi_ep->ip_type == TCP_IPV4)
+ len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr);
+ else
+ len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr);
+ break;
+ default:
+ return -ENOTCONN;
+ }
+
+ return len;
+}
+
+static int qedi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct qedi_ctx *qedi;
+ int len;
+
+ qedi = iscsi_host_priv(shost);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);
+ break;
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ len = sprintf(buf, "host%d\n", shost->host_no);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (qedi->ip_type == TCP_IPV4)
+ len = sprintf(buf, "%pI4\n", qedi->src_ip);
+ else
+ len = sprintf(buf, "%pI6\n", qedi->src_ip);
+ break;
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+
+ return len;
+}
+
+static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qed_iscsi_stats iscsi_stats;
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi;
+
+ shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+ qedi = iscsi_host_priv(shost);
+ qedi_ops->get_stats(qedi->cdev, &iscsi_stats);
+
+ conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt;
+ conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt;
+ conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt;
+ conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt;
+ conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->digest_err = 0;
+ stats->timeout_err = 0;
+ strcpy(stats->custom[0].desc, "eh_abort_cnt");
+ stats->custom[0].value = conn->eh_abort_cnt;
+ stats->custom_length = 1;
+}
+
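+/* Point the request BD at exactly the bytes libiscsi staged in the
+ * login request buffer, and the response BD at a full receive segment.
+ */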
+static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
+{
+ struct iscsi_sge *bd_tbl;
+
+ bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+
+ bd_tbl->sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
+ bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
+ qedi_conn->gen_pdu.req_buf;
+ bd_tbl->reserved0 = 0;
+ bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ bd_tbl->sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
+ bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ bd_tbl->reserved0 = 0;
+}
+
+static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
+{
+ struct qedi_cmd *cmd = task->dd_data;
+ struct qedi_conn *qedi_conn = cmd->conn;
+ char *buf;
+ int data_len;
+ int rc = 0;
+
+ qedi_iscsi_prep_generic_pdu_bd(qedi_conn);
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ rc = qedi_send_iscsi_login(qedi_conn, task);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ data_len = qedi_conn->gen_pdu.req_buf_size;
+ buf = qedi_conn->gen_pdu.req_buf;
+ if (data_len)
+ rc = qedi_send_iscsi_nopout(qedi_conn, task,
+ buf, data_len, 1);
+ else
+ rc = qedi_send_iscsi_nopout(qedi_conn, task,
+ NULL, 0, 1);
+ break;
+ case ISCSI_OP_LOGOUT:
+ rc = qedi_send_iscsi_logout(qedi_conn, task);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ rc = qedi_iscsi_abort_work(qedi_conn, task);
+ break;
+ case ISCSI_OP_TEXT:
+ rc = qedi_send_iscsi_text(qedi_conn, task);
+ break;
+ default:
+ iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+ "unsupported op 0x%x\n", task->hdr->opcode);
+ }
+
+ return rc;
+}
+
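+/* Management (non-SCSI) PDU transmit: copy the payload libiscsi built
+ * into the preallocated DMA-able generic-PDU buffer before sending.
+ */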
+static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+ qedi_conn->gen_pdu.req_buf_size = task->data_count;
+
+ if (task->data_count) {
+ memcpy(qedi_conn->gen_pdu.req_buf, task->data,
+ task->data_count);
+ qedi_conn->gen_pdu.req_wr_ptr =
+ qedi_conn->gen_pdu.req_buf + task->data_count;
+ }
+
+ cmd->conn = qedi_conn;
+ cmd->scsi_cmd = NULL;
+ return qedi_iscsi_send_generic_request(task);
+}
+
+static int qedi_task_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+
+ cmd->state = 0;
+ cmd->use_slowpath = false;
+ cmd->conn = qedi_conn;
+ cmd->task = task;
+ cmd->io_cmd_in_list = false;
+ INIT_LIST_HEAD(&cmd->io_cmd);
+
+ if (!sc)
+ return qedi_mtask_xmit(conn, task);
+
+ cmd->scsi_cmd = sc;
+ return qedi_iscsi_send_ioreq(task);
+}
+
+static struct iscsi_endpoint *
+qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ struct qedi_ctx *qedi;
+ struct iscsi_endpoint *ep;
+ struct qedi_endpoint *qedi_ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct qed_dev *cdev = NULL;
+ struct qedi_uio_dev *udev = NULL;
+ struct iscsi_path path_req;
+ u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+ u32 iscsi_cid = QEDI_CID_RESERVED;
+ u16 len = 0;
+ char *buf = NULL;
+ int ret;
+
+ if (!shost) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost is NULL\n");
+ return ERR_PTR(ret);
+ }
+
+ if (do_not_recover) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ qedi = iscsi_host_priv(shost);
+ cdev = qedi->cdev;
+ udev = qedi->udev;
+
+ if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
+ if (!ep) {
+ QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+ qedi_ep = ep->dd_data;
+ memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ qedi_ep->state = EP_STATE_IDLE;
+ qedi_ep->iscsi_cid = (u32)-1;
+ qedi_ep->qedi = qedi;
+
+ if (dst_addr->sa_family == AF_INET) {
+ addr = (struct sockaddr_in *)dst_addr;
+ memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr,
+ sizeof(struct in_addr));
+ qedi_ep->dst_port = ntohs(addr->sin_port);
+ qedi_ep->ip_type = TCP_IPV4;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "dst_addr=%pI4, dst_port=%u\n",
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else if (dst_addr->sa_family == AF_INET6) {
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ memcpy(qedi_ep->dst_addr, &addr6->sin6_addr,
+ sizeof(struct in6_addr));
+ qedi_ep->dst_port = ntohs(addr6->sin6_port);
+ qedi_ep->ip_type = TCP_IPV6;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "dst_addr=%pI6, dst_port=%u\n",
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else {
+ QEDI_ERR(&qedi->dbg_ctx, "Unsupported address family %d\n",
+ dst_addr->sa_family);
+ ret = -EAFNOSUPPORT;
+ goto ep_conn_exit;
+ }
+
+ if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
+ QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
+ ret = -ENXIO;
+ goto ep_conn_exit;
+ }
+
+ ret = qedi_alloc_sq(qedi, qedi_ep);
+ if (ret)
+ goto ep_conn_exit;
+
+ ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle,
+ &qedi_ep->fw_cid, &qedi_ep->p_doorbell);
+
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n");
+ ret = -ENXIO;
+ goto ep_free_sq;
+ }
+
+ iscsi_cid = qedi_ep->handle;
+ qedi_ep->iscsi_cid = iscsi_cid;
+
+ init_waitqueue_head(&qedi_ep->ofld_wait);
+ init_waitqueue_head(&qedi_ep->tcp_ofld_wait);
+ qedi_ep->state = EP_STATE_OFLDCONN_START;
+ qedi->ep_tbl[iscsi_cid] = qedi_ep;
+
+ buf = (char *)&path_req;
+ len = sizeof(path_req);
+ memset(&path_req, 0, len);
+
+ msg_type = ISCSI_KEVENT_PATH_REQ;
+ path_req.handle = (u64)qedi_ep->iscsi_cid;
+ path_req.pmtu = qedi->ll2_mtu;
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr,
+ sizeof(struct in_addr));
+ path_req.ip_addr_len = 4;
+ } else {
+ memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr,
+ sizeof(struct in6_addr));
+ path_req.ip_addr_len = 16;
+ }
+
+ ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf,
+ len);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n",
+ iscsi_cid, ret);
+ goto ep_rel_conn;
+ }
+
+ atomic_inc(&qedi->num_offloads);
+ return ep;
+
+ep_rel_conn:
+ qedi->ep_tbl[iscsi_cid] = NULL;
+ ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+ if (ret)
+ QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
+ ret);
+ep_free_sq:
+ qedi_free_sq(qedi, qedi_ep);
+ep_conn_exit:
+ iscsi_destroy_endpoint(ep);
+ return ERR_PTR(ret);
+}
+
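+/* ep_poll convention: return 1 once the offload has completed, 0 while
+ * still pending (the caller retries), and negative on failure.
+ */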
+static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct qedi_endpoint *qedi_ep;
+ int ret = 0;
+
+ if (do_not_recover)
+ return 1;
+
+ qedi_ep = ep->dd_data;
+ if (qedi_ep->state == EP_STATE_IDLE ||
+ qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+ return -1;
+
+ if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL)
+ return 1;
+
+ ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait,
+ QEDI_OFLD_WAIT_STATE(qedi_ep),
+ msecs_to_jiffies(timeout_ms));
+
+ if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+ ret = -1;
+
+ if (ret > 0)
+ return 1;
+ else if (!ret)
+ return 0;
+ else
+ return ret;
+}
+
+static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
+{
+ struct qedi_cmd *cmd, *cmd_tmp;
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+ io_cmd) {
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+}
+
+static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct qedi_endpoint *qedi_ep;
+ struct qedi_conn *qedi_conn = NULL;
+ struct iscsi_conn *conn = NULL;
+ struct qedi_ctx *qedi;
+ int ret = 0;
+ int wait_delay = 20 * HZ;
+ int abrt_conn = 0;
+ int count = 10;
+
+ qedi_ep = ep->dd_data;
+ qedi = qedi_ep->qedi;
+
+ flush_work(&qedi_ep->offload_work);
+
+ if (qedi_ep->conn) {
+ qedi_conn = qedi_ep->conn;
+ conn = qedi_conn->cls_conn->dd_data;
+ iscsi_suspend_queue(conn);
+ abrt_conn = qedi_conn->abrt_conn;
+
+ while (count--) {
+ if (!test_bit(QEDI_CONN_FW_CLEANUP,
+ &qedi_conn->flags)) {
+ break;
+ }
+ msleep(1000);
+ }
+
+ if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ if (do_not_recover) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Do not recover cid=0x%x\n",
+ qedi_ep->iscsi_cid);
+ goto ep_exit_recover;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n",
+ qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state);
+ qedi_cleanup_active_cmd_list(qedi_conn);
+ goto ep_release_conn;
+ }
+ }
+
+ if (do_not_recover)
+ goto ep_exit_recover;
+
+ switch (qedi_ep->state) {
+ case EP_STATE_OFLDCONN_START:
+ goto ep_release_conn;
+ case EP_STATE_OFLDCONN_FAILED:
+ break;
+ case EP_STATE_OFLDCONN_COMPL:
+ if (unlikely(!qedi_conn))
+ break;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n",
+ qedi_conn->active_cmd_count, abrt_conn,
+ qedi_ep->state, qedi_ep->iscsi_cid, qedi_ep->conn);
+
+ if (!qedi_conn->active_cmd_count)
+ abrt_conn = 0;
+ else
+ abrt_conn = 1;
+
+ if (abrt_conn)
+ qedi_clearsq(qedi, qedi_conn, NULL);
+ break;
+ default:
+ break;
+ }
+
+ qedi_ep->state = EP_STATE_DISCONN_START;
+ ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
+ if (ret) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "destroy_conn failed returned %d\n", ret);
+ } else {
+ ret = wait_event_interruptible_timeout(
+ qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state !=
+ EP_STATE_DISCONN_START),
+ wait_delay);
+ if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n",
+ ret, wait_delay, qedi_ep->iscsi_cid);
+ }
+ }
+
+ep_release_conn:
+ ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+ if (ret)
+ QEDI_WARN(&qedi->dbg_ctx,
+ "release_conn returned %d, cid=0x%x\n",
+ ret, qedi_ep->iscsi_cid);
+ep_exit_recover:
+ qedi_ep->state = EP_STATE_IDLE;
+ qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL;
+ qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL;
+ qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port);
+ qedi_free_sq(qedi, qedi_ep);
+
+ if (qedi_conn)
+ qedi_conn->ep = NULL;
+
+ qedi_ep->conn = NULL;
+ qedi_ep->qedi = NULL;
+ atomic_dec(&qedi->num_offloads);
+
+ iscsi_destroy_endpoint(ep);
+}
+
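+/* Transmit a frame that iscsiuio staged in the shared tx buffer; called
+ * from qedi_set_path() when userspace signals QEDI_PATH_HANDLE.
+ */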
+static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
+{
+ struct qed_dev *cdev = qedi->cdev;
+ struct qedi_uio_dev *udev;
+ struct qedi_uio_ctrl *uctrl;
+ struct sk_buff *skb;
+ u32 len;
+ int rc = 0;
+
+ udev = qedi->udev;
+ if (!udev) {
+ QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n");
+ return -EINVAL;
+ }
+
+ uctrl = (struct qedi_uio_ctrl *)udev->uctrl;
+ if (!uctrl) {
+ QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n");
+ return -EINVAL;
+ }
+
+ len = uctrl->host_tx_pkt_len;
+ if (!len) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len);
+ return -EINVAL;
+ }
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n");
+ return -EINVAL;
+ }
+
+ skb_put(skb, len);
+ memcpy(skb->data, udev->tx_pkt, len);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (vlanid)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+
+ rc = qedi_ops->ll2->start_xmit(cdev, skb);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
+ rc);
+ kfree_skb(skb);
+ }
+
+ uctrl->host_tx_pkt_len = 0;
+ uctrl->hw_tx_cons++;
+
+ return rc;
+}
+
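+/* Runs from the offload workqueue because it sleeps waiting for the
+ * firmware's CONNECT_COMPLETE async event after posting the offload.
+ */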
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 20 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
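+/* Second half of the iscsiuio handshake: qedi_ep_connect() sends an
+ * ISCSI_KEVENT_PATH_REQ, userspace resolves the path (MAC, VLAN, MTU,
+ * source IP) and answers through this callback, which then queues the
+ * actual firmware offload work.
+ */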
+static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
+{
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+ int ret = 0;
+ u32 iscsi_cid;
+ u16 port_id = 0;
+
+ if (!shost) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost is NULL\n");
+ return ret;
+ }
+
+ if (strcmp(shost->hostt->proc_name, "qedi")) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost %s is invalid\n",
+ shost->hostt->proc_name);
+ return ret;
+ }
+
+ qedi = iscsi_host_priv(shost);
+ if (path_data->handle == QEDI_PATH_HANDLE) {
+ ret = qedi_data_avail(qedi, path_data->vlan_id);
+ goto set_path_exit;
+ }
+
+ iscsi_cid = (u32)path_data->handle;
+ qedi_ep = qedi->ep_tbl[iscsi_cid];
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+
+ if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
+ QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+ ret = -EIO;
+ goto set_path_exit;
+ }
+
+ ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]);
+ ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]);
+
+ qedi_ep->vlan_id = path_data->vlan_id;
+ if (path_data->pmtu < DEF_PATH_MTU) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "MTU cannot be %u, using default MTU %u\n",
+ path_data->pmtu, qedi->ll2_mtu);
+ path_data->pmtu = qedi->ll2_mtu;
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ }
+
+ if (path_data->pmtu != qedi->ll2_mtu) {
+ if (path_data->pmtu > JUMBO_MTU) {
+ ret = -EINVAL;
+ QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu);
+ goto set_path_exit;
+ }
+
+ qedi_reset_host_mtu(qedi, path_data->pmtu);
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ }
+
+ port_id = qedi_ep->src_port;
+ if (port_id >= QEDI_LOCAL_PORT_MIN &&
+ port_id < QEDI_LOCAL_PORT_MAX) {
+ if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id))
+ port_id = 0;
+ } else {
+ port_id = 0;
+ }
+
+ if (!port_id) {
+ port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl);
+ if (port_id == QEDI_LOCAL_PORT_INVALID) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate port id for iscsi_cid=0x%x\n",
+ iscsi_cid);
+ ret = -ENOMEM;
+ goto set_path_exit;
+ }
+ }
+
+ qedi_ep->src_port = port_id;
+
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr,
+ sizeof(struct in_addr));
+ memcpy(&qedi->src_ip[0], &path_data->src.v4_addr,
+ sizeof(struct in_addr));
+ qedi->ip_type = TCP_IPV4;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n",
+ qedi_ep->src_addr, qedi_ep->src_port,
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else {
+ memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr,
+ sizeof(struct in6_addr));
+ memcpy(&qedi->src_ip[0], &path_data->src.v6_addr,
+ sizeof(struct in6_addr));
+ qedi->ip_type = TCP_IPV6;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n",
+ qedi_ep->src_addr, qedi_ep->src_port,
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ }
+
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
+ queue_work(qedi->offload_thread, &qedi_ep->offload_work);
+
+ ret = 0;
+
+set_path_exit:
+ return ret;
+}
+
+static umode_t qedi_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ return 0444;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ case ISCSI_PARAM_BOOT_ROOT:
+ case ISCSI_PARAM_BOOT_NIC:
+ case ISCSI_PARAM_BOOT_TARGET:
+ return 0444;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static void qedi_cleanup_task(struct iscsi_task *task)
+{
+ if (!task->sc || task->state == ISCSI_TASK_PENDING) {
+ QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
+ atomic_read(&task->refcount));
+ return;
+ }
+
+ qedi_iscsi_unmap_sg_list(task->dd_data);
+}
+
+struct iscsi_transport qedi_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = QEDI_MODULE_NAME,
+ .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST |
+ CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO,
+ .create_session = qedi_session_create,
+ .destroy_session = qedi_session_destroy,
+ .create_conn = qedi_conn_create,
+ .bind_conn = qedi_conn_bind,
+ .start_conn = qedi_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ .destroy_conn = qedi_conn_destroy,
+ .set_param = iscsi_set_param,
+ .get_ep_param = qedi_ep_get_param,
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .get_host_param = qedi_host_get_param,
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = qedi_conn_get_stats,
+ .xmit_task = qedi_task_xmit,
+ .cleanup_task = qedi_cleanup_task,
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ .ep_connect = qedi_ep_connect,
+ .ep_poll = qedi_ep_poll,
+ .ep_disconnect = qedi_ep_disconnect,
+ .set_path = qedi_set_path,
+ .attr_is_visible = qedi_attr_is_visible,
+};
+
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_conn *conn;
+
+ cls_conn = qedi_conn->cls_conn;
+ conn = cls_conn->dd_data;
+ cls_sess = iscsi_conn_to_session(cls_conn);
+
+ if (iscsi_is_session_online(cls_sess)) {
+ qedi_conn->abrt_conn = 1;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failing connection, state=0x%x, cid=0x%x\n",
+ conn->session->state, qedi_conn->iscsi_conn_id);
+ iscsi_conn_failure(qedi_conn->cls_conn->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+ }
+}
+
+static const struct {
+ enum iscsi_error_types error_code;
+ char *err_string;
+} qedi_iscsi_error[] = {
+ { ISCSI_STATUS_NONE,
+ "tcp_error none"
+ },
+ { ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
+ "task cid mismatch"
+ },
+ { ISCSI_CONN_ERROR_TASK_NOT_VALID,
+ "invalid task"
+ },
+ { ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
+ "rq ring full"
+ },
+ { ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
+ "cmdq ring full"
+ },
+ { ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
+ "sge caching failed"
+ },
+ { ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
+ "hdr digest error"
+ },
+ { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
+ "local cmpl error"
+ },
+ { ISCSI_CONN_ERROR_DATA_OVERRUN,
+ "invalid task"
+ },
+ { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
+ "out of sge error"
+ },
+ { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
+ "tcp seg ip options error"
+ },
+ { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
+ "tcp ip fragment error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
+ "AHS len protocol error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
+ "itt out of range error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
+ "data seg more than pdu size"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
+ "invalid opcode"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
+ "invalid opcode before update"
+ },
+ { ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
+ "unexpected opcode"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
+ "r2t carries no data"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
+ "data sn error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
+ "data TTT error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
+ "r2t TTT error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
+ "buffer offset error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
+ "buffer offset ooo"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
+ "data seg len 0"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
+ "data xfer len 0 error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
+ "data xfer len 1 error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
+ "data xfer len 2 error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
+ "protocol lun error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
+ "f bit zero error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
+ "exp stat sn error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
+ "dsl not zero error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
+ "invalid dsl"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
+ "data seg len too big"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
+ "outstanding r2t count error"
+ },
+ { ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
+ "sense datalen error"
+ },
+};
+
+char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
+{
+ int i;
+ char *msg = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) {
+ if (qedi_iscsi_error[i].error_code == err_code) {
+ msg = qedi_iscsi_error[i].err_string;
+ break;
+ }
+ }
+ return msg;
+}
+
+void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+ struct qedi_conn *qedi_conn;
+ struct qedi_ctx *qedi;
+ char warn_notice[] = "iscsi_warning";
+ char error_notice[] = "iscsi_error";
+ char unknown_msg[] = "Unknown error";
+ char *message;
+ int need_recovery = 0;
+ u32 err_mask = 0;
+ char *msg;
+
+ if (!ep)
+ return;
+
+ qedi_conn = ep->conn;
+ if (!qedi_conn)
+ return;
+
+ qedi = ep->qedi;
+
+ QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n",
+ data->error_code);
+
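+ /* err_mask is never set today, so every decoded firmware error is
+ * treated as fatal and triggers session recovery.
+ */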
+ if (err_mask) {
+ need_recovery = 0;
+ message = warn_notice;
+ } else {
+ need_recovery = 1;
+ message = error_notice;
+ }
+
+ msg = qedi_get_iscsi_error(data->error_code);
+ if (!msg) {
+ need_recovery = 0;
+ msg = unknown_msg;
+ }
+
+ iscsi_conn_printk(KERN_ALERT,
+ qedi_conn->cls_conn->dd_data,
+ "qedi: %s - %s\n", message, msg);
+
+ if (need_recovery)
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
+
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+ struct qedi_conn *qedi_conn;
+
+ if (!ep)
+ return;
+
+ qedi_conn = ep->conn;
+ if (!qedi_conn)
+ return;
+
+ QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n",
+ data->error_code);
+
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
new file mode 100644
index 000000000000..d3c06bbddb4e
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -0,0 +1,232 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_ISCSI_H_
+#define _QEDI_ISCSI_H_
+
+#include <linux/socket.h>
+#include <linux/completion.h>
+#include "qedi.h"
+
+#define ISCSI_MAX_SESS_PER_HBA 4096
+
+#define DEF_KA_TIMEOUT 7200000
+#define DEF_KA_INTERVAL 10000
+#define DEF_KA_MAX_PROBE_COUNT 10
+#define DEF_TOS 0
+#define DEF_TTL 0xfe
+#define DEF_SND_SEQ_SCALE 0
+#define DEF_RCV_BUF 0xffff
+#define DEF_SND_BUF 0xffff
+#define DEF_SEED 0
+#define DEF_MAX_RT_TIME 8000
+#define DEF_MAX_DA_COUNT 2
+#define DEF_SWS_TIMER 1000
+#define DEF_MAX_CWND 2
+#define DEF_PATH_MTU 1500
+#define DEF_MSS 1460
+#define DEF_LL2_MTU 1560
+#define JUMBO_MTU 9000
+
+#define MIN_MTU 576 /* rfc 793 */
+#define IPV4_HDR_LEN 20
+#define IPV6_HDR_LEN 40
+#define TCP_HDR_LEN 20
+#define TCP_OPTION_LEN 12
+#define VLAN_LEN 4
+
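+/* Endpoint (offloaded TCP connection) states; each non-idle state is a
+ * distinct bit value rather than a sequential enumerator.
+ */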
+enum {
+ EP_STATE_IDLE = 0x0,
+ EP_STATE_ACQRCONN_START = 0x1,
+ EP_STATE_ACQRCONN_COMPL = 0x2,
+ EP_STATE_OFLDCONN_START = 0x4,
+ EP_STATE_OFLDCONN_COMPL = 0x8,
+ EP_STATE_DISCONN_START = 0x10,
+ EP_STATE_DISCONN_COMPL = 0x20,
+ EP_STATE_CLEANUP_START = 0x40,
+ EP_STATE_CLEANUP_CMPL = 0x80,
+ EP_STATE_TCP_FIN_RCVD = 0x100,
+ EP_STATE_TCP_RST_RCVD = 0x200,
+ EP_STATE_LOGOUT_SENT = 0x400,
+ EP_STATE_LOGOUT_RESP_RCVD = 0x800,
+ EP_STATE_CLEANUP_FAILED = 0x1000,
+ EP_STATE_OFLDCONN_FAILED = 0x2000,
+ EP_STATE_CONNECT_FAILED = 0x4000,
+ EP_STATE_DISCONN_TIMEDOUT = 0x8000,
+};
+
+struct qedi_conn;
+
+struct qedi_endpoint {
+ struct qedi_ctx *qedi;
+ u32 dst_addr[4];
+ u32 src_addr[4];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_id;
+ u16 pmtu;
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ u8 ip_type;
+ int state;
+ wait_queue_head_t ofld_wait;
+ wait_queue_head_t tcp_ofld_wait;
+ u32 iscsi_cid;
+ /* identifier of the connection from qed */
+ u32 handle;
+ u32 fw_cid;
+ void __iomem *p_doorbell;
+
+ /* Send queue management */
+ struct iscsi_wqe *sq;
+ dma_addr_t sq_dma;
+
+ u16 sq_prod_idx;
+ u16 fw_sq_prod_idx;
+ u16 sq_con_idx;
+ u32 sq_mem_size;
+
+ void *sq_pbl;
+ dma_addr_t sq_pbl_dma;
+ u32 sq_pbl_size;
+ struct qedi_conn *conn;
+ struct work_struct offload_work;
+};
+
+#define QEDI_SQ_WQES_MIN 16
+
+struct qedi_io_bdt {
+ struct iscsi_sge *sge_tbl;
+ dma_addr_t sge_tbl_dma;
+ u16 sge_valid;
+};
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf: driver buffer used to stage payload associated with
+ * the login request
+ * @req_dma_addr: dma address for iscsi login request payload buffer
+ * @req_buf_size: actual login request payload length
+ * @req_wr_ptr: pointer into login request buffer when next data is
+ * to be written
+ * @resp_hdr: iscsi header where iscsi login response header is to
+ * be recreated
+ * @resp_buf: buffer to stage login response payload
+ * @resp_dma_addr: login response payload buffer dma address
+ * @resp_buf_size: login response payload length
+ * @resp_wr_ptr: pointer into login response buffer when next data is
+ * to be written
+ * @req_bd_tbl: iscsi login request payload BD table
+ * @req_bd_dma: login request BD table dma address
+ * @resp_bd_tbl: iscsi login response payload BD table
+ * @resp_bd_dma: login response BD table dma address
+ *
+ * This structure holds buffer info for generic PDUs such as iSCSI
+ * Login, Logout and NOP-Out.
+ */
+struct generic_pdu_resc {
+ char *req_buf;
+ dma_addr_t req_dma_addr;
+ u32 req_buf_size;
+ char *req_wr_ptr;
+ struct iscsi_hdr resp_hdr;
+ char *resp_buf;
+ dma_addr_t resp_dma_addr;
+ u32 resp_buf_size;
+ char *resp_wr_ptr;
+ char *req_bd_tbl;
+ dma_addr_t req_bd_dma;
+ char *resp_bd_tbl;
+ dma_addr_t resp_bd_dma;
+};
+
+struct qedi_conn {
+ struct iscsi_cls_conn *cls_conn;
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *ep;
+ struct list_head active_cmd_list;
+ spinlock_t list_lock; /* internal conn lock */
+ u32 active_cmd_count;
+ u32 cmd_cleanup_req;
+ u32 cmd_cleanup_cmpl;
+
+ u32 iscsi_conn_id;
+ int itt;
+ int abrt_conn;
+#define QEDI_CID_RESERVED 0x5AFF
+ u32 fw_cid;
+ /*
+ * Buffer for login negotiation process
+ */
+ struct generic_pdu_resc gen_pdu;
+
+ struct list_head tmf_work_list;
+ wait_queue_head_t wait_queue;
+ spinlock_t tmf_work_lock; /* tmf work lock */
+ unsigned long flags;
+#define QEDI_CONN_FW_CLEANUP 1
+};
+
+struct qedi_cmd {
+ struct list_head io_cmd;
+ bool io_cmd_in_list;
+ struct iscsi_hdr hdr;
+ struct qedi_conn *conn;
+ struct scsi_cmnd *scsi_cmd;
+ struct scatterlist *sg;
+ struct qedi_io_bdt io_tbl;
+ struct iscsi_task_context request;
+ unsigned char *sense_buffer;
+ dma_addr_t sense_buffer_dma;
+ u16 task_id;
+
+ /* field populated for tmf work queue */
+ struct iscsi_task *task;
+ struct work_struct tmf_work;
+ int state;
+#define CLEANUP_WAIT 1
+#define CLEANUP_RECV 2
+#define CLEANUP_WAIT_FAILED 3
+#define CLEANUP_NOT_REQUIRED 4
+#define LUN_RESET_RESPONSE_RECEIVED 5
+#define RESPONSE_RECEIVED 6
+
+ int type;
+#define TYPEIO 1
+#define TYPERESET 2
+
+ struct qedi_work_map *list_tmf_work;
+ /* slowpath management */
+ bool use_slowpath;
+
+ struct iscsi_tm_rsp *tmf_resp_buf;
+ struct qedi_work cqe_work;
+};
+
+struct qedi_work_map {
+ struct list_head list;
+ struct qedi_cmd *qedi_cmd;
+ int rtid;
+
+ int state;
+#define QEDI_WORK_QUEUED 1
+#define QEDI_WORK_SCHEDULED 2
+#define QEDI_WORK_EXIT 3
+
+ struct work_struct *ptr_tmf_work;
+};
+
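+/* The wire ITT packs the firmware task id in the low 16 bits and the
+ * libiscsi itt in the high 16 bits.
+ */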
+#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16)))
+#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
+
+#define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \
+ (q)->state == EP_STATE_OFLDCONN_COMPL)
+
+#endif /* _QEDI_ISCSI_H_ */
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
new file mode 100644
index 000000000000..19ead8d17e55
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -0,0 +1,2127 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <scsi/iscsi_if.h>
+#include <linux/inet.h>
+#include <net/arp.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+
+static uint qedi_fw_debug;
+module_param(qedi_fw_debug, uint, 0644);
+MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0 (default) to 3");
+
+uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
+module_param(qedi_dbg_log, uint, 0644);
+MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");
+
+uint qedi_io_tracing;
+module_param(qedi_io_tracing, uint, 0644);
+MODULE_PARM_DESC(qedi_io_tracing,
+ " Enable logging of SCSI requests/completions into trace buffer. (default off).");
+
+const struct qed_iscsi_ops *qedi_ops;
+static struct scsi_transport_template *qedi_scsi_transport;
+static struct pci_driver qedi_pci_driver;
+static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
+static LIST_HEAD(qedi_udev_list);
+/* Static function declaration */
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
+static void qedi_free_global_queues(struct qedi_ctx *qedi);
+static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
+
+static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
+{
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+ struct async_data *data;
+ int rval = 0;
+
+ if (!context || !fw_handle) {
+ QEDI_ERR(NULL, "Recv event with ctx NULL\n");
+ return -EINVAL;
+ }
+
+ qedi = (struct qedi_ctx *)context;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
+
+ data = (struct async_data *)fw_handle;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
+ data->cid, data->itid, data->error_code,
+ data->fw_debug_param);
+
+ qedi_ep = qedi->ep_tbl[data->cid];
+
+ if (!qedi_ep) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Cannot process event, ep already disconnected, cid=0x%x\n",
+ data->cid);
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
+ switch (fw_event_code) {
+ case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
+ if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+ qedi_ep->state = EP_STATE_OFLDCONN_COMPL;
+
+ wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+ break;
+ case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
+ qedi_ep->state = EP_STATE_DISCONN_COMPL;
+ wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+ break;
+ case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
+ qedi_process_iscsi_error(qedi_ep, data);
+ break;
+ case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
+ case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
+ case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
+ case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
+ qedi_process_tcp_error(qedi_ep, data);
+ break;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
+ fw_event_code);
+ }
+
+ return rval;
+}
+
+static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+ struct qedi_uio_dev *udev = uinfo->priv;
+ struct qedi_ctx *qedi = udev->qedi;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (udev->uio_dev != -1)
+ return -EBUSY;
+
+ rtnl_lock();
+ udev->uio_dev = iminor(inode);
+ qedi_reset_uio_rings(udev);
+ set_bit(UIO_DEV_OPENED, &qedi->flags);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+ struct qedi_uio_dev *udev = uinfo->priv;
+ struct qedi_ctx *qedi = udev->qedi;
+
+ udev->uio_dev = -1;
+ clear_bit(UIO_DEV_OPENED, &qedi->flags);
+ qedi_ll2_free_skbs(qedi);
+ return 0;
+}
+
+static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
+{
+ if (udev->ll2_ring) {
+ free_page((unsigned long)udev->ll2_ring);
+ udev->ll2_ring = NULL;
+ }
+
+ if (udev->ll2_buf) {
+ free_pages((unsigned long)udev->ll2_buf, 2);
+ udev->ll2_buf = NULL;
+ }
+}
+
+static void __qedi_free_uio(struct qedi_uio_dev *udev)
+{
+ uio_unregister_device(&udev->qedi_uinfo);
+
+ __qedi_free_uio_rings(udev);
+
+ pci_dev_put(udev->pdev);
+ kfree(udev->uctrl);
+ kfree(udev);
+}
+
+static void qedi_free_uio(struct qedi_uio_dev *udev)
+{
+ if (!udev)
+ return;
+
+ list_del_init(&udev->list);
+ __qedi_free_uio(udev);
+}
+
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
+{
+ struct qedi_ctx *qedi = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+
+ qedi = udev->qedi;
+ uctrl = udev->uctrl;
+
+ spin_lock_bh(&qedi->ll2_lock);
+ uctrl->host_rx_cons = 0;
+ uctrl->hw_rx_prod = 0;
+ uctrl->hw_rx_bd_prod = 0;
+ uctrl->host_rx_bd_cons = 0;
+
+ memset(udev->ll2_ring, 0, udev->ll2_ring_size);
+ memset(udev->ll2_buf, 0, udev->ll2_buf_size);
+ spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
+{
+ int rc = 0;
+
+ if (udev->ll2_ring || udev->ll2_buf)
+ return rc;
+
+ /* Allocating memory for LL2 ring */
+ udev->ll2_ring_size = QEDI_PAGE_SIZE;
+ udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
+ if (!udev->ll2_ring) {
+ rc = -ENOMEM;
+ goto exit_alloc_ring;
+ }
+
+ /* Allocating memory for Tx/Rx pkt buffer */
+ udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
+ udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
+ udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
+ __GFP_ZERO, 2);
+ if (!udev->ll2_buf) {
+ rc = -ENOMEM;
+ goto exit_alloc_buf;
+ }
+ return rc;
+
+exit_alloc_buf:
+ free_page((unsigned long)udev->ll2_ring);
+ udev->ll2_ring = NULL;
+exit_alloc_ring:
+ return rc;
+}
+
+static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
+{
+ struct qedi_uio_dev *udev = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+ int rc = 0;
+
+ list_for_each_entry(udev, &qedi_udev_list, list) {
+ if (udev->pdev == qedi->pdev) {
+ udev->qedi = qedi;
+ if (__qedi_alloc_uio_rings(udev)) {
+ udev->qedi = NULL;
+ return -ENOMEM;
+ }
+ qedi->udev = udev;
+ return 0;
+ }
+ }
+
+ udev = kzalloc(sizeof(*udev), GFP_KERNEL);
+ if (!udev) {
+ rc = -ENOMEM;
+ goto err_udev;
+ }
+
+ uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
+ if (!uctrl) {
+ rc = -ENOMEM;
+ goto err_uctrl;
+ }
+
+ udev->uio_dev = -1;
+
+ udev->qedi = qedi;
+ udev->pdev = qedi->pdev;
+ udev->uctrl = uctrl;
+
+ rc = __qedi_alloc_uio_rings(udev);
+ if (rc)
+ goto err_uio_rings;
+
+ list_add(&udev->list, &qedi_udev_list);
+
+ pci_dev_get(udev->pdev);
+ qedi->udev = udev;
+
+ udev->tx_pkt = udev->ll2_buf;
+ udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
+ return 0;
+
+ err_uio_rings:
+ kfree(uctrl);
+ err_uctrl:
+ kfree(udev);
+ err_udev:
+ return -ENOMEM;
+}
+
+static int qedi_init_uio(struct qedi_ctx *qedi)
+{
+ struct qedi_uio_dev *udev = qedi->udev;
+ struct uio_info *uinfo;
+ int ret = 0;
+
+ if (!udev)
+ return -ENOMEM;
+
+ uinfo = &udev->qedi_uinfo;
+
+ uinfo->mem[0].addr = (unsigned long)udev->uctrl;
+ uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
+ uinfo->mem[0].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
+ uinfo->mem[1].size = udev->ll2_ring_size;
+ uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
+ uinfo->mem[2].size = udev->ll2_buf_size;
+ uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->name = "qedi_uio";
+ uinfo->version = QEDI_MODULE_VERSION;
+ uinfo->irq = UIO_IRQ_CUSTOM;
+
+ uinfo->open = qedi_uio_open;
+ uinfo->release = qedi_uio_close;
+
+ if (udev->uio_dev == -1) {
+ if (!uinfo->priv) {
+ uinfo->priv = udev;
+
+ ret = uio_register_device(&udev->pdev->dev, uinfo);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO registration failed\n");
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int ret;
+
+ sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
+ sizeof(struct status_block), &sb_phys,
+ GFP_KERNEL);
+ if (!sb_virt) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Status block allocation failed for id = %d.\n",
+ sb_id);
+ return -ENOMEM;
+ }
+
+ ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
+ sb_id, QED_SB_TYPE_STORAGE);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Status block initialization failed for id = %d.\n",
+ sb_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qedi_free_sb(struct qedi_ctx *qedi)
+{
+ struct qed_sb_info *sb_info;
+ int id;
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ sb_info = &qedi->sb_array[id];
+ if (sb_info->sb_virt)
+ dma_free_coherent(&qedi->pdev->dev,
+ sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt,
+ sb_info->sb_phys);
+ }
+}
+
+static void qedi_free_fp(struct qedi_ctx *qedi)
+{
+ kfree(qedi->fp_array);
+ kfree(qedi->sb_array);
+}
+
+static void qedi_destroy_fp(struct qedi_ctx *qedi)
+{
+ qedi_free_sb(qedi);
+ qedi_free_fp(qedi);
+}
+
+static int qedi_alloc_fp(struct qedi_ctx *qedi)
+{
+ int ret = 0;
+
+ qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+ sizeof(struct qedi_fastpath), GFP_KERNEL);
+ if (!qedi->fp_array) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fastpath fp array allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+ sizeof(struct qed_sb_info), GFP_KERNEL);
+ if (!qedi->sb_array) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fastpath sb array allocation failed.\n");
+ ret = -ENOMEM;
+ goto free_fp;
+ }
+
+ return ret;
+
+free_fp:
+ qedi_free_fp(qedi);
+ return ret;
+}
+
+static void qedi_int_fp(struct qedi_ctx *qedi)
+{
+ struct qedi_fastpath *fp;
+ int id;
+
+ memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+ sizeof(*qedi->fp_array));
+ memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+ sizeof(*qedi->sb_array));
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ fp = &qedi->fp_array[id];
+ fp->sb_info = &qedi->sb_array[id];
+ fp->sb_id = id;
+ fp->qedi = qedi;
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ "qedi", id);
+
+ /* fp_array[i] is passed as the IRQ cookie, so initialize here
+ * any data the interrupt context will need.
+ */
+ }
+}
+
+static int qedi_prepare_fp(struct qedi_ctx *qedi)
+{
+ struct qedi_fastpath *fp;
+ int id, ret = 0;
+
+ ret = qedi_alloc_fp(qedi);
+ if (ret)
+ goto err;
+
+ qedi_int_fp(qedi);
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ fp = &qedi->fp_array[id];
+ ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "SB allocation and initialization failed.\n");
+ ret = -EIO;
+ goto err_init;
+ }
+ }
+
+ return 0;
+
+err_init:
+ qedi_free_sb(qedi);
+ qedi_free_fp(qedi);
+err:
+ return ret;
+}
+
+static int qedi_setup_cid_que(struct qedi_ctx *qedi)
+{
+ int i;
+
+ qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
+ sizeof(u32), GFP_KERNEL);
+ if (!qedi->cid_que.cid_que_base)
+ return -ENOMEM;
+
+ qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
+ sizeof(struct qedi_conn *),
+ GFP_KERNEL);
+ if (!qedi->cid_que.conn_cid_tbl) {
+ kfree(qedi->cid_que.cid_que_base);
+ qedi->cid_que.cid_que_base = NULL;
+ return -ENOMEM;
+ }
+
+ qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
+ qedi->cid_que.cid_q_prod_idx = 0;
+ qedi->cid_que.cid_q_cons_idx = 0;
+ qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
+ qedi->cid_que.cid_free_cnt = qedi->max_active_conns;
+
+ for (i = 0; i < qedi->max_active_conns; i++) {
+ qedi->cid_que.cid_que[i] = i;
+ qedi->cid_que.conn_cid_tbl[i] = NULL;
+ }
+
+ return 0;
+}
+
+static void qedi_release_cid_que(struct qedi_ctx *qedi)
+{
+ kfree(qedi->cid_que.cid_que_base);
+ qedi->cid_que.cid_que_base = NULL;
+
+ kfree(qedi->cid_que.conn_cid_tbl);
+ qedi->cid_que.conn_cid_tbl = NULL;
+}
+
+static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
+ u16 start_id, u16 next)
+{
+ id_tbl->start = start_id;
+ id_tbl->max = size;
+ id_tbl->next = next;
+ spin_lock_init(&id_tbl->lock);
+ id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+ if (!id_tbl->table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
+{
+ kfree(id_tbl->table);
+ id_tbl->table = NULL;
+}
+
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+ int ret = -1;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return ret;
+
+ spin_lock(&id_tbl->lock);
+ if (!test_bit(id, id_tbl->table)) {
+ set_bit(id, id_tbl->table);
+ ret = 0;
+ }
+ spin_unlock(&id_tbl->lock);
+ return ret;
+}
+
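+/* Allocate the next free local port id: scan from ->next to the end of
+ * the bitmap, then wrap around and scan [0, ->next). The power-of-two
+ * mask on ->next assumes the port range size is a power of two.
+ */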
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
+{
+ u16 id;
+
+ spin_lock(&id_tbl->lock);
+ id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+ if (id >= id_tbl->max) {
+ id = QEDI_LOCAL_PORT_INVALID;
+ if (id_tbl->next != 0) {
+ id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+ if (id >= id_tbl->next)
+ id = QEDI_LOCAL_PORT_INVALID;
+ }
+ }
+
+ if (id < id_tbl->max) {
+ set_bit(id, id_tbl->table);
+ id_tbl->next = (id + 1) & (id_tbl->max - 1);
+ id += id_tbl->start;
+ }
+
+ spin_unlock(&id_tbl->lock);
+
+ return id;
+}
+
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+ if (id == QEDI_LOCAL_PORT_INVALID)
+ return;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return;
+
+ clear_bit(id, id_tbl->table);
+}
+
+static void qedi_cm_free_mem(struct qedi_ctx *qedi)
+{
+ kfree(qedi->ep_tbl);
+ qedi->ep_tbl = NULL;
+ qedi_free_id_tbl(&qedi->lcl_port_tbl);
+}
+
+static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
+{
+ u16 port_id;
+
+ qedi->ep_tbl = kcalloc(qedi->max_active_conns,
+ sizeof(struct qedi_endpoint *), GFP_KERNEL);
+ if (!qedi->ep_tbl)
+ return -ENOMEM;
+ port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+ if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
+ QEDI_LOCAL_PORT_MIN, port_id)) {
+ qedi_cm_free_mem(qedi);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi = NULL;
+
+ shost = iscsi_host_alloc(&qedi_host_template,
+ sizeof(struct qedi_ctx), 0);
+ if (!shost) {
+ QEDI_ERR(NULL, "Could not allocate shost\n");
+ goto exit_setup_shost;
+ }
+
+ shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ shost->max_channel = 0;
+ shost->max_lun = ~0;
+ shost->max_cmd_len = 16;
+ shost->transportt = qedi_scsi_transport;
+
+ qedi = iscsi_host_priv(shost);
+ memset(qedi, 0, sizeof(*qedi));
+ qedi->shost = shost;
+ qedi->dbg_ctx.host_no = shost->host_no;
+ qedi->pdev = pdev;
+ qedi->dbg_ctx.pdev = pdev;
+ qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
+ qedi->max_sqes = QEDI_SQ_SIZE;
+
+ if (shost_use_blk_mq(shost))
+ shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+ pci_set_drvdata(pdev, qedi);
+
+exit_setup_shost:
+ return qedi;
+}
+
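+/* LL2 receive callback: queue the skb for the recv thread, which copies
+ * it into the ring mmapped by iscsiuio.
+ */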
+static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+ struct qedi_uio_dev *udev;
+ struct qedi_uio_ctrl *uctrl;
+ struct skb_work_list *work;
+ u32 prod;
+
+ if (!qedi) {
+ QEDI_ERR(NULL, "qedi is NULL\n");
+ return -1;
+ }
+
+ if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
+ "UIO DEV is not opened\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ udev = qedi->udev;
+ uctrl = udev->uctrl;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate work so dropping frame.\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&work->list);
+ work->skb = skb;
+
+ if (skb_vlan_tag_present(skb))
+ work->vlan_id = skb_vlan_tag_get(skb);
+
+ if (work->vlan_id)
+ __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);
+
+ spin_lock_bh(&qedi->ll2_lock);
+ list_add_tail(&work->list, &qedi->ll2_skb_list);
+
+ ++uctrl->hw_rx_prod_cnt;
+ prod = (uctrl->hw_rx_prod + 1) % RX_RING;
+ if (prod != uctrl->host_rx_cons) {
+ uctrl->hw_rx_prod = prod;
+ spin_unlock_bh(&qedi->ll2_lock);
+ wake_up_process(qedi->ll2_recv_thread);
+ return 0;
+ }
+
+ spin_unlock_bh(&qedi->ll2_lock);
+ return 0;
+}
+
+/* map this skb to iscsiuio mmaped region */
+static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
+ u16 vlan_id)
+{
+ struct qedi_uio_dev *udev = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+ struct qedi_rx_bd rxbd;
+ struct qedi_rx_bd *p_rxbd;
+ u32 rx_bd_prod;
+ void *pkt;
+ int len = 0;
+
+ if (!qedi) {
+ QEDI_ERR(NULL, "qedi is NULL\n");
+ return -1;
+ }
+
+ udev = qedi->udev;
+ uctrl = udev->uctrl;
+ pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
+ len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
+ memcpy(pkt, skb->data, len);
+
+ memset(&rxbd, 0, sizeof(rxbd));
+ rxbd.rx_pkt_index = uctrl->hw_rx_prod;
+ rxbd.rx_pkt_len = len;
+ rxbd.vlan_id = vlan_id;
+
+ uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
+ rx_bd_prod = uctrl->hw_rx_bd_prod;
+ p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
+ p_rxbd += rx_bd_prod;
+
+ memcpy(p_rxbd, &rxbd, sizeof(rxbd));
+
+ /* notify the iscsiuio about new packet */
+ uio_event_notify(&udev->qedi_uinfo);
+
+ return 0;
+}
+
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
+{
+ struct skb_work_list *work, *work_tmp;
+
+ spin_lock_bh(&qedi->ll2_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
+ list_del(&work->list);
+ if (work->skb)
+ kfree_skb(work->skb);
+ kfree(work);
+ }
+ spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int qedi_ll2_recv_thread(void *arg)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
+ struct skb_work_list *work, *work_tmp;
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop()) {
+ spin_lock_bh(&qedi->ll2_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
+ list) {
+ list_del(&work->list);
+ qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
+ kfree_skb(work->skb);
+ kfree(work);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&qedi->ll2_lock);
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+ u8 num_sq_pages;
+ u32 log_page_size;
+ int rval = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
+ MIN_NUM_CPUS_MSIX(qedi));
+
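+ /* Ring pages sized for 8 bytes of SQ/R2TQ/UHQ space per outstanding
+ * task (the 8 here being the assumed per-task element footprint).
+ */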
+ num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
+
+ qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+ memset(&qedi->pf_params.iscsi_pf_params, 0,
+ sizeof(qedi->pf_params.iscsi_pf_params));
+
+ qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+ qedi->num_queues * sizeof(struct qedi_glbl_q_params),
+ &qedi->hw_p_cpuq);
+ if (!qedi->p_cpuq) {
+ QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+ rval = -1;
+ goto err_alloc_mem;
+ }
+
+ rval = qedi_alloc_global_queues(qedi);
+ if (rval) {
+ QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
+ rval = -1;
+ goto err_alloc_mem;
+ }
+
+ qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
+ qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
+ qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
+ qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+
+ for (log_page_size = 0; log_page_size < 32; log_page_size++) {
+ if ((1 << log_page_size) == PAGE_SIZE)
+ break;
+ }
+ qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
+
+ qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
+ (u64)qedi->hw_p_cpuq;
+
+ /* RQ BDQ initializations.
+ * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
+ * rqe_log_size: 8 for 256B RQE
+ */
+ qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
+ /* BDQ address and size */
+ qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
+ qedi->bdq_pbl_list_dma;
+ qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
+ qedi->bdq_pbl_list_num_entries;
+ qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;
+
+ /* cq_num_entries: num_tasks + rq_num_entries */
+ qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;
+
+ qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
+ qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
+ qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
+
+err_alloc_mem:
+ return rval;
+}
+
+/* Free DMA coherent memory for array of queue pointers we pass to qed */
+static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+ size_t size = 0;
+
+ if (qedi->p_cpuq) {
+ size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
+ pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+ qedi->hw_p_cpuq);
+ }
+
+ qedi_free_global_queues(qedi);
+
+ kfree(qedi->global_queues);
+}
+
+static void qedi_link_update(void *dev, struct qed_link_output *link)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
+
+ if (link->link_up) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_UP);
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Link Down event.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ }
+}
+
+static struct qed_iscsi_cb_ops qedi_cb_ops = {
+ {
+ .link_update = qedi_link_update,
+ }
+};
+
+static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
+ u16 que_idx, struct qedi_percpu_s *p)
+{
+ struct qedi_work *qedi_work;
+ struct qedi_conn *q_conn;
+ struct iscsi_conn *conn;
+ struct qedi_cmd *qedi_cmd;
+ u32 iscsi_cid;
+ int rc = 0;
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+ if (!q_conn) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Session no longer exists for cid=0x%x!!\n",
+ iscsi_cid);
+ return -1;
+ }
+ conn = q_conn->cls_conn->dd_data;
+
+ switch (cqe->cqe_common.cqe_type) {
+ case ISCSI_CQE_TYPE_SOLICITED:
+ case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+ qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
+ if (!qedi_cmd) {
+ rc = -1;
+ break;
+ }
+ INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
+ qedi_cmd->cqe_work.qedi = qedi;
+ memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
+ qedi_cmd->cqe_work.que_idx = que_idx;
+ qedi_cmd->cqe_work.is_solicited = true;
+ list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
+ break;
+ case ISCSI_CQE_TYPE_UNSOLICITED:
+ case ISCSI_CQE_TYPE_DUMMY:
+ case ISCSI_CQE_TYPE_TASK_CLEANUP:
+ qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
+ if (!qedi_work) {
+ rc = -1;
+ break;
+ }
+ INIT_LIST_HEAD(&qedi_work->list);
+ qedi_work->qedi = qedi;
+ memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
+ qedi_work->que_idx = que_idx;
+ qedi_work->is_solicited = false;
+ list_add_tail(&qedi_work->list, &p->work_list);
+ break;
+ default:
+ rc = -1;
+ QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
+ }
+ return rc;
+}
+
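+/* Drain new CQEs between our consumer index and the firmware producer
+ * index in the status block, handing each off to the per-CPU I/O
+ * thread's work list.
+ */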
+static bool qedi_process_completions(struct qedi_fastpath *fp)
+{
+ struct qedi_ctx *qedi = fp->qedi;
+ struct qed_sb_info *sb_info = fp->sb_info;
+ struct status_block *sb = sb_info->sb_virt;
+ struct qedi_percpu_s *p = NULL;
+ struct global_queue *que;
+ u16 prod_idx;
+ unsigned long flags;
+ union iscsi_cqe *cqe;
+ int cpu;
+ int ret;
+
+ /* Get the current firmware producer index */
+ prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+ if (prod_idx >= QEDI_CQ_SIZE)
+ prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+ que = qedi->global_queues[fp->sb_id];
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
+ que, prod_idx, que->cq_cons_idx, fp->sb_id);
+
+ qedi->intr_cpu = fp->sb_id;
+ cpu = smp_processor_id();
+ p = &per_cpu(qedi_percpu, cpu);
+
+ WARN_ON(!p->iothread);
+
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ while (que->cq_cons_idx != prod_idx) {
+ cqe = &que->cq[que->cq_cons_idx];
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "cqe=%p prod_idx=%d cons_idx=%d.\n",
+ cqe, prod_idx, que->cq_cons_idx);
+
+ ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
+ if (ret)
+ continue;
+
+ que->cq_cons_idx++;
+ if (que->cq_cons_idx == QEDI_CQ_SIZE)
+ que->cq_cons_idx = 0;
+ }
+ wake_up_process(p->iothread);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+ return true;
+}
+
+static bool qedi_fp_has_work(struct qedi_fastpath *fp)
+{
+ struct qedi_ctx *qedi = fp->qedi;
+ struct global_queue *que;
+ struct qed_sb_info *sb_info = fp->sb_info;
+ struct status_block *sb = sb_info->sb_virt;
+ u16 prod_idx;
+
+ barrier();
+
+ /* Get the current firmware producer index */
+ prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+ /* Get the pointer to the global CQ this completion is on */
+ que = qedi->global_queues[fp->sb_id];
+
+ /* prod idx wrap around uint16 */
+ if (prod_idx >= QEDI_CQ_SIZE)
+ prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+ return (que->cq_cons_idx != prod_idx);
+}
+
+/* MSI-X fastpath handler code */
+static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
+{
+ struct qedi_fastpath *fp = dev_id;
+ struct qedi_ctx *qedi = fp->qedi;
+ bool wake_io_thread = true;
+
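+ /* Mask this status block while the CQ is drained; it is re-armed below once no work remains. */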
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+
+process_again:
+ wake_io_thread = qedi_process_completions(fp);
+ if (wake_io_thread) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "process already running\n");
+ }
+
+ if (qedi_fp_has_work(fp) == 0)
+ qed_sb_update_sb_idx(fp->sb_info);
+
+ /* Check for more work */
+ rmb();
+
+ if (qedi_fp_has_work(fp) == 0)
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+ else
+ goto process_again;
+
+ return IRQ_HANDLED;
+}
+
+/* simd handler for MSI/INTa */
+static void qedi_simd_int_handler(void *cookie)
+{
+ /* Cookie is qedi_ctx struct */
+ struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+
+ QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
+}
+
+#define QEDI_SIMD_HANDLER_NUM 0
+static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
+{
+ int i;
+
+ if (qedi->int_info.msix_cnt) {
+ for (i = 0; i < qedi->int_info.used_cnt; i++) {
+ synchronize_irq(qedi->int_info.msix[i].vector);
+ irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+ NULL);
+ free_irq(qedi->int_info.msix[i].vector,
+ &qedi->fp_array[i]);
+ }
+ } else {
+ qedi_ops->common->simd_handler_clean(qedi->cdev,
+ QEDI_SIMD_HANDLER_NUM);
+ }
+
+ qedi->int_info.used_cnt = 0;
+ qedi_ops->common->set_fp_int(qedi->cdev, 0);
+}
+
+static int qedi_request_msix_irq(struct qedi_ctx *qedi)
+{
+ int i, rc, cpu;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+ rc = request_irq(qedi->int_info.msix[i].vector,
+ qedi_msix_handler, 0, "qedi",
+ &qedi->fp_array[i]);
+
+ if (rc) {
+ QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
+ qedi_sync_free_irqs(qedi);
+ return rc;
+ }
+ qedi->int_info.used_cnt++;
+ rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+ get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ return 0;
+}
+
+static int qedi_setup_int(struct qedi_ctx *qedi)
+{
+ int rc = 0;
+
+ rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
+ rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
+ if (rc)
+ goto exit_setup_int;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
+ qedi->int_info.msix_cnt, num_online_cpus());
+
+ if (qedi->int_info.msix_cnt) {
+ rc = qedi_request_msix_irq(qedi);
+ goto exit_setup_int;
+ } else {
+ qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
+ QEDI_SIMD_HANDLER_NUM,
+ qedi_simd_int_handler);
+ qedi->int_info.used_cnt = 1;
+ }
+
+exit_setup_int:
+ return rc;
+}
+
+static void qedi_free_bdq(struct qedi_ctx *qedi)
+{
+ int i;
+
+ if (qedi->bdq_pbl_list)
+ dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
+
+ if (qedi->bdq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
+ qedi->bdq_pbl, qedi->bdq_pbl_dma);
+
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ if (qedi->bdq[i].buf_addr) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
+ qedi->bdq[i].buf_addr,
+ qedi->bdq[i].buf_dma);
+ }
+ }
+}
+
+static void qedi_free_global_queues(struct qedi_ctx *qedi)
+{
+ int i;
+ struct global_queue **gl = qedi->global_queues;
+
+ for (i = 0; i < qedi->num_queues; i++) {
+ if (!gl[i])
+ continue;
+
+ if (gl[i]->cq)
+ dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
+ gl[i]->cq, gl[i]->cq_dma);
+ if (gl[i]->cq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
+ gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
+
+ kfree(gl[i]);
+ }
+ qedi_free_bdq(qedi);
+}
+
+static int qedi_alloc_bdq(struct qedi_ctx *qedi)
+{
+ int i;
+ struct scsi_bd *pbl;
+ u64 *list;
+ dma_addr_t page;
+
+ /* Alloc dma memory for BDQ buffers */
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ qedi->bdq[i].buf_addr =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_BDQ_BUF_SIZE,
+ &qedi->bdq[i].buf_dma,
+ GFP_KERNEL);
+ if (!qedi->bdq[i].buf_addr) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not allocate BDQ buffer %d.\n", i);
+ return -ENOMEM;
+ }
+ }
+
+ /* Alloc dma memory for BDQ page buffer list */
+ qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
+ qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
+ qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
+ qedi->rq_num_entries);
+
+ qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->bdq_pbl_mem_size,
+ &qedi->bdq_pbl_dma, GFP_KERNEL);
+ if (!qedi->bdq_pbl) {
+ QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Populate BDQ PBL with physical and virtual address of individual
+ * BDQ buffers
+ */
+ pbl = (struct scsi_bd *)qedi->bdq_pbl;
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ pbl->address.hi =
+ cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
+ pbl->address.lo =
+ cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
+ pbl, pbl->address.hi, pbl->address.lo, i);
+ pbl->opaque.hi = 0;
+ pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+ pbl++;
+ }
+
+ /* Allocate list of PBL pages */
+ qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+ PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
+ if (!qedi->bdq_pbl_list) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not allocate list of PBL pages.\n");
+ return -ENOMEM;
+ }
+ memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
+
+ /*
+ * Now populate PBL list with pages that contain pointers to the
+ * individual buffers.
+ */
+ qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
+ list = (u64 *)qedi->bdq_pbl_list;
+ page = qedi->bdq_pbl_list_dma;
+ for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
+ *list = qedi->bdq_pbl_dma;
+ list++;
+ page += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
+{
+ u32 *list;
+ int i;
+ int status = 0, rc;
+ u32 *pbl;
+ dma_addr_t page;
+ int num_pages;
+
+ /*
+ * Number of global queues (CQ / RQ). This should
+ * be <= number of available MSIX vectors for the PF
+ */
+ if (!qedi->num_queues) {
+ QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
+ return 1;
+ }
+
+ /* Make sure we allocated the PBL that will contain the physical
+ * addresses of our queues
+ */
+ if (!qedi->p_cpuq) {
+ status = 1;
+ goto mem_alloc_failure;
+ }
+
+ qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
+ qedi->num_queues), GFP_KERNEL);
+ if (!qedi->global_queues) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocate global queues array ptr memory\n");
+ return -ENOMEM;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "qedi->global_queues=%p.\n", qedi->global_queues);
+
+ /* Allocate DMA coherent buffers for BDQ */
+ rc = qedi_alloc_bdq(qedi);
+ if (rc)
+ goto mem_alloc_failure;
+
+ /* Allocate a CQ and an associated PBL for each MSI-X
+ * vector.
+ */
+ for (i = 0; i < qedi->num_queues; i++) {
+ qedi->global_queues[i] =
+ kzalloc(sizeof(*qedi->global_queues[0]),
+ GFP_KERNEL);
+ if (!qedi->global_queues[i]) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocation global queue %d.\n", i);
+ goto mem_alloc_failure;
+ }
+
+ qedi->global_queues[i]->cq_mem_size =
+ (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
+ qedi->global_queues[i]->cq_mem_size =
+ (qedi->global_queues[i]->cq_mem_size +
+ (QEDI_PAGE_SIZE - 1));
+
+ qedi->global_queues[i]->cq_pbl_size =
+ (qedi->global_queues[i]->cq_mem_size /
+ QEDI_PAGE_SIZE) * sizeof(void *);
+ qedi->global_queues[i]->cq_pbl_size =
+ (qedi->global_queues[i]->cq_pbl_size +
+ (QEDI_PAGE_SIZE - 1));
+
+ qedi->global_queues[i]->cq =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
+
+ if (!qedi->global_queues[i]->cq) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate cq.\n");
+ status = -ENOMEM;
+ goto mem_alloc_failure;
+ }
+ memset(qedi->global_queues[i]->cq, 0,
+ qedi->global_queues[i]->cq_mem_size);
+
+ qedi->global_queues[i]->cq_pbl =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
+
+ if (!qedi->global_queues[i]->cq_pbl) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate cq PBL.\n");
+ status = -ENOMEM;
+ goto mem_alloc_failure;
+ }
+ memset(qedi->global_queues[i]->cq_pbl, 0,
+ qedi->global_queues[i]->cq_pbl_size);
+
+ /* Create PBL */
+ num_pages = qedi->global_queues[i]->cq_mem_size /
+ QEDI_PAGE_SIZE;
+ page = qedi->global_queues[i]->cq_dma;
+ pbl = (u32 *)qedi->global_queues[i]->cq_pbl;
+
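+ /* Each PBL entry is the 64-bit DMA address of one CQ page, stored as two 32-bit words, low word first. */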
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += QEDI_PAGE_SIZE;
+ }
+ }
+
+ list = (u32 *)qedi->p_cpuq;
+
+ /*
+ * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
+ * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
+ * to the physical address which contains an array of pointers to the
+ * physical addresses of the specific queue pages.
+ */
+ for (i = 0; i < qedi->num_queues; i++) {
+ *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
+ list++;
+ *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
+ list++;
+
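+ /* qedi does not configure a per-queue RQ here, so the RQ PBL pointers are left zero. */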
+ *list = 0;
+ list++;
+ *list = 0;
+ list++;
+ }
+
+ return 0;
+
+mem_alloc_failure:
+ qedi_free_global_queues(qedi);
+ return status;
+}
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+ int rval = 0;
+ u32 *pbl;
+ dma_addr_t page;
+ int num_pages;
+
+ if (!ep)
+ return -EIO;
+
+ /* Calculate appropriate queue and PBL sizes */
+ ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
+ ep->sq_mem_size += QEDI_PAGE_SIZE - 1;
+
+ ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
+ ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
+
+ ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
+ if (!ep->sq) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate send queue.\n");
+ rval = -ENOMEM;
+ goto out;
+ }
+ memset(ep->sq, 0, ep->sq_mem_size);
+
+ ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
+ if (!ep->sq_pbl) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate send queue PBL.\n");
+ rval = -ENOMEM;
+ goto out_free_sq;
+ }
+ memset(ep->sq_pbl, 0, ep->sq_pbl_size);
+
+ /* Create PBL */
+ num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
+ page = ep->sq_dma;
+ pbl = (u32 *)ep->sq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += QEDI_PAGE_SIZE;
+ }
+
+ return rval;
+
+out_free_sq:
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+ ep->sq_dma);
+out:
+ return rval;
+}
+
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+ if (ep->sq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
+ ep->sq_pbl_dma);
+ if (ep->sq)
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+ ep->sq_dma);
+}
+
+int qedi_get_task_idx(struct qedi_ctx *qedi)
+{
+ s16 tmp_idx;
+
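+ /* Claim a free task index atomically: find a clear bit, then test_and_set it, retrying if another context won the race. */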
+again:
+ tmp_idx = find_first_zero_bit(qedi->task_idx_map,
+ MAX_ISCSI_TASK_ENTRIES);
+
+ if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
+ QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
+ tmp_idx = -1;
+ goto err_idx;
+ }
+
+ if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
+ goto again;
+
+err_idx:
+ return tmp_idx;
+}
+
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
+{
+ if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "FW task context, already cleared, tid=0x%x\n", idx);
+ WARN_ON(1);
+ }
+}
+
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+ struct qedi_cmd *cmd)
+{
+ qedi->itt_map[tid].itt = proto_itt;
+ qedi->itt_map[tid].p_cmd = cmd;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
+ qedi->itt_map[tid].itt);
+}
+
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
+{
+ u16 i;
+
+ for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
+ if (qedi->itt_map[i].itt == itt) {
+ *tid = i;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "Ref itt=0x%x, found at tid=0x%x\n",
+ itt, *tid);
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
+{
+ *proto_itt = qedi->itt_map[tid].itt;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "Get itt map tid [0x%x with proto itt[0x%x]",
+ tid, *proto_itt);
+}
+
+struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
+{
+ struct qedi_cmd *cmd = NULL;
+
+ if (tid >= MAX_ISCSI_TASK_ENTRIES)
+ return NULL;
+
+ cmd = qedi->itt_map[tid].p_cmd;
+ if (!cmd || cmd->task_id != tid)
+ return NULL;
+
+ qedi->itt_map[tid].p_cmd = NULL;
+
+ return cmd;
+}
+
+static int qedi_alloc_itt(struct qedi_ctx *qedi)
+{
+ qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
+ sizeof(struct qedi_itt_map), GFP_KERNEL);
+ if (!qedi->itt_map) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocate itt map array memory\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void qedi_free_itt(struct qedi_ctx *qedi)
+{
+ kfree(qedi->itt_map);
+}
+
+static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
+ .rx_cb = qedi_ll2_rx,
+ .tx_cb = NULL,
+};
+
+static int qedi_percpu_io_thread(void *arg)
+{
+ struct qedi_percpu_s *p = arg;
+ struct qedi_work *work, *tmp;
+ unsigned long flags;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop()) {
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ while (!list_empty(&p->work_list)) {
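+ /* Splice pending work onto a private list so the lock can be dropped while this batch is processed. */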
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ qedi_fp_process_cqes(work);
+ if (!work->is_solicited)
+ kfree(work);
+ }
+ cond_resched();
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static void qedi_percpu_thread_create(unsigned int cpu)
+{
+ struct qedi_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(qedi_percpu, cpu);
+
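+ /* Create the worker on the CPU's NUMA node and bind it there so completion work queued from that CPU stays local. */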
+ thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
+ cpu_to_node(cpu),
+ "qedi_thread/%d", cpu);
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ }
+}
+
+static void qedi_percpu_thread_destroy(unsigned int cpu)
+{
+ struct qedi_percpu_s *p;
+ struct task_struct *thread;
+ struct qedi_work *work, *tmp;
+
+ p = &per_cpu(qedi_percpu, cpu);
+ spin_lock_bh(&p->p_work_lock);
+ thread = p->iothread;
+ p->iothread = NULL;
+
+ list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+ list_del_init(&work->list);
+ qedi_fp_process_cqes(work);
+ if (!work->is_solicited)
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->p_work_lock);
+ if (thread)
+ kthread_stop(thread);
+}
+
+static int qedi_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ QEDI_ERR(NULL, "CPU %d online.\n", cpu);
+ qedi_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ QEDI_ERR(NULL, "CPU %d offline.\n", cpu);
+ qedi_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block qedi_cpu_notifier = {
+ .notifier_call = qedi_cpu_callback,
+};
+
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
+{
+ struct qed_ll2_params params;
+
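+ /* Tear down offloaded connections and the light-L2 path, then restart LL2 with the new MTU. */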
+ qedi_recover_all_conns(qedi);
+
+ qedi_ops->ll2->stop(qedi->cdev);
+ qedi_ll2_free_skbs(qedi);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
+ qedi->ll2_mtu, mtu);
+ memset(&params, 0, sizeof(params));
+ qedi->ll2_mtu = mtu;
+ params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
+ params.drop_ttl0_packets = 0;
+ params.rx_vlan_stripping = 1;
+ ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+ qedi_ops->ll2->start(qedi->cdev, &params);
+}
+
+static void __qedi_remove(struct pci_dev *pdev, int mode)
+{
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+ if (qedi->tmf_thread) {
+ flush_workqueue(qedi->tmf_thread);
+ destroy_workqueue(qedi->tmf_thread);
+ qedi->tmf_thread = NULL;
+ }
+
+ if (qedi->offload_thread) {
+ flush_workqueue(qedi->offload_thread);
+ destroy_workqueue(qedi->offload_thread);
+ qedi->offload_thread = NULL;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
+ qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+ qedi_sync_free_irqs(qedi);
+
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+ qedi_ops->stop(qedi->cdev);
+ qedi_ops->ll2->stop(qedi->cdev);
+ }
+
+ if (mode == QEDI_MODE_NORMAL)
+ qedi_free_iscsi_pf_param(qedi);
+
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+ qedi_ops->common->slowpath_stop(qedi->cdev);
+ qedi_ops->common->remove(qedi->cdev);
+ }
+
+ qedi_destroy_fp(qedi);
+
+ if (mode == QEDI_MODE_NORMAL) {
+ qedi_release_cid_que(qedi);
+ qedi_cm_free_mem(qedi);
+ qedi_free_uio(qedi->udev);
+ qedi_free_itt(qedi);
+
+ iscsi_host_remove(qedi->shost);
+ iscsi_host_free(qedi->shost);
+
+ if (qedi->ll2_recv_thread) {
+ kthread_stop(qedi->ll2_recv_thread);
+ qedi->ll2_recv_thread = NULL;
+ }
+ qedi_ll2_free_skbs(qedi);
+ }
+}
+
+static int __qedi_probe(struct pci_dev *pdev, int mode)
+{
+ struct qedi_ctx *qedi;
+ struct qed_ll2_params params;
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+ bool is_vf = false;
+ char host_buf[16];
+ struct qed_link_params link_params;
+ struct qed_slowpath_params sp_params;
+ struct qed_probe_params qed_params;
+ void *task_start, *task_end;
+ int rc;
+ u16 tmp;
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ qedi = qedi_host_alloc(pdev);
+ if (!qedi) {
+ rc = -ENOMEM;
+ goto exit_probe;
+ }
+ } else {
+ qedi = pci_get_drvdata(pdev);
+ }
+
+ memset(&qed_params, 0, sizeof(qed_params));
+ qed_params.protocol = QED_PROTOCOL_ISCSI;
+ qed_params.dp_module = dp_module;
+ qed_params.dp_level = dp_level;
+ qed_params.is_vf = is_vf;
+ qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
+ if (!qedi->cdev) {
+ rc = -ENODEV;
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
+ goto free_host;
+ }
+
+ qedi->msix_count = MAX_NUM_MSIX_PF;
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ rc = qedi_set_iscsi_pf_param(qedi);
+ if (rc) {
+ rc = -ENOMEM;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Set iSCSI pf param fail\n");
+ goto free_host;
+ }
+ }
+
+ qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+ rc = qedi_prepare_fp(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n");
+ goto free_pf_params;
+ }
+
+ /* Start the Slowpath-process */
+ memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
+ sp_params.int_mode = QED_INT_MODE_MSIX;
+ sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
+ sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
+ sp_params.drv_rev = QEDI_DRIVER_REV_VER;
+ sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
+ strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
+ rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
+ goto stop_hw;
+ }
+
+ /* update_pf_params needs to be called before and after slowpath
+ * start
+ */
+ qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+ rc = qedi_setup_int(qedi);
+ if (rc)
+ goto stop_iscsi_func;
+
+ qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+ /* Learn information crucial for qedi to progress */
+ rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
+ if (rc)
+ goto stop_iscsi_func;
+
+ /* Record BDQ producer doorbell addresses */
+ qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
+ qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "BDQ primary_prod=%p secondary_prod=%p.\n",
+ qedi->bdq_primary_prod,
+ qedi->bdq_secondary_prod);
+
+ /*
+ * We need to write the number of BDs in the BDQ we've preallocated so
+ * the f/w will do a prefetch and we'll get an unsolicited CQE when a
+ * packet arrives.
+ */
+ qedi->bdq_prod_idx = QEDI_BDQ_NUM;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Writing %d to primary and secondary BDQ doorbell registers.\n",
+ qedi->bdq_prod_idx);
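+ /* Read back after each write to flush the posted doorbell update. */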
+ writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+ tmp = readw(qedi->bdq_primary_prod);
+ writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+ tmp = readw(qedi->bdq_secondary_prod);
+
+ ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
+ qedi->mac);
+
+ sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
+
+ qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
+
+ memset(&params, 0, sizeof(params));
+ params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
+ qedi->ll2_mtu = DEF_PATH_MTU;
+ params.drop_ttl0_packets = 0;
+ params.rx_vlan_stripping = 1;
+ ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ /* set up rx path */
+ INIT_LIST_HEAD(&qedi->ll2_skb_list);
+ spin_lock_init(&qedi->ll2_lock);
+ /* start qedi context */
+ spin_lock_init(&qedi->hba_lock);
+ spin_lock_init(&qedi->task_idx_lock);
+ }
+ qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
+ qedi_ops->ll2->start(qedi->cdev, &params);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
+ (void *)qedi,
+ "qedi_ll2_thread");
+ }
+
+ rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
+ qedi, qedi_iscsi_event_cb);
+ if (rc) {
+ rc = -ENODEV;
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
+ goto stop_slowpath;
+ }
+
+ task_start = qedi_get_task_mem(&qedi->tasks, 0);
+ task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Task context start=%p, end=%p block_size=%u.\n",
+ task_start, task_end, qedi->tasks.size);
+
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
+ if (rc) {
+ QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
+ &qedi_dbg_fops);
+#endif
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
+ QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
+ FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+
+ if (mode == QEDI_MODE_NORMAL) {
+ if (iscsi_host_add(qedi->shost, &pdev->dev)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not add iscsi host\n");
+ rc = -ENOMEM;
+ goto remove_host;
+ }
+
+ /* Allocate uio buffers */
+ rc = qedi_alloc_uio_rings(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO alloc ring failed err=%d\n", rc);
+ goto remove_host;
+ }
+
+ rc = qedi_init_uio(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO init failed, err=%d\n", rc);
+ goto free_uio;
+ }
+
+ /* host the array on iscsi_conn */
+ rc = qedi_setup_cid_que(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not setup cid que\n");
+ goto free_uio;
+ }
+
+ rc = qedi_cm_alloc_mem(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not alloc cm memory\n");
+ goto free_cid_que;
+ }
+
+ rc = qedi_alloc_itt(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not alloc itt memory\n");
+ goto free_cid_que;
+ }
+
+ sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ qedi->tmf_thread = create_singlethread_workqueue(host_buf);
+ if (!qedi->tmf_thread) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to start tmf thread!\n");
+ rc = -ENODEV;
+ goto free_cid_que;
+ }
+
+ sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
+ qedi->offload_thread = create_workqueue(host_buf);
+ if (!qedi->offload_thread) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to start offload thread!\n");
+ rc = -ENODEV;
+ goto free_cid_que;
+ }
+
+ /* F/w needs 1st task context memory entry for performance */
+ set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
+ atomic_set(&qedi->num_offloads, 0);
+ }
+
+ return 0;
+
+free_cid_que:
+ qedi_release_cid_que(qedi);
+free_uio:
+ qedi_free_uio(qedi->udev);
+remove_host:
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+ iscsi_host_remove(qedi->shost);
+stop_iscsi_func:
+ qedi_ops->stop(qedi->cdev);
+stop_slowpath:
+ qedi_ops->common->slowpath_stop(qedi->cdev);
+stop_hw:
+ qedi_ops->common->remove(qedi->cdev);
+free_pf_params:
+ qedi_free_iscsi_pf_param(qedi);
+free_host:
+ iscsi_host_free(qedi->shost);
+exit_probe:
+ return rc;
+}
+
+static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return __qedi_probe(pdev, QEDI_MODE_NORMAL);
+}
+
+static void qedi_remove(struct pci_dev *pdev)
+{
+ __qedi_remove(pdev, QEDI_MODE_NORMAL);
+}
+
+static struct pci_device_id qedi_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
+
+static struct pci_driver qedi_pci_driver = {
+ .name = QEDI_MODULE_NAME,
+ .id_table = qedi_pci_tbl,
+ .probe = qedi_probe,
+ .remove = qedi_remove,
+};
+
+static int __init qedi_init(void)
+{
+ int rc = 0;
+ struct qedi_percpu_s *p;
+ unsigned int cpu = 0;
+
+ qedi_ops = qed_get_iscsi_ops();
+ if (!qedi_ops) {
+ QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
+ rc = -EINVAL;
+ goto exit_qedi_init_0;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_init("qedi");
+#endif
+
+ qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
+ if (!qedi_scsi_transport) {
+ QEDI_ERR(NULL, "Could not register qedi transport");
+ rc = -ENOMEM;
+ goto exit_qedi_init_1;
+ }
+
+ register_hotcpu_notifier(&qedi_cpu_notifier);
+
+ rc = pci_register_driver(&qedi_pci_driver);
+ if (rc) {
+ QEDI_ERR(NULL, "Failed to register driver\n");
+ goto exit_qedi_init_2;
+ }
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(qedi_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->p_work_lock);
+ p->iothread = NULL;
+ }
+
+ for_each_online_cpu(cpu)
+ qedi_percpu_thread_create(cpu);
+
+ return rc;
+
+exit_qedi_init_2:
+ iscsi_unregister_transport(&qedi_iscsi_transport);
+exit_qedi_init_1:
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_exit();
+#endif
+ qed_put_iscsi_ops();
+exit_qedi_init_0:
+ return rc;
+}
+
+static void __exit qedi_cleanup(void)
+{
+ unsigned int cpu = 0;
+
+ for_each_online_cpu(cpu)
+ qedi_percpu_thread_destroy(cpu);
+
+ pci_unregister_driver(&qedi_pci_driver);
+ unregister_hotcpu_notifier(&qedi_cpu_notifier);
+ iscsi_unregister_transport(&qedi_iscsi_transport);
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_exit();
+#endif
+ qed_put_iscsi_ops();
+}
+
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_VERSION(QEDI_MODULE_VERSION);
+module_init(qedi_init);
+module_exit(qedi_cleanup);
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
new file mode 100644
index 000000000000..b10c48bd1428
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_sysfs.c
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+#include "qedi_dbg.h"
+
+static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ return iscsi_host_priv(shost);
+}
+
+static ssize_t qedi_show_port_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+
+ if (atomic_read(&qedi->link_state) == QEDI_LINK_UP)
+ return sprintf(buf, "Online\n");
+ else
+ return sprintf(buf, "Linkdown\n");
+}
+
+static ssize_t qedi_show_speed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+ struct qed_link_output if_link;
+
+ qedi_ops->common->get_link(qedi->cdev, &if_link);
+
+ return sprintf(buf, "%d Gbit\n", if_link.speed / 1000);
+}
+
+static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL);
+static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL);
+
+struct device_attribute *qedi_shost_attrs[] = {
+ &dev_attr_port_state,
+ &dev_attr_speed,
+ NULL
+};
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
new file mode 100644
index 000000000000..9543a1b139d4
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -0,0 +1,14 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#define QEDI_MODULE_VERSION "8.10.3.0"
+#define QEDI_DRIVER_MAJOR_VER 8
+#define QEDI_DRIVER_MINOR_VER 10
+#define QEDI_DRIVER_REV_VER 3
+#define QEDI_DRIVER_ENG_VER 0
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fe7469c901f7..47eb4d545d13 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1988,9 +1988,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
scsi_qla_host_t *vha = NULL;
struct qla_hw_data *ha = base_vha->hw;
- uint16_t options = 0;
int cnt;
struct req_que *req = ha->req_q_map[0];
+ struct qla_qpair *qpair;
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
if (ret) {
@@ -2075,15 +2075,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
qlt_vport_create(vha, ha);
qla24xx_vport_disable(fc_vport, disable);
- if (ha->flags.cpu_affinity_enabled) {
- req = ha->req_q_map[1];
- ql_dbg(ql_dbg_multiq, vha, 0xc000,
- "Request queue %p attached with "
- "VP[%d], cpu affinity =%d\n",
- req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
- goto vport_queue;
- } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
+ if (!ql2xmqsupport || !ha->npiv_info)
goto vport_queue;
+
/* Create a request queue in QoS mode for the vport */
for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
@@ -2095,20 +2089,20 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (qos) {
- ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
- qos);
- if (!ret)
+ qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx);
+ if (!qpair)
ql_log(ql_log_warn, vha, 0x7084,
- "Can't create request queue for VP[%d]\n",
+ "Can't create qpair for VP[%d]\n",
vha->vp_idx);
else {
ql_dbg(ql_dbg_multiq, vha, 0xc001,
- "Request Que:%d Q0s: %d) created for VP[%d]\n",
- ret, qos, vha->vp_idx);
+ "Queue pair: %d Qos: %d) created for VP[%d]\n",
+ qpair->id, qos, vha->vp_idx);
ql_dbg(ql_dbg_user, vha, 0x7085,
- "Request Que:%d Q0s: %d) created for VP[%d]\n",
- ret, qos, vha->vp_idx);
- req = ha->req_q_map[ret];
+ "Queue Pair: %d Qos: %d) created for VP[%d]\n",
+ qpair->id, qos, vha->vp_idx);
+ req = qpair->req;
+ vha->qpair = qpair;
}
}
@@ -2162,10 +2156,10 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
clear_bit(vha->vp_idx, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
- if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
- if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
+ if (vha->qpair->vp_idx == vha->vp_idx) {
+ if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
- "Queue delete failed.\n");
+ "Queue Pair delete failed.\n");
}
ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 45af34ddc432..21d9fb7fc887 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,7 +11,7 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0191 | 0x0146 |
+ * | Module Init and Probe | 0x0193 | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e |
* | Mailbox commands | 0x1199 | 0x1193 |
@@ -58,7 +58,7 @@
* | | | 0xb13a,0xb142 |
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
- * | MultiQ | 0xc00c | |
+ * | MultiQ | 0xc010 | |
* | Misc | 0xd301 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 5236e3f2a06a..f7df01b76714 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -401,6 +401,7 @@ typedef struct srb {
uint16_t type;
char *name;
int iocbs;
+ struct qla_qpair *qpair;
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
@@ -2719,6 +2720,7 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
+ int (*start_scsi_mq) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
int (*iospace_config)(struct qla_hw_data*);
int (*initialize_adapter)(struct scsi_qla_host *);
@@ -2730,8 +2732,10 @@ struct isp_operations {
#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
-#define QLA_MSIX_DEFAULT 0x00
-#define QLA_MSIX_RSP_Q 0x01
+#define QLA_MSIX_DEFAULT 0x00
+#define QLA_MSIX_RSP_Q 0x01
+#define QLA_ATIO_VECTOR 0x02
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
@@ -2745,9 +2749,11 @@ struct scsi_qla_host;
struct qla_msix_entry {
int have_irq;
+ int in_use;
uint32_t vector;
uint16_t entry;
- struct rsp_que *rsp;
+ char name[30];
+ void *handle;
struct irq_affinity_notify irq_notify;
int cpuid;
};
@@ -2872,7 +2878,6 @@ struct rsp_que {
struct qla_msix_entry *msix;
struct req_que *req;
srb_t *status_srb; /* status continuation entry */
- struct work_struct q_work;
dma_addr_t dma_fx00;
response_t *ring_fx00;
@@ -2909,6 +2914,37 @@ struct req_que {
uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};
+/*Queue pair data structure */
+struct qla_qpair {
+ spinlock_t qp_lock;
+ atomic_t ref_count;
+ /* distill these fields down to 'online=0/1'
+ * ha->flags.eeh_busy
+ * ha->flags.pci_channel_io_perm_failure
+ * base_vha->loop_state
+ */
+ uint32_t online:1;
+ /* move vha->flags.difdix_supported here */
+ uint32_t difdix_supported:1;
+ uint32_t delete_in_progress:1;
+
+ uint16_t id; /* qp number used with FW */
+ uint16_t num_active_cmd; /* cmds down at firmware */
+ cpumask_t cpu_mask; /* CPU mask for cpu affinity operation */
+ uint16_t vp_idx; /* vport ID */
+
+ mempool_t *srb_mempool;
+
+ /* to do: New driver: move queues to here instead of pointers */
+ struct req_que *req;
+ struct rsp_que *rsp;
+ struct atio_que *atio;
+ struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */
+ struct qla_hw_data *hw;
+ struct work_struct q_work;
+ struct list_head qp_list_elem; /* vha->qp_list */
+};
+
/* Place holder for FW buffer parameters */
struct qlfc_fw {
void *fw_buf;
@@ -3004,7 +3040,6 @@ struct qla_hw_data {
uint32_t chip_reset_done :1;
uint32_t running_gold_fw :1;
uint32_t eeh_busy :1;
- uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
uint32_t fcp_prio_enabled :1;
uint32_t isp82xx_fw_hung:1;
@@ -3061,10 +3096,15 @@ struct qla_hw_data {
uint8_t mqenable;
struct req_que **req_q_map;
struct rsp_que **rsp_q_map;
+ struct qla_qpair **queue_pair_map;
unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+ unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
+ / sizeof(unsigned long)];
uint8_t max_req_queues;
uint8_t max_rsp_queues;
+ uint8_t max_qpairs;
+ struct qla_qpair *base_qpair;
struct qla_npiv_entry *npiv_info;
uint16_t nvram_npiv_size;
@@ -3328,6 +3368,7 @@ struct qla_hw_data {
struct mutex vport_lock; /* Virtual port synchronization */
spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
+ struct mutex mq_lock; /* multi-queue synchronization */
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
@@ -3608,6 +3649,7 @@ typedef struct scsi_qla_host {
uint32_t fw_tgt_reported:1;
uint32_t bbcr_enable:1;
+ uint32_t qpairs_available:1;
} flags;
atomic_t loop_state;
@@ -3646,6 +3688,7 @@ typedef struct scsi_qla_host {
#define FX00_TARGET_SCAN 24
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
+#define QPAIR_ONLINE_CHECK_NEEDED 27
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -3704,10 +3747,13 @@ typedef struct scsi_qla_host {
/* List of pending PLOGI acks, protected by hw lock */
struct list_head plogi_ack_list;
+ struct list_head qp_list;
+
uint32_t vp_abort_cnt;
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
uint16_t vp_idx; /* vport ID */
+ struct qla_qpair *qpair; /* base qpair */
unsigned long vp_flags;
#define VP_IDX_ACQUIRED 0 /* bit no 0 */
@@ -3763,6 +3809,23 @@ struct qla_tgt_vp_map {
scsi_qla_host_t *vha;
};
+struct qla2_sgx {
+ dma_addr_t dma_addr; /* OUT */
+ uint32_t dma_len; /* OUT */
+
+ uint32_t tot_bytes; /* IN */
+ struct scatterlist *cur_sg; /* IN */
+
+ /* for book keeping, bzero on initial invocation */
+ uint32_t bytes_consumed;
+ uint32_t num_bytes;
+ uint32_t tot_partial;
+
+ /* for debugging */
+ uint32_t num_sg;
+ srb_t *sp;
+};
+
/*
* Macros to help code, maintain, etc.
*/
@@ -3775,21 +3838,34 @@ struct qla_tgt_vp_map {
(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
-#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
- atomic_inc(&__vha->vref_count); \
- mb(); \
- if (__vha->flags.delete_progress) { \
- atomic_dec(&__vha->vref_count); \
- __bail = 1; \
- } else { \
- __bail = 0; \
- } \
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
+ atomic_inc(&__vha->vref_count); \
+ mb(); \
+ if (__vha->flags.delete_progress) { \
+ atomic_dec(&__vha->vref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
} while (0)
-#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
- atomic_dec(&__vha->vref_count); \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+ atomic_dec(&__vha->vref_count); \
+
+#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
+ atomic_inc(&__qpair->ref_count); \
+ mb(); \
+ if (__qpair->delete_in_progress) { \
+ atomic_dec(&__qpair->ref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
} while (0)
+#define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \
+ atomic_dec(&__qpair->ref_count); \
+
/*
* qla2x00 local function return status codes
*/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c51d9f3359e3..afa0116a163b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -91,12 +91,17 @@ extern int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
+extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
+ int, int);
+extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
/*
* Global Data in qla_os.c source file.
*/
extern char qla2x00_version_str[];
+extern struct kmem_cache *srb_cachep;
+
extern int ql2xlogintimeout;
extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
@@ -105,8 +110,7 @@ extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
-extern int ql2xmaxqueues;
-extern int ql2xmultique_tag;
+extern int ql2xmqsupport;
extern int ql2xfwloadbin;
extern int ql2xetsenable;
extern int ql2xshiftctondsd;
@@ -172,6 +176,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla2x00_sp_compl(void *, void *, int);
+extern void qla2xxx_qpair_sp_free_dma(void *, void *);
+extern void qla2xxx_qpair_sp_compl(void *, void *, int);
/*
* Global Functions in qla_mid.c source file.
@@ -220,6 +227,8 @@ extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
+ uint16_t, struct req_que *);
extern int qla2x00_start_scsi(srb_t *sp);
extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
@@ -227,6 +236,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
extern int qla2x00_start_sp(srb_t *);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern int qla2xxx_dif_start_scsi_mq(srb_t *);
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
@@ -237,7 +247,10 @@ extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
-
+extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
+extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
+ struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -468,6 +481,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
extern void
qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
+extern irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -603,15 +618,18 @@ extern int qla2x00_dfs_setup(scsi_qla_host_t *);
extern int qla2x00_dfs_remove(scsi_qla_host_t *);
/* Globa function prototypes for multi-q */
-extern int qla25xx_request_irq(struct rsp_que *);
+extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
+ struct qla_msix_entry *, int);
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
uint16_t, int, uint8_t);
extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
- uint16_t, int);
+ uint16_t, struct qla_qpair *);
+
extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_delete_queues(struct scsi_qla_host *);
extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5b09296b46a3..632d5f30386a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1769,8 +1769,7 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
if (req->outstanding_cmds)
return QLA_SUCCESS;
- if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
- (ql2xmultique_tag || ql2xmaxqueues > 1)))
+ if (!IS_FWI2_CAPABLE(ha))
req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
else {
if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
@@ -4248,10 +4247,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
struct req_que *req;
struct rsp_que *rsp;
- if (vha->hw->flags.cpu_affinity_enabled)
- req = vha->hw->req_q_map[0];
- else
- req = vha->req;
+ req = vha->req;
rsp = req->rsp;
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -6040,10 +6036,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
return -EINVAL;
rval = qla2x00_fw_ready(base_vha);
- if (ha->flags.cpu_affinity_enabled)
- req = ha->req_q_map[0];
+ if (vha->qpair)
+ req = vha->qpair->req;
else
- req = vha->req;
+ req = ha->req_q_map[0];
rsp = req->rsp;
if (rval == QLA_SUCCESS) {
@@ -6725,3 +6721,162 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
return ret;
}
+
+struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx)
+{
+ int rsp_id = 0;
+ int req_id = 0;
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t qpair_id = 0;
+ struct qla_qpair *qpair = NULL;
+ struct qla_msix_entry *msix;
+
+ if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
+ ql_log(ql_log_warn, vha, 0x0181,
+ "FW/Driver is not multi-queue capable.\n");
+ return NULL;
+ }
+
+ if (ql2xmqsupport) {
+ qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+ if (!qpair) {
+ ql_log(ql_log_warn, vha, 0x0182,
+ "Failed to allocate memory for queue pair.\n");
+ return NULL;
+ }
+
+ qpair->hw = vha->hw;
+
+ /* Assign available que pair id */
+ mutex_lock(&ha->mq_lock);
+ qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
+ if (qpair_id >= ha->max_qpairs) {
+ mutex_unlock(&ha->mq_lock);
+ ql_log(ql_log_warn, vha, 0x0183,
+ "No resources to create additional q pair.\n");
+ goto fail_qid_map;
+ }
+ set_bit(qpair_id, ha->qpair_qid_map);
+ ha->queue_pair_map[qpair_id] = qpair;
+ qpair->id = qpair_id;
+ qpair->vp_idx = vp_idx;
+
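+ /* Pick the first MSI-X vector not already claimed by another queue pair. */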
+ for (i = 0; i < ha->msix_count; i++) {
+ msix = &ha->msix_entries[i];
+ if (msix->in_use)
+ continue;
+ qpair->msix = msix;
+ ql_dbg(ql_dbg_multiq, vha, 0xc00f,
+ "Vector %x selected for qpair\n", msix->vector);
+ break;
+ }
+ if (!qpair->msix) {
+ ql_log(ql_log_warn, vha, 0x0184,
+ "Out of MSI-X vectors!.\n");
+ goto fail_msix;
+ }
+
+ qpair->msix->in_use = 1;
+ list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
+
+ mutex_unlock(&ha->mq_lock);
+
+ /* Create response queue first */
+ rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair);
+ if (!rsp_id) {
+ ql_log(ql_log_warn, vha, 0x0185,
+ "Failed to create response queue.\n");
+ goto fail_rsp;
+ }
+
+ qpair->rsp = ha->rsp_q_map[rsp_id];
+
+ /* Create request queue */
+ req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos);
+ if (!req_id) {
+ ql_log(ql_log_warn, vha, 0x0186,
+ "Failed to create request queue.\n");
+ goto fail_req;
+ }
+
+ qpair->req = ha->req_q_map[req_id];
+ qpair->rsp->req = qpair->req;
+
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4)
+ qpair->difdix_supported = 1;
+ }
+
+ qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
+ if (!qpair->srb_mempool) {
+ ql_log(ql_log_warn, vha, 0x0191,
+ "Failed to create srb mempool for qpair %d\n",
+ qpair->id);
+ goto fail_mempool;
+ }
+
+ /* Mark as online */
+ qpair->online = 1;
+
+ if (!vha->flags.qpairs_available)
+ vha->flags.qpairs_available = 1;
+
+ ql_dbg(ql_dbg_multiq, vha, 0xc00d,
+ "Request/Response queue pair created, id %d\n",
+ qpair->id);
+ ql_dbg(ql_dbg_init, vha, 0x0187,
+ "Request/Response queue pair created, id %d\n",
+ qpair->id);
+ }
+ return qpair;
+
+fail_mempool:
+fail_req:
+ qla25xx_delete_rsp_que(vha, qpair->rsp);
+fail_rsp:
+ mutex_lock(&ha->mq_lock);
+ qpair->msix->in_use = 0;
+ list_del(&qpair->qp_list_elem);
+ if (list_empty(&vha->qp_list))
+ vha->flags.qpairs_available = 0;
+fail_msix:
+ ha->queue_pair_map[qpair_id] = NULL;
+ clear_bit(qpair_id, ha->qpair_qid_map);
+ mutex_unlock(&ha->mq_lock);
+fail_qid_map:
+ kfree(qpair);
+ return NULL;
+}
+
+int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
+{
+ int ret;
+ struct qla_hw_data *ha = qpair->hw;
+
+ qpair->delete_in_progress = 1;
+ while (atomic_read(&qpair->ref_count))
+ msleep(500);
+
+ ret = qla25xx_delete_req_que(vha, qpair->req);
+ if (ret != QLA_SUCCESS)
+ goto fail;
+ ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
+ if (ret != QLA_SUCCESS)
+ goto fail;
+
+ mutex_lock(&ha->mq_lock);
+ ha->queue_pair_map[qpair->id] = NULL;
+ clear_bit(qpair->id, ha->qpair_qid_map);
+ list_del(&qpair->qp_list_elem);
+ if (list_empty(&vha->qp_list))
+ vha->flags.qpairs_available = 0;
+ mempool_destroy(qpair->srb_mempool);
+ kfree(qpair);
+ mutex_unlock(&ha->mq_lock);
+
+ return QLA_SUCCESS;
+fail:
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index edc48f3b8230..44e404583c86 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -216,6 +216,36 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
}
static inline srb_t *
+qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+{
+ srb_t *sp = NULL;
+ uint8_t bail;
+
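+ /* Take a reference on the qpair so it cannot be deleted while this srb is outstanding; dropped in qla2xxx_rel_qpair_sp(). */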
+ QLA_QPAIR_MARK_BUSY(qpair, bail);
+ if (unlikely(bail))
+ return NULL;
+
+ sp = mempool_alloc(qpair->srb_mempool, flag);
+ if (!sp)
+ goto done;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->iocbs = 1;
+done:
+ if (!sp)
+ QLA_QPAIR_MARK_NOT_BUSY(qpair);
+ return sp;
+}
+
+static inline void
+qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
+{
+ mempool_free(sp, qpair->srb_mempool);
+ QLA_QPAIR_MARK_NOT_BUSY(qpair);
+}
+
+static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 221ad8907893..58e49a3e1de8 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -12,7 +12,6 @@
#include <scsi/scsi_tcq.h>
-static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
* @cmd: SCSI command
@@ -143,7 +142,7 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
return (cont_pkt);
}
-static inline int
+inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
@@ -693,10 +692,11 @@ qla24xx_calc_dsd_lists(uint16_t dsds)
* @sp: SRB command to process
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
+ * @req: pointer to request queue
*/
-static inline void
+inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
- uint16_t tot_dsds)
+ uint16_t tot_dsds, struct req_que *req)
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
@@ -745,7 +745,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -845,24 +845,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
}
}
-struct qla2_sgx {
- dma_addr_t dma_addr; /* OUT */
- uint32_t dma_len; /* OUT */
-
- uint32_t tot_bytes; /* IN */
- struct scatterlist *cur_sg; /* IN */
-
- /* for book keeping, bzero on initial invocation */
- uint32_t bytes_consumed;
- uint32_t num_bytes;
- uint32_t tot_partial;
-
- /* for debugging */
- uint32_t num_sg;
- srb_t *sp;
-};
-
-static int
+int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
uint32_t *partial)
{
@@ -1207,7 +1190,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
*/
-static inline int
+inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
@@ -1436,8 +1419,8 @@ qla24xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
/* Setup device pointers. */
- qla25xx_set_que(sp, &rsp);
req = vha->req;
+ rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1523,12 +1506,10 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
- /* Specify response queue number where completion should happen */
- cmd_pkt->entry_status = (uint8_t) rsp->id;
wmb();
/* Adjust ring index. */
req->ring_index++;
@@ -1597,9 +1578,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
}
/* Setup device pointers. */
-
- qla25xx_set_que(sp, &rsp);
req = vha->req;
+ rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1764,18 +1744,365 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
-
-static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
+/**
+ * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla2xxx_start_scsi_mq(srb_t *sp)
{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ struct cmd_type_7 *cmd_pkt;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct qla_hw_data *ha = sp->fcport->vha->hw;
- int affinity = cmd->request->cpu;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = sp->qpair;
+
+ /* Setup qpair pointers */
+ rsp = qpair->rsp;
+ req = qpair->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Check for room in outstanding command list. */
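+ /* Handle 0 is reserved; scan circularly from the last-used slot for a free one. */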
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
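+ /* Recompute free ring space from the firmware consumer index, using the shadow copy when the ISP supports it. */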
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ cmd_pkt->task = TSK_SIMPLE;
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ wmb();
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+
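The free-handle scan above starts one past the last handle issued and wraps around the outstanding-command array, never handing out slot 0 (reserved as an invalid handle). A minimal userspace sketch of the same wraparound search, with hypothetical names and sizes:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_OUTSTANDING 8                  /* stands in for req->num_outstanding_cmds */

    static void *outstanding[NUM_OUTSTANDING]; /* stands in for req->outstanding_cmds */
    static uint32_t current_handle = 5;        /* stands in for req->current_outstanding_cmd */

    /* Returns a free handle in [1, NUM_OUTSTANDING) or 0 if the ring is full. */
    static uint32_t find_free_handle(void)
    {
        uint32_t handle = current_handle;
        uint32_t index;

        for (index = 1; index < NUM_OUTSTANDING; index++) {
            handle++;
            if (handle == NUM_OUTSTANDING)
                handle = 1;                    /* wrap; never hand out slot 0 */
            if (!outstanding[handle])
                return handle;
        }
        return 0;                              /* all slots busy: queuing_error */
    }

    int main(void)
    {
        outstanding[6] = (void *)1;            /* pretend slot 6 is busy */
        printf("free handle: %u\n", (unsigned int)find_free_handle());  /* prints 7 */
        return 0;
    }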
+/**
+ * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla2xxx_dif_start_scsi_mq(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt = 0;
+ uint16_t tot_dsds;
+ uint16_t tot_prot_dsds;
+ uint16_t fw_prot_opts = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_crc_2 *cmd_pkt;
+ uint32_t status = 0;
+ struct qla_qpair *qpair = sp->qpair;
+
+#define QDSS_GOT_Q_SPACE BIT_0
+
+ /* Check for host side state */
+ if (!qpair->online) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return QLA_INTERFACE_ERROR;
+ }
+
+ if (!qpair->difdix_supported &&
+ scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return QLA_INTERFACE_ERROR;
+ }
+
+ /* Only process protection or >16 cdb in this routine */
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
+ if (cmd->cmd_len <= 16)
+ return qla2xxx_start_scsi_mq(sp);
+ }
+
+ /* Setup qpair pointers */
+ rsp = qpair->rsp;
+ req = qpair->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Compute number of required data segments */
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ struct qla2_sgx sgx;
+ uint32_t partial;
+
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
+ sgx.sp = sp;
+
+ nseg = 0;
+ while (qla24xx_get_one_block_sg(
+ cmd->device->sector_size, &sgx, &partial))
+ nseg++;
+ }
+ } else
+ nseg = 0;
+
+ /* number of required data segments */
+ tot_dsds = nseg;
+
+ /* Compute number of required protection segments */
+ if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+ }
+ } else {
+ nseg = 0;
+ }
+
+ req_cnt = 1;
+ /* Total Data and protection sg segment(s) */
+ tot_prot_dsds = nseg;
+ tot_dsds += nseg;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ status |= QDSS_GOT_Q_SPACE;
+
+ /* Build header part of command packet (excluding the OPCODE). */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ /* Fill-in common area */
+ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
- affinity < ha->max_rsp_queues - 1)
- *rsp = ha->rsp_q_map[affinity + 1];
- else
- *rsp = ha->rsp_q_map[0];
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* Total Data and protection segment(s) */
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Build IOCB segments and adjust for data protection segments */
+ if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+ req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+ QLA_SUCCESS)
+ goto queuing_error;
+
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ cmd_pkt->timeout = cpu_to_le16(0);
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (status & QDSS_GOT_Q_SPACE) {
+ req->outstanding_cmds[handle] = NULL;
+ req->cnt += req_cnt;
+ }
+ /* Cleanup will be performed by the caller (queuecommand) */
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_FUNCTION_FAILED;
}
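Both MQ submission paths above refresh the cached free-slot count from the hardware's consumer index only when the cached value looks insufficient. A standalone sketch of that circular-ring arithmetic (names are illustrative, not the driver's):

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Free slots in a circular request ring of 'length' entries, given the
     * driver's producer index and the hardware's consumer index (the out
     * pointer read from shadow registers or the chip).
     */
    static uint16_t ring_free_slots(uint16_t length, uint16_t ring_index, uint16_t out)
    {
        if (ring_index < out)
            return out - ring_index;
        return length - (ring_index - out);
    }

    int main(void)
    {
        /* producer at 10, consumer at 4, 32-entry ring: 32 - (10 - 4) = 26 free */
        printf("%u\n", (unsigned int)ring_free_slots(32, 10, 4));
        /* producer behind consumer: 20 - 18 = 2 free */
        printf("%u\n", (unsigned int)ring_free_slots(32, 18, 20));
        return 0;
    }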
/* Generic Control-SRB manipulation functions. */
@@ -2664,7 +2991,7 @@ sufficient_dsds:
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d8efddf6f312..5093ca9b02ec 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2872,41 +2872,6 @@ out:
}
static irqreturn_t
-qla25xx_msix_rsp_q(int irq, void *dev_id)
-{
- struct qla_hw_data *ha;
- scsi_qla_host_t *vha;
- struct rsp_que *rsp;
- struct device_reg_24xx __iomem *reg;
- unsigned long flags;
- uint32_t hccr = 0;
-
- rsp = (struct rsp_que *) dev_id;
- if (!rsp) {
- ql_log(ql_log_info, NULL, 0x505b,
- "%s: NULL response queue pointer.\n", __func__);
- return IRQ_NONE;
- }
- ha = rsp->hw;
- vha = pci_get_drvdata(ha->pdev);
-
- /* Clear the interrupt, if enabled, for this response queue */
- if (!ha->flags.disable_msix_handshake) {
- reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- }
- if (qla2x00_check_reg32_for_disconnect(vha, hccr))
- goto out;
- queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
-
-out:
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
scsi_qla_host_t *vha;
@@ -3002,6 +2967,35 @@ qla24xx_msix_default(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id)
+{
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
+ struct device_reg_24xx __iomem *reg;
+ unsigned long flags;
+
+ qpair = dev_id;
+ if (!qpair) {
+ ql_log(ql_log_info, NULL, 0x505b,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = qpair->hw;
+
+ /* Clear the interrupt, if enabled, for this response queue */
+ if (unlikely(!ha->flags.disable_msix_handshake)) {
+ reg = &ha->iobase->isp24;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+ queue_work(ha->wq, &qpair->q_work);
+
+ return IRQ_HANDLED;
+}
+
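qla2xxx_msix_rsp_q() follows the usual top-half/bottom-half split: the hard IRQ handler only acks the chip and queues work, and response processing runs later in process context via qla_do_work(). A minimal module-style sketch of that defer-to-workqueue pattern, assuming a dedicated workqueue (demo names throughout):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;

    static void demo_work_fn(struct work_struct *work)
    {
        pr_info("deferred response processing runs here, in process context\n");
    }

    static DECLARE_WORK(demo_work, demo_work_fn);

    /* In the driver this body lives in the MSI-X handler: ack the chip under
     * hardware_lock, then defer everything else. */
    static void demo_irq_path(void)
    {
        queue_work(demo_wq, &demo_work);    /* cheap; returns immediately */
    }

    static int __init demo_init(void)
    {
        demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 1);
        if (!demo_wq)
            return -ENOMEM;
        demo_irq_path();
        return 0;
    }

    static void __exit demo_exit(void)
    {
        destroy_workqueue(demo_wq);    /* drains pending work first */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");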
/* Interrupt handling helpers. */
struct qla_init_msix_entry {
@@ -3009,69 +3003,28 @@ struct qla_init_msix_entry {
irq_handler_t handler;
};
-static struct qla_init_msix_entry msix_entries[3] = {
+static struct qla_init_msix_entry msix_entries[] = {
{ "qla2xxx (default)", qla24xx_msix_default },
{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
- { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
+ { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+ { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
};
-static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
+static struct qla_init_msix_entry qla82xx_msix_entries[] = {
{ "qla2xxx (default)", qla82xx_msix_default },
{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
-static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
- { "qla2xxx (default)", qla24xx_msix_default },
- { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
- { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
-};
-
-static void
-qla24xx_disable_msix(struct qla_hw_data *ha)
-{
- int i;
- struct qla_msix_entry *qentry;
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-
- for (i = 0; i < ha->msix_count; i++) {
- qentry = &ha->msix_entries[i];
- if (qentry->have_irq) {
- /* un-register irq cpu affinity notification */
- irq_set_affinity_notifier(qentry->vector, NULL);
- free_irq(qentry->vector, qentry->rsp);
- }
- }
- pci_disable_msix(ha->pdev);
- kfree(ha->msix_entries);
- ha->msix_entries = NULL;
- ha->flags.msix_enabled = 0;
- ql_dbg(ql_dbg_init, vha, 0x0042,
- "Disabled the MSI.\n");
-}
-
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT 2
-#define ATIO_VECTOR 2
int i, ret;
- struct msix_entry *entries;
struct qla_msix_entry *qentry;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
- GFP_KERNEL);
- if (!entries) {
- ql_log(ql_log_warn, vha, 0x00bc,
- "Failed to allocate memory for msix_entry.\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < ha->msix_count; i++)
- entries[i].entry = i;
-
- ret = pci_enable_msix_range(ha->pdev,
- entries, MIN_MSIX_COUNT, ha->msix_count);
+ ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (ret < 0) {
ql_log(ql_log_fatal, vha, 0x00c7,
"MSI-X: Failed to enable support, "
@@ -3081,10 +3034,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
} else if (ret < ha->msix_count) {
ql_log(ql_log_warn, vha, 0x00c6,
"MSI-X: Failed to enable support "
- "-- %d/%d\n Retry with %d vectors.\n",
- ha->msix_count, ret, ret);
+ "with %d vectors, using %d vectors.\n",
+ ha->msix_count, ret);
ha->msix_count = ret;
- ha->max_rsp_queues = ha->msix_count - 1;
+ /* Recalculate queue values */
+ if (ha->mqiobase && ql2xmqsupport) {
+ ha->max_req_queues = ha->msix_count - 1;
+
+ /* ATIOQ needs 1 vector. That's 1 less QPair */
+ if (QLA_TGT_MODE_ENABLED())
+ ha->max_req_queues--;
+
+ ha->max_rsp_queues = ha->max_req_queues;
+
+ ha->max_qpairs = ha->max_req_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+ "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
+ }
}
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
@@ -3098,20 +3064,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
- qentry->vector = entries[i].vector;
- qentry->entry = entries[i].entry;
+ qentry->vector = pci_irq_vector(ha->pdev, i);
+ qentry->entry = i;
qentry->have_irq = 0;
- qentry->rsp = NULL;
+ qentry->in_use = 0;
+ qentry->handle = NULL;
qentry->irq_notify.notify = qla_irq_affinity_notify;
qentry->irq_notify.release = qla_irq_affinity_release;
qentry->cpuid = -1;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
qentry = &ha->msix_entries[i];
- qentry->rsp = rsp;
+ qentry->handle = rsp;
rsp->msix = qentry;
+ scnprintf(qentry->name, sizeof(qentry->name),
+ "%s", msix_entries[i].name);
if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
@@ -3123,6 +3092,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (ret)
goto msix_register_fail;
qentry->have_irq = 1;
+ qentry->in_use = 1;
/* Register for CPU affinity notification. */
irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
@@ -3142,12 +3112,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
* queue.
*/
if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
- qentry = &ha->msix_entries[ATIO_VECTOR];
- qentry->rsp = rsp;
+ qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
rsp->msix = qentry;
+ qentry->handle = rsp;
+ scnprintf(qentry->name, sizeof(qentry->name),
+ "%s", msix_entries[QLA_ATIO_VECTOR].name);
+ qentry->in_use = 1;
ret = request_irq(qentry->vector,
- qla83xx_msix_entries[ATIO_VECTOR].handler,
- 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
+ msix_entries[QLA_ATIO_VECTOR].handler,
+ 0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
qentry->have_irq = 1;
}
@@ -3156,7 +3129,7 @@ msix_register_fail:
ql_log(ql_log_fatal, vha, 0x00cb,
"MSI-X: unable to register handler -- %x/%d.\n",
qentry->vector, ret);
- qla24xx_disable_msix(ha);
+ qla2x00_free_irqs(vha);
ha->mqenable = 0;
goto msix_out;
}
@@ -3164,11 +3137,13 @@ msix_register_fail:
/* Enable MSI-X vector for response queue update for queue 0 */
if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ ql2xmqsupport))
ha->mqenable = 1;
} else
- if (ha->mqiobase
- && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ if (ha->mqiobase &&
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ ql2xmqsupport))
ha->mqenable = 1;
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
@@ -3178,7 +3153,6 @@ msix_register_fail:
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
msix_out:
- kfree(entries);
return ret;
}
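The conversion above drops the hand-rolled msix_entry array in favor of pci_alloc_irq_vectors(), which also spreads vectors across CPUs when PCI_IRQ_AFFINITY is passed, and pci_irq_vector() to look up the Linux IRQ number per vector. A sketch of the allocate/request/teardown flow with these helpers (demo names, not the driver's):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    static irqreturn_t demo_handler(int irq, void *data)
    {
        return IRQ_HANDLED;
    }

    /* Allocate MSI-X vectors with affinity spreading, then request one IRQ
     * per vector; the error path unwinds in reverse. */
    static int demo_setup_irqs(struct pci_dev *pdev, int min_vecs, int max_vecs)
    {
        int i, nvec, err;

        nvec = pci_alloc_irq_vectors(pdev, min_vecs, max_vecs,
                                     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (nvec < 0)
            return nvec;               /* caller may retry with PCI_IRQ_MSI */

        for (i = 0; i < nvec; i++) {
            err = request_irq(pci_irq_vector(pdev, i), demo_handler,
                              0, "demo", pdev);
            if (err)
                goto fail;
        }
        return 0;

    fail:
        while (--i >= 0)
            free_irq(pci_irq_vector(pdev, i), pdev);
        pci_free_irq_vectors(pdev);
        return err;
    }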
@@ -3231,7 +3205,7 @@ skip_msix:
!IS_QLA27XX(ha))
goto skip_msi;
- ret = pci_enable_msi(ha->pdev);
+ ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
if (!ret) {
ql_dbg(ql_dbg_init, vha, 0x0038,
"MSI: Enabled.\n");
@@ -3276,6 +3250,8 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
struct rsp_que *rsp;
+ struct qla_msix_entry *qentry;
+ int i;
/*
* We need to check that ha->rsp_q_map is valid in case we are called
@@ -3285,25 +3261,36 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
return;
rsp = ha->rsp_q_map[0];
- if (ha->flags.msix_enabled)
- qla24xx_disable_msix(ha);
- else if (ha->flags.msi_enabled) {
- free_irq(ha->pdev->irq, rsp);
- pci_disable_msi(ha->pdev);
- } else
- free_irq(ha->pdev->irq, rsp);
-}
+ if (ha->flags.msix_enabled) {
+ for (i = 0; i < ha->msix_count; i++) {
+ qentry = &ha->msix_entries[i];
+ if (qentry->have_irq) {
+ irq_set_affinity_notifier(qentry->vector, NULL);
+ free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
+ }
+ }
+ kfree(ha->msix_entries);
+ ha->msix_entries = NULL;
+ ha->flags.msix_enabled = 0;
+ ql_dbg(ql_dbg_init, vha, 0x0042,
+ "Disabled MSI-X.\n");
+ } else {
+ free_irq(pci_irq_vector(ha->pdev, 0), rsp);
+ }
+ pci_free_irq_vectors(ha->pdev);
+}
-int qla25xx_request_irq(struct rsp_que *rsp)
+int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
+ struct qla_msix_entry *msix, int vector_type)
{
- struct qla_hw_data *ha = rsp->hw;
- struct qla_init_msix_entry *intr = &msix_entries[2];
- struct qla_msix_entry *msix = rsp->msix;
+ struct qla_init_msix_entry *intr = &msix_entries[vector_type];
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int ret;
- ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
+ scnprintf(msix->name, sizeof(msix->name),
+ "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
+ ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
if (ret) {
ql_log(ql_log_fatal, vha, 0x00e6,
"MSI-X: Unable to register handler -- %x/%d.\n",
@@ -3311,7 +3298,7 @@ int qla25xx_request_irq(struct rsp_que *rsp)
return ret;
}
msix->have_irq = 1;
- msix->rsp = rsp;
+ msix->handle = qpair;
return ret;
}
@@ -3324,11 +3311,12 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
container_of(notify, struct qla_msix_entry, irq_notify);
struct qla_hw_data *ha;
struct scsi_qla_host *base_vha;
+ struct rsp_que *rsp = e->handle;
/* user is recommended to set mask to just 1 cpu */
e->cpuid = cpumask_first(mask);
- ha = e->rsp->hw;
+ ha = rsp->hw;
base_vha = pci_get_drvdata(ha->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
@@ -3352,9 +3340,10 @@ static void qla_irq_affinity_release(struct kref *ref)
container_of(ref, struct irq_affinity_notify, kref);
struct qla_msix_entry *e =
container_of(notify, struct qla_msix_entry, irq_notify);
- struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
+ struct rsp_que *rsp = e->handle;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host%ld: vector %d cpu %d \n", __func__,
+ "%s: host%ld: vector %d cpu %d\n", __func__,
base_vha->host_no, e->vector, e->cpuid);
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 23698c998699..2819ceb96041 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,43 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+struct rom_cmd {
+ uint16_t cmd;
+} rom_cmds[] = {
+ { MBC_LOAD_RAM },
+ { MBC_EXECUTE_FIRMWARE },
+ { MBC_READ_RAM_WORD },
+ { MBC_MAILBOX_REGISTER_TEST },
+ { MBC_VERIFY_CHECKSUM },
+ { MBC_GET_FIRMWARE_VERSION },
+ { MBC_LOAD_RISC_RAM },
+ { MBC_DUMP_RISC_RAM },
+ { MBC_LOAD_RISC_RAM_EXTENDED },
+ { MBC_DUMP_RISC_RAM_EXTENDED },
+ { MBC_WRITE_RAM_WORD_EXTENDED },
+ { MBC_READ_RAM_EXTENDED },
+ { MBC_GET_RESOURCE_COUNTS },
+ { MBC_SET_FIRMWARE_OPTION },
+ { MBC_MID_INITIALIZE_FIRMWARE },
+ { MBC_GET_FIRMWARE_STATE },
+ { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
+ { MBC_GET_RETRY_COUNT },
+ { MBC_TRACE_CONTROL },
+};
+
+static int is_rom_cmd(uint16_t cmd)
+{
+ int i;
+ struct rom_cmd *wc;
+
+ for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
+ wc = rom_cmds + i;
+ if (wc->cmd == cmd)
+ return 1;
+ }
+
+ return 0;
+}
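is_rom_cmd() is the whitelist consulted below so that ROM-level mailbox commands still run while an ISP abort is pending. A runnable userspace rendering of the same table scan; the opcode values here are placeholders, the real MBC_* constants live in the driver headers:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical opcode values for illustration only. */
    #define MBC_LOAD_RAM            0x01
    #define MBC_EXECUTE_FIRMWARE    0x02
    #define MBC_GET_FIRMWARE_STATE  0x69
    #define MBC_SOME_IO_CMD         0x54

    static const uint16_t rom_cmds[] = {
        MBC_LOAD_RAM, MBC_EXECUTE_FIRMWARE, MBC_GET_FIRMWARE_STATE,
    };

    static int is_rom_cmd(uint16_t cmd)
    {
        size_t i;

        for (i = 0; i < sizeof(rom_cmds) / sizeof(rom_cmds[0]); i++)
            if (rom_cmds[i] == cmd)
                return 1;
        return 0;
    }

    int main(void)
    {
        /* ROM commands stay allowed during an ISP abort; others time out. */
        printf("GET_FIRMWARE_STATE allowed: %d\n", is_rom_cmd(MBC_GET_FIRMWARE_STATE));
        printf("plain I/O command allowed:  %d\n", is_rom_cmd(MBC_SOME_IO_CMD));
        return 0;
    }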
/*
* qla2x00_mailbox_command
@@ -92,6 +129,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ /* check if ISP abort is active and return cmd with timeout */
+ if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
+ !is_rom_cmd(mcp->mb[0])) {
+ ql_log(ql_log_info, vha, 0x1005,
+ "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
+ mcp->mb[0]);
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -178,6 +226,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ wait_time = jiffies;
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -186,6 +235,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+ if (time_after(jiffies, wait_time + 5 * HZ))
+ ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
+ command, jiffies_to_msecs(jiffies - wait_time));
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1011,
"Cmd=%x Polling Mode.\n", command);
@@ -1194,12 +1246,17 @@ qla2x00_abort_command(srb_t *sp)
fc_port_t *fcport = sp->fcport;
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
+ struct req_que *req;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
"Entered %s.\n", __func__);
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+ else
+ req = vha->req;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -2152,10 +2209,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
"Entered %s.\n", __func__);
- if (ha->flags.cpu_affinity_enabled)
- req = ha->req_q_map[0];
+ if (vha->vp_idx && vha->qpair)
+ req = vha->qpair->req;
else
- req = vha->req;
+ req = ha->req_q_map[0];
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
@@ -2435,10 +2492,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
- if (ql2xmaxqueues > 1)
- req = ha->req_q_map[0];
- else
- req = vha->req;
+ req = vha->req;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
lg->handle = MAKE_HANDLE(req->id, lg->handle);
@@ -2904,6 +2958,9 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+
if (ql2xasynctmfenable)
return qla24xx_async_abort_command(sp);
@@ -2984,6 +3041,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
struct qla_hw_data *ha;
struct req_que *req;
struct rsp_que *rsp;
+ struct qla_qpair *qpair;
vha = fcport->vha;
ha = vha->hw;
@@ -2992,10 +3050,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
"Entered %s.\n", __func__);
- if (ha->flags.cpu_affinity_enabled)
- rsp = ha->rsp_q_map[tag + 1];
- else
+ if (vha->vp_idx && vha->qpair) {
+ /* NPIV port */
+ qpair = vha->qpair;
+ rsp = qpair->rsp;
+ req = qpair->req;
+ } else {
rsp = req->rsp;
+ }
+
tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
ql_log(ql_log_warn, vha, 0x1093,
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cf7ba52bae66..c6d6f0d912ff 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -540,9 +540,10 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
uint16_t que_id = rsp->id;
if (rsp->msix && rsp->msix->have_irq) {
- free_irq(rsp->msix->vector, rsp);
+ free_irq(rsp->msix->vector, rsp->msix->handle);
rsp->msix->have_irq = 0;
- rsp->msix->rsp = NULL;
+ rsp->msix->in_use = 0;
+ rsp->msix->handle = NULL;
}
dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
sizeof(response_t), rsp->ring, rsp->dma);
@@ -573,7 +574,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
return ret;
}
-static int
+int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
int ret = -1;
@@ -596,34 +597,42 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair, *tqpair;
- /* Delete request queues */
- for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
- req = ha->req_q_map[cnt];
- if (req && test_bit(cnt, ha->req_qid_map)) {
- ret = qla25xx_delete_req_que(vha, req);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x00ea,
- "Couldn't delete req que %d.\n",
- req->id);
- return ret;
+ if (ql2xmqsupport) {
+ list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
+ qp_list_elem)
+ qla2xxx_delete_qpair(vha, qpair);
+ } else {
+ /* Delete request queues */
+ for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+ req = ha->req_q_map[cnt];
+ if (req && test_bit(cnt, ha->req_qid_map)) {
+ ret = qla25xx_delete_req_que(vha, req);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00ea,
+ "Couldn't delete req que %d.\n",
+ req->id);
+ return ret;
+ }
}
}
- }
- /* Delete response queues */
- for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
- rsp = ha->rsp_q_map[cnt];
- if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
- ret = qla25xx_delete_rsp_que(vha, rsp);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x00eb,
- "Couldn't delete rsp que %d.\n",
- rsp->id);
- return ret;
+ /* Delete response queues */
+ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+ ret = qla25xx_delete_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00eb,
+ "Couldn't delete rsp que %d.\n",
+ rsp->id);
+ return ret;
+ }
}
}
}
+
return ret;
}
@@ -659,10 +668,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS)
goto que_failed;
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_log(ql_log_warn, base_vha, 0x00db,
"No resources to create additional request queue.\n");
goto que_failed;
@@ -708,7 +717,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
req->out_ptr = (void *)(req->ring + req->length);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
"cnt=%d id=%d max_q_depth=%d.\n",
@@ -724,9 +733,9 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00df,
"%s failed.\n", __func__);
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
clear_bit(que_id, ha->req_qid_map);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
goto que_failed;
}
@@ -741,20 +750,20 @@ failed:
static void qla_do_work(struct work_struct *work)
{
unsigned long flags;
- struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
+ struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
struct scsi_qla_host *vha;
- struct qla_hw_data *ha = rsp->hw;
+ struct qla_hw_data *ha = qpair->hw;
- spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+ spin_lock_irqsave(&qpair->qp_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- qla24xx_process_response_queue(vha, rsp);
- spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
+ qla24xx_process_response_queue(vha, qpair->rsp);
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
}
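qla_do_work() now recovers the owning queue pair from the embedded work item with container_of(). A self-contained sketch of that embedded-member recovery pattern (stand-in structs, not the driver's):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };

    /* Stand-in for struct qla_qpair: the work item is embedded, so the
     * handler can recover the owning queue pair from the work pointer. */
    struct qpair {
        int id;
        struct work_struct q_work;
    };

    static void do_work(struct work_struct *work)
    {
        struct qpair *qp = container_of(work, struct qpair, q_work);

        printf("processing responses for qpair %d\n", qp->id);
    }

    int main(void)
    {
        struct qpair qp = { .id = 3 };

        do_work(&qp.q_work);
        return 0;
    }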
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
- uint8_t vp_idx, uint16_t rid, int req)
+ uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair)
{
int ret = 0;
struct rsp_que *rsp = NULL;
@@ -779,28 +788,24 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
goto que_failed;
}
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
if (que_id >= ha->max_rsp_queues) {
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_log(ql_log_warn, base_vha, 0x00e2,
"No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->rsp_qid_map);
- if (ha->flags.msix_enabled)
- rsp->msix = &ha->msix_entries[que_id + 1];
- else
- ql_log(ql_log_warn, base_vha, 0x00e3,
- "MSIX not enabled.\n");
+ rsp->msix = qpair->msix;
ha->rsp_q_map[que_id] = rsp;
rsp->rid = rid;
rsp->vp_idx = vp_idx;
rsp->hw = ha;
ql_dbg(ql_dbg_init, base_vha, 0x00e4,
- "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+ "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
que_id, rsp->rid, rsp->vp_idx, rsp->hw);
/* Use alternate PCI bus number */
if (MSB(rsp->rid))
@@ -812,23 +817,27 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (!IS_MSIX_NACK_CAPABLE(ha))
options |= BIT_6;
+ /* Set option to indicate response queue creation */
+ options |= BIT_1;
+
rsp->options = options;
rsp->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
rsp->in_ptr = (void *)(rsp->ring + rsp->length);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
- "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
ql_dbg(ql_dbg_init, base_vha, 0x00e5,
- "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
- ret = qla25xx_request_irq(rsp);
+ ret = qla25xx_request_irq(ha, qpair, qpair->msix,
+ QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
if (ret)
goto que_failed;
@@ -836,19 +845,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00e7,
"%s failed.\n", __func__);
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
clear_bit(que_id, ha->rsp_qid_map);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
goto que_failed;
}
- if (req >= 0)
- rsp->req = ha->req_q_map[req];
- else
- rsp->req = NULL;
+ rsp->req = NULL;
qla2x00_init_response_q_entries(rsp);
- if (rsp->hw->wq)
- INIT_WORK(&rsp->q_work, qla_do_work);
+ if (qpair->hw->wq)
+ INIT_WORK(&qpair->q_work, qla_do_work);
return rsp->id;
que_failed:
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 56d6142852a5..8521cfe302e9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -30,7 +31,7 @@ static int apidev_major;
/*
* SRB allocation cache
*/
-static struct kmem_cache *srb_cachep;
+struct kmem_cache *srb_cachep;
/*
* CT6 CTX allocation cache
@@ -143,19 +144,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
"Enables iIDMA settings "
"Default is 1 - perform iIDMA. 0 - no iIDMA.");
-int ql2xmaxqueues = 1;
-module_param(ql2xmaxqueues, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmaxqueues,
- "Enables MQ settings "
- "Default is 1 for single queue. Set it to number "
- "of queues in MQ mode.");
-
-int ql2xmultique_tag;
-module_param(ql2xmultique_tag, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmultique_tag,
- "Enables CPU affinity settings for the driver "
- "Default is 0 for no affinity of request and response IO. "
- "Set it to 1 to turn on the cpu affinity.");
+int ql2xmqsupport = 1;
+module_param(ql2xmqsupport, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmqsupport,
+ "Enable on demand multiple queue pairs support "
+ "Default is 1 for supported. "
+ "Set it to 0 to turn off mq qpair support.");
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
@@ -261,6 +255,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla83xx_disable_laser(scsi_qla_host_t *vha);
+static int qla2xxx_map_queues(struct Scsi_Host *shost);
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
@@ -280,6 +275,7 @@ struct scsi_host_template qla2xxx_driver_template = {
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = scsi_change_queue_depth,
+ .map_queues = qla2xxx_map_queues,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
@@ -339,6 +335,8 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
+int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+ struct qla_qpair *qpair);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
@@ -360,6 +358,25 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
"Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
+
+ if (ql2xmqsupport && ha->max_qpairs) {
+ ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
+ GFP_KERNEL);
+ if (!ha->queue_pair_map) {
+ ql_log(ql_log_fatal, vha, 0x0180,
+ "Unable to allocate memory for queue pair ptrs.\n");
+ goto fail_qpair_map;
+ }
+ ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+ if (ha->base_qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x0182,
+ "Failed to allocate base queue pair memory.\n");
+ goto fail_base_qpair;
+ }
+ ha->base_qpair->req = req;
+ ha->base_qpair->rsp = rsp;
+ }
+
/*
* Make sure we record at least the request and response queue zero in
* case we need to free them if part of the probe fails.
@@ -370,6 +387,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
set_bit(0, ha->req_qid_map);
return 1;
+fail_base_qpair:
+ kfree(ha->queue_pair_map);
+fail_qpair_map:
+ kfree(ha->rsp_q_map);
+ ha->rsp_q_map = NULL;
fail_rsp_map:
kfree(ha->req_q_map);
ha->req_q_map = NULL;
@@ -417,82 +439,43 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
struct req_que *req;
struct rsp_que *rsp;
int cnt;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
if (!test_bit(cnt, ha->req_qid_map))
continue;
req = ha->req_q_map[cnt];
+ clear_bit(cnt, ha->req_qid_map);
+ ha->req_q_map[cnt] = NULL;
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_req_que(ha, req);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
kfree(ha->req_q_map);
ha->req_q_map = NULL;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
if (!test_bit(cnt, ha->rsp_qid_map))
continue;
rsp = ha->rsp_q_map[cnt];
+ clear_bit(cnt, ha->req_qid_map);
+ ha->rsp_q_map[cnt] = NULL;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_rsp_que(ha, rsp);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
- kfree(ha->rsp_q_map);
- ha->rsp_q_map = NULL;
-}
-
-static int qla25xx_setup_mode(struct scsi_qla_host *vha)
-{
- uint16_t options = 0;
- int ques, req, ret;
- struct qla_hw_data *ha = vha->hw;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (!(ha->fw_attributes & BIT_6)) {
- ql_log(ql_log_warn, vha, 0x00d8,
- "Firmware is not multi-queue capable.\n");
- goto fail;
- }
- if (ql2xmultique_tag) {
- /* create a request queue for IO */
- options |= BIT_7;
- req = qla25xx_create_req_que(ha, options, 0, 0, -1,
- QLA_DEFAULT_QUE_QOS);
- if (!req) {
- ql_log(ql_log_warn, vha, 0x00e0,
- "Failed to create request queue.\n");
- goto fail;
- }
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
- vha->req = ha->req_q_map[req];
- options |= BIT_1;
- for (ques = 1; ques < ha->max_rsp_queues; ques++) {
- ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
- if (!ret) {
- ql_log(ql_log_warn, vha, 0x00e8,
- "Failed to create response queue.\n");
- goto fail2;
- }
- }
- ha->flags.cpu_affinity_enabled = 1;
- ql_dbg(ql_dbg_multiq, vha, 0xc007,
- "CPU affinity mode enabled, "
- "no. of response queues:%d no. of request queues:%d.\n",
- ha->max_rsp_queues, ha->max_req_queues);
- ql_dbg(ql_dbg_init, vha, 0x00e9,
- "CPU affinity mode enabled, "
- "no. of response queues:%d no. of request queues:%d.\n",
- ha->max_rsp_queues, ha->max_req_queues);
- }
- return 0;
-fail2:
- qla25xx_delete_queues(vha);
- destroy_workqueue(ha->wq);
- ha->wq = NULL;
- vha->req = ha->req_q_map[0];
-fail:
- ha->mqenable = 0;
- kfree(ha->req_q_map);
kfree(ha->rsp_q_map);
- ha->max_req_queues = ha->max_rsp_queues = 1;
- return 1;
+ ha->rsp_q_map = NULL;
}
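The reworked qla2x00_free_queues() unlinks each queue from the map while holding hardware_lock, then drops the lock around the free routine, which may sleep. A userspace analog of that unlink-then-free pattern, with a pthread mutex standing in for the spinlock:

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    #define NQUEUES 4

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
    static int *queues[NQUEUES];

    /* Detach each entry under the lock so concurrent lookups can't find it,
     * then free with the lock dropped (the kernel free routine may sleep). */
    static void free_all_queues(void)
    {
        int cnt;

        pthread_mutex_lock(&map_lock);
        for (cnt = 0; cnt < NQUEUES; cnt++) {
            int *q = queues[cnt];

            if (!q)
                continue;
            queues[cnt] = NULL;
            pthread_mutex_unlock(&map_lock);
            free(q);
            pthread_mutex_lock(&map_lock);
        }
        pthread_mutex_unlock(&map_lock);
    }

    int main(void)
    {
        queues[1] = malloc(sizeof(int));
        queues[3] = malloc(sizeof(int));
        free_all_queues();
        printf("all queues freed\n");
        return 0;
    }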
static char *
@@ -669,7 +652,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
qla2x00_rel_sp(sp->fcport->vha, sp);
}
-static void
+void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -693,6 +676,75 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
cmd->scsi_done(cmd);
}
+void
+qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ void *ctx = GET_CMD_CTX_SP(sp);
+
+ if (sp->flags & SRB_DMA_VALID) {
+ scsi_dma_unmap(cmd);
+ sp->flags &= ~SRB_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+ /* The list is guaranteed to have elements */
+ qla2x00_clean_dsd_pool(ha, sp, NULL);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, ctx,
+ ((struct crc_context *)ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+ ctx1->fcp_cmnd_dma);
+ list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ mempool_free(ctx1, ha->ctx_mempool);
+ }
+
+ CMD_SP(cmd) = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+void
+qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+ cmd->result = res;
+
+ if (atomic_read(&sp->ref_count) == 0) {
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
+ "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+ sp, GET_CMD_SP(sp));
+ if (ql2xextended_error_logging & ql_dbg_io)
+ WARN_ON(atomic_read(&sp->ref_count) == 0);
+ return;
+ }
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
+ cmd->scsi_done(cmd);
+}
+
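qla2xxx_qpair_sp_compl() only tears down DMA mappings and calls scsi_done() once the last reference is gone, so an in-flight abort path can safely hold its own reference. A sketch of the same refcounted completion using C11 atomics:

    #include <stdio.h>
    #include <stdatomic.h>

    /* Userspace analog of sp->ref_count handling: teardown runs only when
     * the last reference is dropped. */
    struct sp {
        atomic_int ref_count;
    };

    static void sp_complete(struct sp *sp)
    {
        if (atomic_load(&sp->ref_count) == 0) {
            fprintf(stderr, "BUG: completing an sp with refcount 0\n");
            return;
        }
        if (atomic_fetch_sub(&sp->ref_count, 1) != 1)
            return;    /* another path still holds a reference */

        printf("last reference dropped: free DMA, call scsi_done()\n");
    }

    int main(void)
    {
        struct sp sp;

        atomic_init(&sp.ref_count, 2);    /* I/O path + abort path */
        sp_complete(&sp);                 /* nothing visible happens yet */
        sp_complete(&sp);                 /* now the command completes */
        return 0;
    }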
/* If we are SP1 here, we need to still take and release the host_lock as SP1
* does not have the changes necessary to avoid taking host->host_lock.
*/
@@ -706,12 +758,28 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
srb_t *sp;
int rval;
+ struct qla_qpair *qpair = NULL;
+ uint32_t tag;
+ uint16_t hwq;
if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
+ if (ha->mqenable) {
+ if (shost_use_blk_mq(vha->host)) {
+ tag = blk_mq_unique_tag(cmd->request);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ qpair = ha->queue_pair_map[hwq];
+ } else if (vha->vp_idx && vha->qpair) {
+ qpair = vha->qpair;
+ }
+
+ if (qpair)
+ return qla2xxx_mqueuecommand(host, cmd, qpair);
+ }
+
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_aer, vha, 0x9010,
@@ -808,6 +876,95 @@ qc24_fail_command:
return 0;
}
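The blk-mq branch above derives the hardware-queue index from the request tag: blk_mq_unique_tag() packs the hwq number into the upper 16 bits of the tag, and blk_mq_unique_tag_to_hwq() extracts it. A userspace sketch mirroring that encoding (the shift and mask match the kernel's blk-mq definitions):

    #include <stdio.h>
    #include <stdint.h>

    #define BLK_MQ_UNIQUE_TAG_BITS 16
    #define BLK_MQ_UNIQUE_TAG_MASK ((1U << BLK_MQ_UNIQUE_TAG_BITS) - 1)

    static uint32_t unique_tag(uint16_t hwq, uint16_t tag)
    {
        return ((uint32_t)hwq << BLK_MQ_UNIQUE_TAG_BITS) | tag;
    }

    static uint16_t unique_tag_to_hwq(uint32_t unique)
    {
        return unique >> BLK_MQ_UNIQUE_TAG_BITS;
    }

    int main(void)
    {
        uint32_t t = unique_tag(3, 42);

        /* the driver indexes ha->queue_pair_map[] with this hwq value */
        printf("hwq=%u tag=%u\n",
               (unsigned int)unique_tag_to_hwq(t),
               (unsigned int)(t & BLK_MQ_UNIQUE_TAG_MASK));
        return 0;
    }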
+/* For MQ supported I/O */
+int
+qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+ struct qla_qpair *qpair)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp;
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ cmd->result = rval;
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
+ "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+ cmd, rval);
+ goto qc24_fail_command;
+ }
+
+ if (!fcport) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
+ if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+ ql_dbg(ql_dbg_io, vha, 0x3077,
+ "Returning DNC, fcport_state=%d loop_state=%d.\n",
+ atomic_read(&fcport->state),
+ atomic_read(&base_vha->loop_state));
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+ goto qc24_target_busy;
+ }
+
+ /*
+ * Return target busy if we've received a non-zero retry_delay_timer
+ * in a FCP_RSP.
+ */
+ if (fcport->retry_delay_timestamp == 0) {
+ /* retry delay not set */
+ } else if (time_after(jiffies, fcport->retry_delay_timestamp))
+ fcport->retry_delay_timestamp = 0;
+ else
+ goto qc24_target_busy;
+
+ sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto qc24_host_busy;
+
+ sp->u.scmd.cmd = cmd;
+ sp->type = SRB_SCSI_CMD;
+ atomic_set(&sp->ref_count, 1);
+ CMD_SP(cmd) = (void *)sp;
+ sp->free = qla2xxx_qpair_sp_free_dma;
+ sp->done = qla2xxx_qpair_sp_compl;
+ sp->qpair = qpair;
+
+ rval = ha->isp_ops->start_scsi_mq(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+ "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+ if (rval == QLA_INTERFACE_ERROR)
+ goto qc24_fail_command;
+ goto qc24_host_busy_free_sp;
+ }
+
+ return 0;
+
+qc24_host_busy_free_sp:
+ qla2xxx_qpair_sp_free_dma(vha, sp);
+
+qc24_host_busy:
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+ cmd->scsi_done(cmd);
+
+ return 0;
+}
+
/*
* qla2x00_eh_wait_on_command
* Waits for the command to be returned by the Firmware for some
@@ -1601,7 +1758,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
{
resource_size_t pio;
uint16_t msix;
- int cpus;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1658,9 +1814,7 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
- if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
- (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
- (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -1670,26 +1824,18 @@ skip_pio:
"MQIO Base=%p.\n", ha->mqiobase);
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
+ ha->msix_count = msix + 1;
/* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- }
+ /* MB interrupt uses 1 vector */
+ ha->max_req_queues = ha->msix_count - 1;
+ ha->max_rsp_queues = ha->max_req_queues;
+ /* Queue pair count is the max value minus the base queue pair */
+ ha->max_qpairs = ha->max_rsp_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
+ "Max no of queues pairs: %d.\n", ha->max_qpairs);
+
ql_log_pci(ql_log_info, ha->pdev, 0x001a,
- "MSI-X vector count: %d.\n", msix);
+ "MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x001b,
"BAR 3 not enabled.\n");
@@ -1709,7 +1855,6 @@ static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
uint16_t msix;
- int cpus;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1761,32 +1906,36 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev,
QLA_83XX_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
- /* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
+ ha->msix_count = msix + 1;
+ /*
+ * By default, the driver uses at least two MSI-X vectors
+ * (default & rspq).
+ */
+ if (ql2xmqsupport) {
+ /* MB interrupt uses 1 vector */
+ ha->max_req_queues = ha->msix_count - 1;
+ ha->max_rsp_queues = ha->max_req_queues;
+
+ /* ATIOQ needs 1 vector. That's 1 less QPair */
+ if (QLA_TGT_MODE_ENABLED())
+ ha->max_req_queues--;
+
+ /* Queue pair count is the max value minus the base queue pair */
+ ha->max_qpairs = ha->max_req_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+ "Max no of queues pairs: %d.\n", ha->max_qpairs);
}
ql_log_pci(ql_log_info, ha->pdev, 0x011c,
- "MSI-X vector count: %d.\n", msix);
+ "MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x011e,
"BAR 1 not enabled.\n");
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
+ if (QLA_TGT_MODE_ENABLED())
+ ha->msix_count++;
qlt_83xx_iospace_config(ha);
@@ -1831,6 +1980,7 @@ static struct isp_operations qla2100_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1869,6 +2019,7 @@ static struct isp_operations qla2300_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1907,6 +2058,7 @@ static struct isp_operations qla24xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1945,6 +2097,7 @@ static struct isp_operations qla25xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1983,6 +2136,7 @@ static struct isp_operations qla81xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2021,6 +2175,7 @@ static struct isp_operations qla82xx_isp_ops = {
.write_optrom = qla82xx_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla82xx_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2059,6 +2214,7 @@ static struct isp_operations qla8044_isp_ops = {
.write_optrom = qla8044_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla8044_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2097,6 +2253,7 @@ static struct isp_operations qla83xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2135,6 +2292,7 @@ static struct isp_operations qlafx00_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qlafx00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qlafx00_abort_isp,
.iospace_config = qlafx00_iospace_config,
.initialize_adapter = qlafx00_initialize_adapter,
@@ -2173,6 +2331,7 @@ static struct isp_operations qla27xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2387,6 +2546,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
uint16_t req_length = 0, rsp_length = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ int i;
+
bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
sht = &qla2xxx_driver_template;
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2650,6 +2811,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"Found an ISP%04X irq %d iobase 0x%p.\n",
pdev->device, pdev->irq, ha->iobase);
mutex_init(&ha->vport_lock);
+ mutex_init(&ha->mq_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
init_completion(&ha->mbx_intr_comp);
@@ -2737,7 +2899,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
-que_init:
+ /* Set up the irqs */
+ ret = qla2x00_request_irqs(ha, rsp);
+ if (ret)
+ goto probe_init_failed;
+
/* Alloc arrays of request and response ring ptrs */
if (!qla2x00_alloc_queues(ha, req, rsp)) {
ql_log(ql_log_fatal, base_vha, 0x003d,
@@ -2746,12 +2912,17 @@ que_init:
goto probe_init_failed;
}
- qlt_probe_one_stage1(base_vha, ha);
+ if (ha->mqenable && shost_use_blk_mq(host)) {
+ /* number of hardware queues supported by blk/scsi-mq */
+ host->nr_hw_queues = ha->max_qpairs;
- /* Set up the irqs */
- ret = qla2x00_request_irqs(ha, rsp);
- if (ret)
- goto probe_init_failed;
+ ql_dbg(ql_dbg_init, base_vha, 0x0192,
+ "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
+ } else
+ ql_dbg(ql_dbg_init, base_vha, 0x0193,
+ "blk/scsi-mq disabled.\n");
+
+ qlt_probe_one_stage1(base_vha, ha);
pci_save_state(pdev);
@@ -2842,11 +3013,12 @@ que_init:
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- if (ha->mqenable) {
- if (qla25xx_setup_mode(base_vha)) {
- ql_log(ql_log_warn, base_vha, 0x00ec,
- "Failed to create queues, falling back to single queue mode.\n");
- goto que_init;
+ if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+ /* Create start of day qpairs for Block MQ */
+ if (shost_use_blk_mq(host)) {
+ for (i = 0; i < ha->max_qpairs; i++)
+ qla2xxx_create_qpair(base_vha, 5, 0);
}
}
@@ -3115,13 +3287,6 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
static void
qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
{
- /* Flush the work queue and remove it */
- if (ha->wq) {
- flush_workqueue(ha->wq);
- destroy_workqueue(ha->wq);
- ha->wq = NULL;
- }
-
/* Cancel all work and destroy DPC workqueues */
if (ha->dpc_lp_wq) {
cancel_work_sync(&ha->idc_aen);
@@ -3317,9 +3482,17 @@ qla2x00_free_device(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
}
+ qla2x00_free_fcports(vha);
+
qla2x00_free_irqs(vha);
- qla2x00_free_fcports(vha);
+ /* Flush the work queue and remove it */
+ if (ha->wq) {
+ flush_workqueue(ha->wq);
+ destroy_workqueue(ha->wq);
+ ha->wq = NULL;
+ }
+
qla2x00_mem_free(ha);
@@ -4034,6 +4207,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
INIT_LIST_HEAD(&vha->logo_list);
INIT_LIST_HEAD(&vha->plogi_ack_list);
+ INIT_LIST_HEAD(&vha->qp_list);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -5038,8 +5212,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
base_vha->flags.init_done = 0;
qla25xx_delete_queues(base_vha);
- qla2x00_free_irqs(base_vha);
qla2x00_free_fcports(base_vha);
+ qla2x00_free_irqs(base_vha);
qla2x00_mem_free(ha);
qla82xx_md_free(base_vha);
qla2x00_free_queues(ha);
@@ -5073,6 +5247,8 @@ qla2x00_do_dpc(void *data)
{
scsi_qla_host_t *base_vha;
struct qla_hw_data *ha;
+ uint32_t online;
+ struct qla_qpair *qpair;
ha = (struct qla_hw_data *)data;
base_vha = pci_get_drvdata(ha->pdev);
@@ -5334,6 +5510,22 @@ intr_on_check:
ha->isp_ops->beacon_blink(base_vha);
}
+ /* qpair online check */
+ if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
+ &base_vha->dpc_flags)) {
+ if (ha->flags.eeh_busy ||
+ ha->flags.pci_channel_io_perm_failure)
+ online = 0;
+ else
+ online = 1;
+
+ mutex_lock(&ha->mq_lock);
+ list_for_each_entry(qpair, &base_vha->qp_list,
+ qp_list_elem)
+ qpair->online = online;
+ mutex_unlock(&ha->mq_lock);
+ }
+
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -5676,6 +5868,10 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
switch (state) {
case pci_channel_io_normal:
ha->flags.eeh_busy = 0;
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
ha->flags.eeh_busy = 1;
@@ -5689,10 +5885,18 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
/* Return back all IOs */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
ha->flags.pci_channel_io_perm_failure = 1;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
@@ -5960,6 +6164,13 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
qla83xx_wr_reg(vha, reg, data);
}
+static int qla2xxx_map_queues(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+
+ return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+}
+
static const struct pci_error_handlers qla2xxx_err_handler = {
.error_detected = qla2xxx_pci_error_detected,
.mmio_enabled = qla2xxx_pci_mmio_enabled,
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 07349270535d..82dfe07b1d47 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1204,10 +1204,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
struct request_queue *rq = sdev->request_queue;
struct scsi_target *starget = sdev->sdev_target;
- error = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (error)
- return error;
-
error = scsi_target_add(starget);
if (error)
return error;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index aa43bfea0d00..abe617372661 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -23,6 +23,7 @@
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
+#include "ufs_quirks.h"
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
@@ -1031,6 +1032,34 @@ out:
return ret;
}
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+ int err;
+ u32 pa_vs_config_reg1;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ &pa_vs_config_reg1);
+ if (err)
+ goto out;
+
+ /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+ err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+ return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+ err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+ return err;
+}
+
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1194,7 +1223,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
*/
host->generic_phy = devm_phy_get(dev, "ufsphy");
- if (IS_ERR(host->generic_phy)) {
+ if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
+ /*
+ * The UFS driver may be probed before the phy driver.
+ * In that case, return -EPROBE_DEFER so the probe is
+ * retried once the phy driver is available.
+ */
+ err = -EPROBE_DEFER;
+ dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
+ __func__, err);
+ goto out_variant_clear;
+ } else if (IS_ERR(host->generic_phy)) {
err = PTR_ERR(host->generic_phy);
dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
goto out_variant_clear;
@@ -1432,7 +1470,8 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
- ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+ /* clear bit 17 - UTP_DBG_RAMS_EN */
+ ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
@@ -1609,6 +1648,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.hce_enable_notify = ufs_qcom_hce_enable_notify,
.link_startup_notify = ufs_qcom_link_startup_notify,
.pwr_change_notify = ufs_qcom_pwr_change_notify,
+ .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
.suspend = ufs_qcom_suspend,
.resume = ufs_qcom_resume,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
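The ufs_qcom_quirk_host_pa_saveconfigtime() helper above is an instance of the
driver's usual read-modify-write pattern for UniPro vendor attributes via the
DME helpers. A generic sketch of that pattern (attr_sel and bit here are
caller-supplied placeholders, not real attribute definitions):

	/* Set a bit in a UniPro attribute: get, OR in the bit, set. */
	static int ufs_dme_set_attr_bit(struct ufs_hba *hba, u32 attr_sel,
					u32 bit)
	{
		u32 val;
		int err;

		err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr_sel), &val);
		if (err)
			return err;

		return ufshcd_dme_set(hba, UIC_ARG_MIB(attr_sel), val | bit);
	}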
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index a19307a57ce2..fe517cd7dac3 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -142,6 +142,7 @@ enum ufs_qcom_phy_init_type {
UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
/* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index f7983058f3f7..08b799d4efcc 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -134,29 +134,17 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
+/*
+ * The maximum value of PA_SaveConfigTime is 250 (10 us), but this is not
+ * enough for some vendors: a gear switch from PWM to HS may fail even at
+ * this maximum. The host controller can issue a gear switch as part of
+ * error recovery, so a software delay does not help in that case; instead,
+ * PA_SaveConfigTime must be raised above 32 us, per vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
-static struct ufs_dev_fix ufs_fixups[] = {
- /* UFS cards deviations table */
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_NO_FASTAUTO),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-
- END_FIX
-};
#endif /* UFS_QUIRKS_H_ */
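Moving ufs_fixups out of this header works because the table's only consumer
is ufshcd.c: ufs_advertise_fixup_device() walks it and ORs the quirk flags of
every matching vendor/model entry into hba->dev_quirks. A rough sketch of that
matching loop (struct field names are assumptions for illustration, not the
exact source):

	struct ufs_dev_fix *f;

	for (f = ufs_fixups; f->quirk; f++) {	/* END_FIX has quirk == 0 */
		if ((f->card.wmanufacturerid == card_wmanufacturerid ||
		     f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
		    (!strcmp(f->card.model, UFS_ANY_MODEL) ||
		     !strncmp(f->card.model, card_model,
			      strlen(f->card.model))))
			hba->dev_quirks |= f->quirk;
	}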
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ef8548c3a423..a2c2817fc566 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -185,6 +185,30 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
return ufs_pm_lvl_states[lvl].link_state;
}
+static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+
+ END_FIX
+};
+
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
@@ -288,10 +312,24 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
- if (hba->ufs_version == UFSHCI_VERSION_10)
- return INTERRUPT_MASK_ALL_VER_10;
- else
- return INTERRUPT_MASK_ALL_VER_11;
+ u32 intr_mask = 0;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ intr_mask = INTERRUPT_MASK_ALL_VER_10;
+ break;
+ case UFSHCI_VERSION_11:
+ case UFSHCI_VERSION_20:
+ intr_mask = INTERRUPT_MASK_ALL_VER_11;
+ break;
+ case UFSHCI_VERSION_21:
+ default:
+ intr_mask = INTERRUPT_MASK_ALL_VER_21;
+ }
+
+ return intr_mask;
}
/**
@@ -5199,6 +5237,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ ufshcd_vops_apply_dev_quirks(hba);
}
/**
@@ -6667,6 +6707,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
+ if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+ (hba->ufs_version != UFSHCI_VERSION_11) &&
+ (hba->ufs_version != UFSHCI_VERSION_20) &&
+ (hba->ufs_version != UFSHCI_VERSION_21))
+ dev_err(hba->dev, "invalid UFS version 0x%x\n",
+ hba->ufs_version);
+
/* Get Interrupt bit mask per version */
hba->intr_mask = ufshcd_get_intr_mask(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 7d9ff22acfea..08cd26ed2382 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -266,7 +266,7 @@ struct ufs_pwr_mode_info {
* @setup_task_mgmt: called before any task management request is issued
* to set some things
* @hibern8_notify: called around hibern8 enter/exit
- * to configure some things
+ * @apply_dev_quirks: called to apply device-specific quirks
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
@@ -293,7 +293,8 @@ struct ufs_hba_variant_ops {
void (*setup_xfer_req)(struct ufs_hba *, int, bool);
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
- enum ufs_notify_change_status);
+ enum ufs_notify_change_status);
+ int (*apply_dev_quirks)(struct ufs_hba *);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -839,6 +840,13 @@ static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
return hba->vops->hibern8_notify(hba, cmd, status);
}
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->apply_dev_quirks)
+ return hba->vops->apply_dev_quirks(hba);
+ return 0;
+}
+
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
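ufshcd_vops_apply_dev_quirks() above follows the file's standard idiom for
optional variant ops: NULL-check the ops table and the member, and fall back
to a benign default when the variant does not implement the hook. The same
shape with a hypothetical op, purely for illustration:

	/* Hypothetical ->example_op; a missing op is not an error. */
	static inline int ufshcd_vops_example(struct ufs_hba *hba)
	{
		if (hba->vops && hba->vops->example_op)
			return hba->vops->example_op(hba);
		return 0;
	}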
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 5d978867be57..8c5190e2e1c9 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -72,6 +72,10 @@ enum {
REG_UIC_COMMAND_ARG_1 = 0x94,
REG_UIC_COMMAND_ARG_2 = 0x98,
REG_UIC_COMMAND_ARG_3 = 0x9C,
+ REG_UFS_CCAP = 0x100,
+ REG_UFS_CRYPTOCAP = 0x104,
+
+ UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
};
/* Controller capability masks */
@@ -275,6 +279,9 @@ enum {
/* Interrupt disable mask for UFSHCI v1.1 */
INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
+
+ /* Interrupt disable mask for UFSHCI v2.1 */
+ INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
};
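The new v2.1 mask differs from the v1.1 mask by exactly one bit:
0x71FFF == 0x31FFF | (1 << 18). Bit 18 is the crypto engine fatal error
status interrupt introduced by UFSHCI 2.1, consistent with the crypto
capability registers added earlier in this patch. A hypothetical compile-time
check of that relationship:

	static inline void ufshci_mask_sanity_check(void)
	{
		BUILD_BUG_ON(INTERRUPT_MASK_ALL_VER_21 !=
			     (INTERRUPT_MASK_ALL_VER_11 | (1 << 18)));
	}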
/*