author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 17:46:21 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 17:46:21 -0700
commit     da4f58ffa08a7b7012fab9c205fa0f6ba40fec42
tree       76317ee98eadbb8457e045dd0375652b14224451
parent     Merge branch 'usb-target-merge' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
parent     Merge tag 'isci-for-3.5' into misc
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI misc update from James Bottomley:
 "The patch contains the usual assortment of driver updates (be2iscsi,
  bfa, bnx2i, fcoe, hpsa, isci, lpfc, megaraid, mpt2sas, pm8001, sg)
  plus an assortment of other changes and fixes. Also new is the fact
  that the isci update is delivered as a git merge (with signed tag)."

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (158 commits)
  isci: End the RNC resumption wait when the RNC is destroyed.
  isci: Fixed RNC bug that lost the suspension or resumption during destroy
  isci: Fix RNC AWAIT_SUSPENSION->INVALIDATING transition.
  isci: Manage the IREQ_NO_AUTO_FREE_TAG under scic_lock.
  isci: Remove obviated host callback list.
  isci: Check IDEV_GONE before performing abort path operations.
  isci: Restore the ATAPI device RNC management code.
  isci: Don't wait for an RNC suspend if it's being destroyed.
  isci: Change the phy control and link reset interface for HW reasons.
  isci: Added timeouts to RNC suspensions in the abort path.
  isci: Add protocol indicator for TMF requests.
  isci: Directly control IREQ_ABORT_PATH_ACTIVE when completing TMFs.
  isci: Wait for RNC resumption before leaving the abort path.
  isci: Fix RNC suspend call for SCI_RESUMING state.
  isci: Manage tag releases differently when aborting tasks.
  isci: Callbacks to libsas occur under scic_lock and are synchronized.
  isci: When in the abort path, defeat other resume calls until done.
  isci: Implement waiting for suspend in the abort path.
  isci: Make sure all TCs are terminated and cleaned in LUN reset.
  isci: Manage the LLHANG timer enable/disable per-device.
  ...
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas | 8
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/aacraid/src.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be.h | 4
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 154
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 493
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.h | 15
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 447
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 17
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 522
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 50
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.h | 3
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c | 32
-rw-r--r--  drivers/scsi/bfa/bfad.c | 17
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c | 20
-rw-r--r--  drivers/scsi/bnx2i/57xx_iscsi_constants.h | 2
-rw-r--r--  drivers/scsi/bnx2i/57xx_iscsi_hsi.h | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i.h | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 6
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 3
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_sysfs.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 70
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 41
-rw-r--r--  drivers/scsi/fcoe/fcoe.h | 4
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 8
-rw-r--r--  drivers/scsi/hpsa.c | 683
-rw-r--r--  drivers/scsi/hpsa.h | 85
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 37
-rw-r--r--  drivers/scsi/isci/host.c | 703
-rw-r--r--  drivers/scsi/isci/host.h | 124
-rw-r--r--  drivers/scsi/isci/init.c | 212
-rw-r--r--  drivers/scsi/isci/phy.c | 76
-rw-r--r--  drivers/scsi/isci/phy.h | 9
-rw-r--r--  drivers/scsi/isci/port.c | 68
-rw-r--r--  drivers/scsi/isci/port.h | 11
-rw-r--r--  drivers/scsi/isci/port_config.c | 18
-rw-r--r--  drivers/scsi/isci/probe_roms.c | 12
-rw-r--r--  drivers/scsi/isci/probe_roms.h | 2
-rw-r--r--  drivers/scsi/isci/registers.h | 8
-rw-r--r--  drivers/scsi/isci/remote_device.c | 576
-rw-r--r--  drivers/scsi/isci/remote_device.h | 63
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 393
-rw-r--r--  drivers/scsi/isci/remote_node_context.h | 43
-rw-r--r--  drivers/scsi/isci/request.c | 715
-rw-r--r--  drivers/scsi/isci/request.h | 125
-rw-r--r--  drivers/scsi/isci/scu_completion_codes.h | 2
-rw-r--r--  drivers/scsi/isci/task.c | 800
-rw-r--r--  drivers/scsi/isci/task.h | 132
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.c | 30
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.h | 6
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 46
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 418
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 141
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 377
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 414
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 785
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 17
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 21
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 4
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2.h | 7
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 68
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 38
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 10
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 312
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 597
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 243
-rw-r--r--  drivers/scsi/pm8001/pm8001_defs.h | 3
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 23
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.h | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 10
-rw-r--r--  drivers/scsi/scsi.c | 6
-rw-r--r--  drivers/scsi/scsi_lib.c | 10
-rw-r--r--  drivers/scsi/scsi_pm.c | 2
-rw-r--r--  drivers/scsi/scsi_priv.h | 2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 24
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 4
-rw-r--r--  drivers/scsi/sd.c | 5
-rw-r--r--  drivers/scsi/sg.c | 183
-rw-r--r--  drivers/scsi/st.h | 2
-rw-r--r--  drivers/scsi/storvsc_drv.c | 20
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 8
-rw-r--r--  include/scsi/iscsi_proto.h | 2
-rw-r--r--  include/scsi/sas.h | 1
99 files changed, 6500 insertions(+), 4257 deletions(-)
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 83f8ea8b79eb..80441ab608e4 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,11 @@
+Release Date : Mon. Mar 19, 2012 17:00:00 PST 2012 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford
+Current Version : 00.00.06.15-rc1
+Old Version : 00.00.06.14-rc1
+ 1. Optimize HostMSIxVectors setting.
+ 2. Add fpRead/WriteCapable, fpRead/WriteAcrossStripe checks.
+-------------------------------------------------------------------------------
Release Date : Fri. Jan 6, 2012 17:00:00 PST 2010 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/MAINTAINERS b/MAINTAINERS
index 7560921a4e15..fddf29c057a1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1599,6 +1599,7 @@ F: include/linux/bcma/
BROCADE BFA FC SCSI DRIVER
M: Jing Huang <huangj@brocade.com>
+M: Krishna C Gudipati <kgudipat@brocade.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/bfa/
@@ -6882,6 +6883,14 @@ F: Documentation/cdrom/
F: drivers/cdrom/cdrom.c
F: include/linux/cdrom.h
+UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
+M: Vinayak Holikatti <vinholikatti@gmail.com>
+M: Santosh Y <santoshsy@gmail.com>
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: Documentation/scsi/ufs.txt
+F: drivers/scsi/ufs/
+
UNSORTED BLOCK IMAGES (UBI)
M: Artem Bityutskiy <dedekind1@gmail.com>
W: http://www.linux-mtd.infradead.org/
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 29684c8142b0..bea04e5d3b51 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -408,6 +408,7 @@ config BLK_DEV_3W_XXXX_RAID
config SCSI_HPSA
tristate "HP Smart Array SCSI driver"
depends on PCI && SCSI
+ select CHECK_SIGNATURE
help
This driver supports HP Smart Array Controllers (circa 2009).
It is a SCSI alternative to the cciss driver, which is a block
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2bee51506a91..762820636304 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -424,6 +424,8 @@ static int aac_src_deliver_message(struct fib *fib)
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
if (!size) {
+ iounmap(dev->regs.src.bar1);
+ dev->regs.src.bar1 = NULL;
iounmap(dev->regs.src.bar0);
dev->base = dev->regs.src.bar0 = NULL;
return 0;
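
[Annotation] The aacraid hunk above fixes a mapping leak in the src comm-interface teardown path: a size of 0 means "unmap everything", but only bar0 was being unmapped, leaving the bar1 mapping behind. A minimal sketch of the convention the fix follows (illustrative only; example_unmap_all() is not part of the patch): every BAR a driver has mapped is unmapped and its pointer cleared, so a repeated teardown cannot unmap twice.

#include <linux/io.h>

/* Sketch, not from the patch: release every held BAR mapping on teardown. */
static void example_unmap_all(void __iomem *bars[], int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (bars[i]) {
			iounmap(bars[i]);
			bars[i] = NULL;	/* guard against double-unmap */
		}
	}
}
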
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 1d7b976c850f..a50b6a9030e8 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -132,10 +132,6 @@ struct be_ctrl_info {
((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
-/* Byte offset into the page corresponding to given address */
-#define OFFSET_IN_PAGE(addr) \
- ((size_t)(addr) & (PAGE_SIZE_4K-1))
-
/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field) \
(((size_t)&(((_struct *)0)->field))%32)
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index cdb15364bc69..d2e9e933f7a3 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -15,6 +15,8 @@
* Costa Mesa, CA 92626
*/
+#include <scsi/iscsi_proto.h>
+
#include "be.h"
#include "be_mgmt.h"
#include "be_main.h"
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 8b40a5b4366c..b0b36c6a145f 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -23,7 +23,7 @@
* firmware in the BE. These requests are communicated to the processor
* using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
* WRB inside a MAILBOX.
- * The commands are serviced by the ARM processor in the BladeEngine's MPU.
+ * The commands are serviced by the ARM processor in the OneConnect's MPU.
*/
struct be_sge {
u32 pa_lo;
@@ -163,7 +163,8 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES 3
#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7
#define OPCODE_COMMON_ISCSI_NTWK_SET_VLAN 14
-#define OPCODE_COMMON_ISCSI_NTWK_CONFIGURE_STATELESS_IP_ADDR 17
+#define OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR 17
+#define OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR 18
#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR 21
#define OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY 22
#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY 23
@@ -274,15 +275,15 @@ struct mgmt_conn_login_options {
struct mgmt_auth_method_format auth_data;
} __packed;
-struct ip_address_format {
+struct ip_addr_format {
u16 size_of_structure;
u8 reserved;
u8 ip_type;
- u8 ip_address[16];
+ u8 addr[16];
u32 rsvd0;
} __packed;
-struct mgmt_conn_info {
+struct mgmt_conn_info {
u32 connection_handle;
u32 connection_status;
u16 src_port;
@@ -290,9 +291,9 @@ struct mgmt_conn_info {
u16 dest_port_redirected;
u16 cid;
u32 estimated_throughput;
- struct ip_address_format src_ipaddr;
- struct ip_address_format dest_ipaddr;
- struct ip_address_format dest_ipaddr_redirected;
+ struct ip_addr_format src_ipaddr;
+ struct ip_addr_format dest_ipaddr;
+ struct ip_addr_format dest_ipaddr_redirected;
struct mgmt_conn_login_options negotiated_login_options;
} __packed;
@@ -322,43 +323,115 @@ struct mgmt_session_info {
struct mgmt_conn_info conn_list[1];
} __packed;
-struct be_cmd_req_get_session {
+struct be_cmd_get_session_req {
struct be_cmd_req_hdr hdr;
u32 session_handle;
} __packed;
-struct be_cmd_resp_get_session {
+struct be_cmd_get_session_resp {
struct be_cmd_resp_hdr hdr;
struct mgmt_session_info session_info;
} __packed;
struct mac_addr {
- u16 size_of_struct;
+ u16 size_of_structure;
u8 addr[ETH_ALEN];
} __packed;
-struct be_cmd_req_get_boot_target {
+struct be_cmd_get_boot_target_req {
struct be_cmd_req_hdr hdr;
} __packed;
-struct be_cmd_resp_get_boot_target {
+struct be_cmd_get_boot_target_resp {
struct be_cmd_resp_hdr hdr;
u32 boot_session_count;
int boot_session_handle;
};
-struct be_cmd_req_mac_query {
+struct be_cmd_mac_query_req {
struct be_cmd_req_hdr hdr;
u8 type;
u8 permanent;
u16 if_id;
} __packed;
-struct be_cmd_resp_mac_query {
+struct be_cmd_get_mac_resp {
struct be_cmd_resp_hdr hdr;
struct mac_addr mac;
};
+struct be_ip_addr_subnet_format {
+ u16 size_of_structure;
+ u8 ip_type;
+ u8 ipv6_prefix_length;
+ u8 addr[16];
+ u8 subnet_mask[16];
+ u32 rsvd0;
+} __packed;
+
+struct be_cmd_get_if_info_req {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_hndl;
+ u32 ip_type;
+} __packed;
+
+struct be_cmd_get_if_info_resp {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_hndl;
+ u32 vlan_priority;
+ u32 ip_addr_count;
+ u32 dhcp_state;
+ struct be_ip_addr_subnet_format ip_addr;
+} __packed;
+
+struct be_ip_addr_record {
+ u32 action;
+ u32 interface_hndl;
+ struct be_ip_addr_subnet_format ip_addr;
+ u32 status;
+} __packed;
+
+struct be_ip_addr_record_params {
+ u32 record_entry_count;
+ struct be_ip_addr_record ip_record;
+} __packed;
+
+struct be_cmd_set_ip_addr_req {
+ struct be_cmd_req_hdr hdr;
+ struct be_ip_addr_record_params ip_params;
+} __packed;
+
+
+struct be_cmd_set_dhcp_req {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_hndl;
+ u32 ip_type;
+ u32 flags;
+ u32 retry_count;
+} __packed;
+
+struct be_cmd_rel_dhcp_req {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_hndl;
+ u32 ip_type;
+} __packed;
+
+struct be_cmd_set_def_gateway_req {
+ struct be_cmd_req_hdr hdr;
+ u32 action;
+ struct ip_addr_format ip_addr;
+} __packed;
+
+struct be_cmd_get_def_gateway_req {
+ struct be_cmd_req_hdr hdr;
+ u32 ip_type;
+} __packed;
+
+struct be_cmd_get_def_gateway_resp {
+ struct be_cmd_req_hdr hdr;
+ struct ip_addr_format ip_addr;
+} __packed;
+
/******************** Create CQ ***************************/
/**
* Pseudo amap definition in which each bit of the actual structure is defined
@@ -489,7 +562,7 @@ struct be_cmd_req_modify_eq_delay {
#define ETH_ALEN 6
-struct be_cmd_req_get_mac_addr {
+struct be_cmd_get_nic_conf_req {
struct be_cmd_req_hdr hdr;
u32 nic_port_count;
u32 speed;
@@ -501,7 +574,7 @@ struct be_cmd_req_get_mac_addr {
u32 rsvd[23];
};
-struct be_cmd_resp_get_mac_addr {
+struct be_cmd_get_nic_conf_resp {
struct be_cmd_resp_hdr hdr;
u32 nic_port_count;
u32 speed;
@@ -513,6 +586,39 @@ struct be_cmd_resp_get_mac_addr {
u32 rsvd[23];
};
+#define BEISCSI_ALIAS_LEN 32
+
+struct be_cmd_hba_name {
+ struct be_cmd_req_hdr hdr;
+ u16 flags;
+ u16 rsvd0;
+ u8 initiator_name[ISCSI_NAME_LEN];
+ u8 initiator_alias[BEISCSI_ALIAS_LEN];
+} __packed;
+
+struct be_cmd_ntwk_link_status_req {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd0;
+} __packed;
+
+/*** Port Speed Values ***/
+#define BE2ISCSI_LINK_SPEED_ZERO 0x00
+#define BE2ISCSI_LINK_SPEED_10MBPS 0x01
+#define BE2ISCSI_LINK_SPEED_100MBPS 0x02
+#define BE2ISCSI_LINK_SPEED_1GBPS 0x03
+#define BE2ISCSI_LINK_SPEED_10GBPS 0x04
+struct be_cmd_ntwk_link_status_resp {
+ struct be_cmd_resp_hdr hdr;
+ u8 phys_port;
+ u8 mac_duplex;
+ u8 mac_speed;
+ u8 mac_fault;
+ u8 mgmt_mac_duplex;
+ u8 mgmt_mac_speed;
+ u16 qos_link_speed;
+ u32 logical_link_speed;
+} __packed;
+
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay);
@@ -530,11 +636,8 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
int be_poll_mcc(struct be_ctrl_info *ctrl);
int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba);
-unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
-unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba);
-unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba,
- u32 boot_session_handle,
- struct be_dma_mem *nonemb_cmd);
+unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
+unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
/* ISCSI Functions */
@@ -715,7 +818,7 @@ struct be_eq_delay_params_in {
struct tcp_connect_and_offload_in {
struct be_cmd_req_hdr hdr;
- struct ip_address_format ip_address;
+ struct ip_addr_format ip_address;
u16 tcp_port;
u16 cid;
u16 cq_id;
@@ -792,13 +895,14 @@ struct be_fw_cfg {
u32 function_caps;
} __packed;
-struct be_all_if_id {
+struct be_cmd_get_all_if_id_req {
struct be_cmd_req_hdr hdr;
u32 if_count;
u32 if_hndl_list[1];
} __packed;
#define ISCSI_OPCODE_SCSI_DATA_OUT 5
+#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
#define OPCODE_COMMON_ISCSI_CLEANUP 59
#define OPCODE_COMMON_TCP_UPLOAD 56
@@ -810,6 +914,8 @@ struct be_all_if_id {
#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
#define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52
+#define OPCODE_COMMON_WRITE_FLASH 96
+#define OPCODE_COMMON_READ_FLASH 97
/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
#define CMD_ISCSI_COMMAND_INVALIDATE 1
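
[Annotation] One convention worth noting before the be_iscsi.c changes: the be2iscsi MCC mailbox path returns a tag for each submitted command, and on completion the driver finds the WRB index, extended status, and status packed into the 32-bit word phba->ctrl.mcc_numtag[tag]. The functions added below (beiscsi_get_initname(), beiscsi_get_port_speed(), beiscsi_bsg_request()) all decode it with the same shifts and masks. A self-contained sketch of that decode; decode_mcc_numtag() and struct mcc_completion are invented here for illustration:

#include <stdint.h>

/* Assumed field layout of the be2iscsi MCC completion word, inferred from
 * the decode used throughout this merge:
 * bits 23:16 = WRB index, bits 15:8 = extended status, bits 7:0 = status. */
struct mcc_completion {
	uint8_t wrb_num;
	uint8_t extd_status;
	uint8_t status;
};

static struct mcc_completion decode_mcc_numtag(uint32_t numtag)
{
	struct mcc_completion c;

	c.wrb_num     = (numtag & 0x00FF0000) >> 16;
	c.extd_status = (numtag & 0x0000FF00) >> 8;
	c.status      =  numtag & 0x000000FF;
	return c;
}
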
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 33c8f09c7ac1..43f35034585d 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -23,6 +23,8 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_netlink.h>
+#include <net/netlink.h>
#include <scsi/scsi.h>
#include "be_iscsi.h"
@@ -207,6 +209,301 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
}
+static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
+{
+ if (phba->ipv4_iface)
+ return 0;
+
+ phba->ipv4_iface = iscsi_create_iface(phba->shost,
+ &beiscsi_iscsi_transport,
+ ISCSI_IFACE_TYPE_IPV4,
+ 0, 0);
+ if (!phba->ipv4_iface) {
+ shost_printk(KERN_ERR, phba->shost, "Could not "
+ "create default IPv4 address.\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
+{
+ if (phba->ipv6_iface)
+ return 0;
+
+ phba->ipv6_iface = iscsi_create_iface(phba->shost,
+ &beiscsi_iscsi_transport,
+ ISCSI_IFACE_TYPE_IPV6,
+ 0, 0);
+ if (!phba->ipv6_iface) {
+ shost_printk(KERN_ERR, phba->shost, "Could not "
+ "create default IPv6 address.\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
+{
+ struct be_cmd_get_if_info_resp if_info;
+
+ if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info))
+ beiscsi_create_ipv4_iface(phba);
+
+ if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info))
+ beiscsi_create_ipv6_iface(phba);
+}
+
+void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
+{
+ if (phba->ipv6_iface)
+ iscsi_destroy_iface(phba->ipv6_iface);
+ if (phba->ipv4_iface)
+ iscsi_destroy_iface(phba->ipv4_iface);
+}
+
+static int
+beiscsi_set_static_ip(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *iface_param,
+ void *data, uint32_t dt_len)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct iscsi_iface_param_info *iface_ip = NULL;
+ struct iscsi_iface_param_info *iface_subnet = NULL;
+ struct nlattr *nla;
+ int ret;
+
+
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+ if (nla)
+ iface_ip = nla_data(nla);
+
+ nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+ if (nla)
+ iface_subnet = nla_data(nla);
+ break;
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ iface_ip = iface_param;
+ nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+ if (nla)
+ iface_subnet = nla_data(nla);
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ iface_subnet = iface_param;
+ nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+ if (nla)
+ iface_ip = nla_data(nla);
+ break;
+ default:
+ shost_printk(KERN_ERR, shost, "Unsupported param %d\n",
+ iface_param->param);
+ }
+
+ if (!iface_ip || !iface_subnet) {
+ shost_printk(KERN_ERR, shost, "IP and Subnet Mask required\n");
+ return -EINVAL;
+ }
+
+ ret = mgmt_set_ip(phba, iface_ip, iface_subnet,
+ ISCSI_BOOTPROTO_STATIC);
+
+ return ret;
+}
+
+static int
+beiscsi_set_ipv4(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *iface_param,
+ void *data, uint32_t dt_len)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ int ret = 0;
+
+ /* Check the param */
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_IPV4_GW:
+ ret = mgmt_set_gateway(phba, iface_param);
+ break;
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
+ ret = mgmt_set_ip(phba, iface_param,
+ NULL, ISCSI_BOOTPROTO_DHCP);
+ else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
+ ret = beiscsi_set_static_ip(shost, iface_param,
+ data, dt_len);
+ else
+ shost_printk(KERN_ERR, shost, "Invalid BOOTPROTO: %d\n",
+ iface_param->value[0]);
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
+ ret = beiscsi_create_ipv4_iface(phba);
+ else
+ iscsi_destroy_iface(phba->ipv4_iface);
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ ret = beiscsi_set_static_ip(shost, iface_param,
+ data, dt_len);
+ break;
+ default:
+ shost_printk(KERN_ERR, shost, "Param %d not supported\n",
+ iface_param->param);
+ }
+
+ return ret;
+}
+
+static int
+beiscsi_set_ipv6(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *iface_param,
+ void *data, uint32_t dt_len)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ int ret = 0;
+
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
+ ret = beiscsi_create_ipv6_iface(phba);
+ else {
+ iscsi_destroy_iface(phba->ipv6_iface);
+ ret = 0;
+ }
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ ret = mgmt_set_ip(phba, iface_param, NULL,
+ ISCSI_BOOTPROTO_STATIC);
+ break;
+ default:
+ shost_printk(KERN_ERR, shost, "Param %d not supported\n",
+ iface_param->param);
+ }
+
+ return ret;
+}
+
+int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+ void *data, uint32_t dt_len)
+{
+ struct iscsi_iface_param_info *iface_param = NULL;
+ struct nlattr *attrib;
+ uint32_t rm_len = dt_len;
+ int ret = 0;
+
+ nla_for_each_attr(attrib, data, dt_len, rm_len) {
+ iface_param = nla_data(attrib);
+
+ if (iface_param->param_type != ISCSI_NET_PARAM)
+ continue;
+
+ /*
+ * BE2ISCSI only supports 1 interface
+ */
+ if (iface_param->iface_num) {
+ shost_printk(KERN_ERR, shost, "Invalid iface_num %d."
+ "Only iface_num 0 is supported.\n",
+ iface_param->iface_num);
+ return -EINVAL;
+ }
+
+ switch (iface_param->iface_type) {
+ case ISCSI_IFACE_TYPE_IPV4:
+ ret = beiscsi_set_ipv4(shost, iface_param,
+ data, dt_len);
+ break;
+ case ISCSI_IFACE_TYPE_IPV6:
+ ret = beiscsi_set_ipv6(shost, iface_param,
+ data, dt_len);
+ break;
+ default:
+ shost_printk(KERN_ERR, shost,
+ "Invalid iface type :%d passed\n",
+ iface_param->iface_type);
+ break;
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
+ struct iscsi_iface *iface, int param,
+ char *buf)
+{
+ struct be_cmd_get_if_info_resp if_info;
+ int len, ip_type = BE2_IPV4;
+
+ memset(&if_info, 0, sizeof(if_info));
+
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ ip_type = BE2_IPV6;
+
+ len = mgmt_get_if_info(phba, ip_type, &if_info);
+ if (len)
+ return len;
+
+ switch (param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr);
+ break;
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ if (!if_info.dhcp_state)
+ len = sprintf(buf, "static");
+ else
+ len = sprintf(buf, "dhcp");
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return len;
+}
+
+int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+ enum iscsi_param_type param_type,
+ int param, char *buf)
+{
+ struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct be_cmd_get_def_gateway_resp gateway;
+ int len = -ENOSYS;
+
+ switch (param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ len = be2iscsi_get_if_param(phba, iface, param, buf);
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ len = sprintf(buf, "enabled");
+ break;
+ case ISCSI_NET_PARAM_IPV4_GW:
+ memset(&gateway, 0, sizeof(gateway));
+ len = mgmt_get_gateway(phba, BE2_IPV4, &gateway);
+ if (!len)
+ len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr);
+ break;
+ default:
+ len = -ENOSYS;
+ }
+
+ return len;
+}
+
/**
* beiscsi_ep_get_param - get the iscsi parameter
* @ep: pointer to iscsi ep
@@ -221,7 +518,7 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
int len = 0;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_get_param, param= %d\n", param);
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -279,6 +576,121 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
}
/**
+ * beiscsi_get_initname - Read Initiator Name from flash
+ * @buf: buffer pointer
+ * @phba: The device priv structure instance
+ *
+ * returns number of bytes
+ */
+static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
+{
+ int rc;
+ unsigned int tag, wrb_num;
+ unsigned short status, extd_status;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_hba_name *resp;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+ tag = be_cmd_get_initname(phba);
+ if (!tag) {
+ SE_DEBUG(DBG_LVL_1, "Getting Initiator Name Failed\n");
+ return -EBUSY;
+ } else
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+
+ wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+
+ if (status || extd_status) {
+ SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
+ "status = %d extd_status = %d\n",
+ status, extd_status);
+ free_mcc_tag(&phba->ctrl, tag);
+ return -EAGAIN;
+ }
+ wrb = queue_get_wrb(mccq, wrb_num);
+ free_mcc_tag(&phba->ctrl, tag);
+ resp = embedded_payload(wrb);
+ rc = sprintf(buf, "%s\n", resp->initiator_name);
+ return rc;
+}
+
+/**
+ * beiscsi_get_port_state - Get the Port State
+ * @shost : pointer to scsi_host structure
+ *
+ * Caches the adapter's port state in the iscsi_cls_host; returns nothing.
+ */
+static void beiscsi_get_port_state(struct Scsi_Host *shost)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+
+ ihost->port_state = (phba->state == BE_ADAPTER_UP) ?
+ ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
+}
+
+/**
+ * beiscsi_get_port_speed - Get the Port Speed from Adapter
+ * @shost : pointer to scsi_host structure
+ *
+ * returns Success/Failure
+ */
+static int beiscsi_get_port_speed(struct Scsi_Host *shost)
+{
+ unsigned int tag, wrb_num;
+ unsigned short status, extd_status;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_ntwk_link_status_resp *resp;
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+ tag = be_cmd_get_port_speed(phba);
+ if (!tag) {
+ SE_DEBUG(DBG_LVL_1, "Getting Port Speed Failed\n");
+ return -EBUSY;
+ } else
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+
+ wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+
+ if (status || extd_status) {
+ SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
+ "status = %d extd_status = %d\n",
+ status, extd_status);
+ free_mcc_tag(&phba->ctrl, tag);
+ return -EAGAIN;
+ }
+ wrb = queue_get_wrb(mccq, wrb_num);
+ free_mcc_tag(&phba->ctrl, tag);
+ resp = embedded_payload(wrb);
+
+ switch (resp->mac_speed) {
+ case BE2ISCSI_LINK_SPEED_10MBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
+ break;
+ case BE2ISCSI_LINK_SPEED_100MBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_100MBPS;
+ break;
+ case BE2ISCSI_LINK_SPEED_1GBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_1GBPS;
+ break;
+ case BE2ISCSI_LINK_SPEED_10GBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_10GBPS;
+ break;
+ default:
+ ihost->port_speed = ISCSI_PORT_SPEED_UNKNOWN;
+ }
+ return 0;
+}
+
+/**
* beiscsi_get_host_param - get the iscsi parameter
* @shost: pointer to scsi_host structure
* @param: parameter type identifier
@@ -301,6 +713,27 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
return status;
}
break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ status = beiscsi_get_initname(buf, phba);
+ if (status < 0) {
+ SE_DEBUG(DBG_LVL_1,
+ "Retreiving Initiator Name Failed\n");
+ return status;
+ }
+ break;
+ case ISCSI_HOST_PARAM_PORT_STATE:
+ beiscsi_get_port_state(shost);
+ status = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
+ break;
+ case ISCSI_HOST_PARAM_PORT_SPEED:
+ status = beiscsi_get_port_speed(shost);
+ if (status) {
+ SE_DEBUG(DBG_LVL_1,
+ "Retreiving Port Speed Failed\n");
+ return status;
+ }
+ status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
+ break;
default:
return iscsi_host_get_param(shost, param, buf);
}
@@ -309,46 +742,21 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
{
- struct be_cmd_resp_get_mac_addr *resp;
- struct be_mcc_wrb *wrb;
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ struct be_cmd_get_nic_conf_resp resp;
int rc;
- if (phba->read_mac_address)
- return sysfs_format_mac(buf, phba->mac_address,
- ETH_ALEN);
+ if (strlen(phba->mac_address))
+ return strlcpy(buf, phba->mac_address, PAGE_SIZE);
- tag = be_cmd_get_mac_addr(phba);
- if (!tag) {
- SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
- return -EBUSY;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
+ memset(&resp, 0, sizeof(resp));
+ rc = mgmt_get_nic_conf(phba, &resp);
+ if (rc)
+ return rc;
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
- SE_DEBUG(DBG_LVL_1, "Failed to get be_cmd_get_mac_addr"
- " status = %d extd_status = %d\n",
- status, extd_status);
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
- }
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
- resp = embedded_payload(wrb);
- memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
- rc = sysfs_format_mac(buf, phba->mac_address,
- ETH_ALEN);
- phba->read_mac_address = 1;
- return rc;
+ memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);
+ return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
}
-
/**
* beiscsi_conn_get_stats - get the iscsi stats
* @cls_conn: pointer to iscsi cls conn
@@ -736,11 +1144,24 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
umode_t be2iscsi_attr_is_visible(int param_type, int param)
{
switch (param_type) {
+ case ISCSI_NET_PARAM:
+ switch (param) {
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ case ISCSI_NET_PARAM_IPV4_GW:
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
case ISCSI_HOST_PARAM:
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
- case ISCSI_HOST_PARAM_IPADDRESS:
case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ case ISCSI_HOST_PARAM_PORT_STATE:
+ case ISCSI_HOST_PARAM_PORT_SPEED:
return S_IRUGO;
default:
return 0;
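
[Annotation] The beiscsi_get_port_speed() switch added above translates the firmware's BE2ISCSI_LINK_SPEED_* codes into the transport's ISCSI_PORT_SPEED_* values. The same mapping can be expressed as a designated-initializer lookup table with a bounds check; a hypothetical alternative, not part of the patch (speed_map and be2iscsi_map_speed() are invented names):

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/types.h>
#include <scsi/scsi_transport_iscsi.h>	/* enum iscsi_port_speed */

/* Table keyed by the firmware mac_speed code (BE2ISCSI_LINK_SPEED_*),
 * with out-of-range codes falling back to ISCSI_PORT_SPEED_UNKNOWN. */
static const enum iscsi_port_speed speed_map[] = {
	[BE2ISCSI_LINK_SPEED_ZERO]    = ISCSI_PORT_SPEED_UNKNOWN,
	[BE2ISCSI_LINK_SPEED_10MBPS]  = ISCSI_PORT_SPEED_10MBPS,
	[BE2ISCSI_LINK_SPEED_100MBPS] = ISCSI_PORT_SPEED_100MBPS,
	[BE2ISCSI_LINK_SPEED_1GBPS]   = ISCSI_PORT_SPEED_1GBPS,
	[BE2ISCSI_LINK_SPEED_10GBPS]  = ISCSI_PORT_SPEED_10GBPS,
};

static enum iscsi_port_speed be2iscsi_map_speed(u8 mac_speed)
{
	if (mac_speed >= ARRAY_SIZE(speed_map))
		return ISCSI_PORT_SPEED_UNKNOWN;
	return speed_map[mac_speed];
}
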
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 5c45be134501..8b826fc06bcc 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -25,6 +25,21 @@
#define BE2_IPV4 0x1
#define BE2_IPV6 0x10
+#define BE2_DHCP_V4 0x05
+
+#define NON_BLOCKING 0x0
+#define BLOCKING 0x1
+
+void beiscsi_create_def_ifaces(struct beiscsi_hba *phba);
+
+void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba);
+
+int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+ enum iscsi_param_type param_type,
+ int param, char *buf);
+
+int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+ void *data, uint32_t count);
umode_t be2iscsi_attr_is_visible(int param_type, int param);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 375756fa95cf..0b1d99c99fd2 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -28,8 +28,11 @@
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
+#include <linux/bsg-lib.h>
#include <scsi/libiscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
+#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
@@ -48,7 +51,8 @@ static unsigned int num_hba = 0;
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_VERSION(BUILD_STR);
+MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
@@ -147,15 +151,15 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
struct invalidate_command_table *inv_tbl;
struct be_dma_mem nonemb_cmd;
unsigned int cid, tag, i, num_invalidate;
- int rc = FAILED;
/* invalidate iocbs */
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
spin_lock_bh(&session->lock);
- if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
- goto unlock;
-
+ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
+ spin_unlock_bh(&session->lock);
+ return FAILED;
+ }
conn = session->leadconn;
beiscsi_conn = conn->dd_data;
phba = beiscsi_conn->phba;
@@ -208,9 +212,6 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return iscsi_eh_device_reset(sc);
-unlock:
- spin_unlock_bh(&session->lock);
- return rc;
}
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
@@ -230,10 +231,10 @@ static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
case ISCSI_BOOT_TGT_IP_ADDR:
if (boot_conn->dest_ipaddr.ip_type == 0x1)
rc = sprintf(buf, "%pI4\n",
- (char *)&boot_conn->dest_ipaddr.ip_address);
+ (char *)&boot_conn->dest_ipaddr.addr);
else
rc = sprintf(str, "%pI6\n",
- (char *)&boot_conn->dest_ipaddr.ip_address);
+ (char *)&boot_conn->dest_ipaddr.addr);
break;
case ISCSI_BOOT_TGT_PORT:
rc = sprintf(str, "%d\n", boot_conn->dest_port);
@@ -311,12 +312,8 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
rc = sprintf(str, "0\n");
break;
case ISCSI_BOOT_ETH_MAC:
- rc = beiscsi_get_macaddr(buf, phba);
- if (rc < 0) {
- SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
- return rc;
- }
- break;
+ rc = beiscsi_get_macaddr(str, phba);
+ break;
default:
rc = -ENOSYS;
break;
@@ -394,7 +391,7 @@ MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
static struct scsi_host_template beiscsi_sht = {
.module = THIS_MODULE,
- .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
+ .name = "Emulex 10Gbe open-iscsi Initiator Driver",
.proc_name = DRV_NAME,
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
@@ -409,6 +406,8 @@ static struct scsi_host_template beiscsi_sht = {
.max_sectors = BEISCSI_MAX_SECTORS,
.cmd_per_lun = BEISCSI_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
+ .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
+
};
static struct scsi_transport_template *beiscsi_scsi_transport;
@@ -435,6 +434,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
phba->shost = shost;
phba->pcidev = pci_dev_get(pcidev);
pci_set_drvdata(pcidev, phba);
+ phba->interface_handle = 0xFFFFFFFF;
if (iscsi_host_add(shost, &phba->pcidev->dev))
goto free_devices;
@@ -544,8 +544,7 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
&mbox_mem_alloc->dma);
if (!mbox_mem_alloc->va) {
beiscsi_unmap_pci_function(phba);
- status = -ENOMEM;
- return status;
+ return -ENOMEM;
}
mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
@@ -1252,9 +1251,9 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
task = pwrb_handle->pio_handle;
io_task = task->dd_data;
- spin_lock(&phba->mgmt_sgl_lock);
+ spin_lock_bh(&phba->mgmt_sgl_lock);
free_mgmt_sgl_handle(phba, io_task->psgl_handle);
- spin_unlock(&phba->mgmt_sgl_lock);
+ spin_unlock_bh(&phba->mgmt_sgl_lock);
spin_lock_bh(&session->lock);
free_wrb_handle(phba, pwrb_context, pwrb_handle);
spin_unlock_bh(&session->lock);
@@ -1370,8 +1369,6 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
struct be_bus_address phys_addr;
struct list_head *pbusy_list;
struct async_pdu_handle *pasync_handle = NULL;
- int buffer_len = 0;
- unsigned char buffer_index = -1;
unsigned char is_header = 0;
phys_addr.u.a32.address_lo =
@@ -1392,22 +1389,11 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
index) / 32] & PDUCQE_INDEX_MASK));
-
- buffer_len = (unsigned int)(phys_addr.u.a64.address -
- pasync_ctx->async_header.pa_base.u.a64.address);
-
- buffer_index = buffer_len /
- pasync_ctx->async_header.buffer_size;
-
break;
case UNSOL_DATA_NOTIFY:
pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
dw[offsetof(struct amap_i_t_dpdu_cqe,
index) / 32] & PDUCQE_INDEX_MASK));
- buffer_len = (unsigned long)(phys_addr.u.a64.address -
- pasync_ctx->async_data.pa_base.u.
- a64.address);
- buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
break;
default:
pbusy_list = NULL;
@@ -1418,11 +1404,9 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
return NULL;
}
- WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
WARN_ON(list_empty(pbusy_list));
list_for_each_entry(pasync_handle, pbusy_list, link) {
- WARN_ON(pasync_handle->consumed);
- if (pasync_handle->index == buffer_index)
+ if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
break;
}
@@ -1449,15 +1433,13 @@ hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
unsigned int num_entries, writables = 0;
unsigned int *pep_read_ptr, *pwritables;
-
+ num_entries = pasync_ctx->num_entries;
if (is_header) {
pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
pwritables = &pasync_ctx->async_header.writables;
- num_entries = pasync_ctx->async_header.num_entries;
} else {
pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
pwritables = &pasync_ctx->async_data.writables;
- num_entries = pasync_ctx->async_data.num_entries;
}
while ((*pep_read_ptr) != cq_index) {
@@ -1491,14 +1473,13 @@ hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
return 0;
}
-static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
+static void hwi_free_async_msg(struct beiscsi_hba *phba,
unsigned int cri)
{
struct hwi_controller *phwi_ctrlr;
struct hwi_async_pdu_context *pasync_ctx;
struct async_pdu_handle *pasync_handle, *tmp_handle;
struct list_head *plist;
- unsigned int i = 0;
phwi_ctrlr = phba->phwi_ctrlr;
pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
@@ -1508,23 +1489,20 @@ static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
list_del(&pasync_handle->link);
- if (i == 0) {
+ if (pasync_handle->is_header) {
list_add_tail(&pasync_handle->link,
&pasync_ctx->async_header.free_list);
pasync_ctx->async_header.free_entries++;
- i++;
} else {
list_add_tail(&pasync_handle->link,
&pasync_ctx->async_data.free_list);
pasync_ctx->async_data.free_entries++;
- i++;
}
}
INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
- return 0;
}
static struct phys_addr *
@@ -1557,16 +1535,15 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
phwi_ctrlr = phba->phwi_ctrlr;
pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+ num_entries = pasync_ctx->num_entries;
if (is_header) {
- num_entries = pasync_ctx->async_header.num_entries;
writables = min(pasync_ctx->async_header.writables,
pasync_ctx->async_header.free_entries);
pfree_link = pasync_ctx->async_header.free_list.next;
host_write_num = pasync_ctx->async_header.host_write_ptr;
ring_id = phwi_ctrlr->default_pdu_hdr.id;
} else {
- num_entries = pasync_ctx->async_data.num_entries;
writables = min(pasync_ctx->async_data.writables,
pasync_ctx->async_data.free_entries);
pfree_link = pasync_ctx->async_data.free_list.next;
@@ -1673,7 +1650,7 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
}
memcpy(pfirst_buffer + offset,
pasync_handle->pbuffer, buf_len);
- offset = buf_len;
+ offset += buf_len;
}
index++;
}
@@ -1682,10 +1659,9 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
(beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start),
phdr, hdr_len, pfirst_buffer,
- buf_len);
+ offset);
- if (status == 0)
- hwi_free_async_msg(phba, cri);
+ hwi_free_async_msg(phba, cri);
return 0;
}
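
[Annotation] The small change in hwi_fwd_async_msg() above ("offset = buf_len" becoming "offset += buf_len", with the running offset then passed as the forwarded length) fixes reassembly of unsolicited PDUs that span more than one data buffer: each fragment must land after the bytes already gathered, and the total length is the accumulated offset rather than the size of the last fragment. A standalone illustration of the before/after behaviour; gather() is invented for the example:

#include <stddef.h>
#include <string.h>

/* Copy nfrags fragments back-to-back into dst and return the total size.
 * With "offset = len[i]" (the old bug) every fragment after the first is
 * written at the wrong position, and the returned length is wrong too. */
static size_t gather(char *dst, const char *const frag[],
		     const size_t len[], int nfrags)
{
	size_t offset = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		memcpy(dst + offset, frag[i], len[i]);
		offset += len[i];
	}
	return offset;
}
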
@@ -2229,7 +2205,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
struct mem_array *mem_arr, *mem_arr_orig;
unsigned int i, j, alloc_size, curr_alloc_size;
- phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
+ phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
if (!phba->phwi_ctrlr)
return -ENOMEM;
@@ -2349,27 +2325,21 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba)
AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
}
-static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
+static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
{
struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
- struct wrb_handle *pwrb_handle;
+ struct wrb_handle *pwrb_handle = NULL;
struct hwi_controller *phwi_ctrlr;
struct hwi_wrb_context *pwrb_context;
- struct iscsi_wrb *pwrb;
- unsigned int num_cxn_wrbh;
- unsigned int num_cxn_wrb, j, idx, index;
+ struct iscsi_wrb *pwrb = NULL;
+ unsigned int num_cxn_wrbh = 0;
+ unsigned int num_cxn_wrb = 0, j, idx = 0, index;
mem_descr_wrbh = phba->init_mem;
mem_descr_wrbh += HWI_MEM_WRBH;
mem_descr_wrb = phba->init_mem;
mem_descr_wrb += HWI_MEM_WRB;
-
- idx = 0;
- pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
- num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
- ((sizeof(struct wrb_handle)) *
- phba->params.wrbs_per_cxn));
phwi_ctrlr = phba->phwi_ctrlr;
for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
@@ -2377,12 +2347,32 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
pwrb_context->pwrb_handle_base =
kzalloc(sizeof(struct wrb_handle *) *
phba->params.wrbs_per_cxn, GFP_KERNEL);
+ if (!pwrb_context->pwrb_handle_base) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Mem Alloc Failed. Failing to load\n");
+ goto init_wrb_hndl_failed;
+ }
pwrb_context->pwrb_handle_basestd =
kzalloc(sizeof(struct wrb_handle *) *
phba->params.wrbs_per_cxn, GFP_KERNEL);
+ if (!pwrb_context->pwrb_handle_basestd) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Mem Alloc Failed. Failing to load\n");
+ goto init_wrb_hndl_failed;
+ }
+ if (!num_cxn_wrbh) {
+ pwrb_handle =
+ mem_descr_wrbh->mem_array[idx].virtual_address;
+ num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
+ ((sizeof(struct wrb_handle)) *
+ phba->params.wrbs_per_cxn));
+ idx++;
+ }
+ pwrb_context->alloc_index = 0;
+ pwrb_context->wrb_handles_available = 0;
+ pwrb_context->free_index = 0;
+
if (num_cxn_wrbh) {
- pwrb_context->alloc_index = 0;
- pwrb_context->wrb_handles_available = 0;
for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
pwrb_context->pwrb_handle_base[j] = pwrb_handle;
pwrb_context->pwrb_handle_basestd[j] =
@@ -2391,49 +2381,21 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
pwrb_handle->wrb_index = j;
pwrb_handle++;
}
- pwrb_context->free_index = 0;
- num_cxn_wrbh--;
- } else {
- idx++;
- pwrb_handle =
- mem_descr_wrbh->mem_array[idx].virtual_address;
- num_cxn_wrbh =
- ((mem_descr_wrbh->mem_array[idx].size) /
- ((sizeof(struct wrb_handle)) *
- phba->params.wrbs_per_cxn));
- pwrb_context->alloc_index = 0;
- for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
- pwrb_context->pwrb_handle_base[j] = pwrb_handle;
- pwrb_context->pwrb_handle_basestd[j] =
- pwrb_handle;
- pwrb_context->wrb_handles_available++;
- pwrb_handle->wrb_index = j;
- pwrb_handle++;
- }
- pwrb_context->free_index = 0;
num_cxn_wrbh--;
}
}
idx = 0;
- pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
- num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
- ((sizeof(struct iscsi_wrb) *
- phba->params.wrbs_per_cxn));
for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
pwrb_context = &phwi_ctrlr->wrb_context[index];
- if (num_cxn_wrb) {
- for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
- pwrb_handle = pwrb_context->pwrb_handle_base[j];
- pwrb_handle->pwrb = pwrb;
- pwrb++;
- }
- num_cxn_wrb--;
- } else {
- idx++;
+ if (!num_cxn_wrb) {
pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
- ((sizeof(struct iscsi_wrb) *
- phba->params.wrbs_per_cxn));
+ ((sizeof(struct iscsi_wrb) *
+ phba->params.wrbs_per_cxn));
+ idx++;
+ }
+
+ if (num_cxn_wrb) {
for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
pwrb_handle = pwrb_context->pwrb_handle_base[j];
pwrb_handle->pwrb = pwrb;
@@ -2442,6 +2404,14 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
num_cxn_wrb--;
}
}
+ return 0;
+init_wrb_hndl_failed:
+ for (j = index; j > 0; j--) {
+ pwrb_context = &phwi_ctrlr->wrb_context[j];
+ kfree(pwrb_context->pwrb_handle_base);
+ kfree(pwrb_context->pwrb_handle_basestd);
+ }
+ return -ENOMEM;
}
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
@@ -2450,7 +2420,7 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
struct hba_parameters *p = &phba->params;
struct hwi_async_pdu_context *pasync_ctx;
struct async_pdu_handle *pasync_header_h, *pasync_data_h;
- unsigned int index;
+ unsigned int index, idx, num_per_mem, num_async_data;
struct be_mem_descriptor *mem_descr;
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
@@ -2462,10 +2432,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
memset(pasync_ctx, 0, sizeof(*pasync_ctx));
- pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
- pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
- pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
- pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
+ pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
+ pasync_ctx->buffer_size = p->defpdu_hdr_sz;
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
@@ -2510,19 +2478,6 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx->async_header.writables = 0;
INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_DATA_BUF;
- if (mem_descr->mem_array[0].virtual_address) {
- SE_DEBUG(DBG_LVL_8,
- "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
- "va=%p\n", mem_descr->mem_array[0].virtual_address);
- } else
- shost_printk(KERN_WARNING, phba->shost,
- "No Virtual address\n");
- pasync_ctx->async_data.va_base =
- mem_descr->mem_array[0].virtual_address;
- pasync_ctx->async_data.pa_base.u.a64.address =
- mem_descr->mem_array[0].bus_address.u.a64.address;
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_DATA_RING;
@@ -2553,6 +2508,25 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_data_h =
(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_BUF;
+ if (mem_descr->mem_array[0].virtual_address) {
+ SE_DEBUG(DBG_LVL_8,
+ "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
+ "va=%p\n", mem_descr->mem_array[0].virtual_address);
+ } else
+ shost_printk(KERN_WARNING, phba->shost,
+ "No Virtual address\n");
+ idx = 0;
+ pasync_ctx->async_data.va_base =
+ mem_descr->mem_array[idx].virtual_address;
+ pasync_ctx->async_data.pa_base.u.a64.address =
+ mem_descr->mem_array[idx].bus_address.u.a64.address;
+
+ num_async_data = ((mem_descr->mem_array[idx].size) /
+ phba->params.defpdu_data_sz);
+ num_per_mem = 0;
+
for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
pasync_header_h->cri = -1;
pasync_header_h->index = (char)index;
@@ -2578,14 +2552,29 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_data_h->cri = -1;
pasync_data_h->index = (char)index;
INIT_LIST_HEAD(&pasync_data_h->link);
+
+ if (!num_async_data) {
+ num_per_mem = 0;
+ idx++;
+ pasync_ctx->async_data.va_base =
+ mem_descr->mem_array[idx].virtual_address;
+ pasync_ctx->async_data.pa_base.u.a64.address =
+ mem_descr->mem_array[idx].
+ bus_address.u.a64.address;
+
+ num_async_data = ((mem_descr->mem_array[idx].size) /
+ phba->params.defpdu_data_sz);
+ }
pasync_data_h->pbuffer =
(void *)((unsigned long)
(pasync_ctx->async_data.va_base) +
- (p->defpdu_data_sz * index));
+ (p->defpdu_data_sz * num_per_mem));
pasync_data_h->pa.u.a64.address =
pasync_ctx->async_data.pa_base.u.a64.address +
- (p->defpdu_data_sz * index);
+ (p->defpdu_data_sz * num_per_mem);
+ num_per_mem++;
+ num_async_data--;
list_add_tail(&pasync_data_h->link,
&pasync_ctx->async_data.free_list);
@@ -2913,9 +2902,11 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
- if (mem->va)
+ if (mem->va) {
pci_free_consistent(phba->pcidev, mem->size,
mem->va, mem->dma);
+ mem->va = NULL;
+ }
}
static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
@@ -3215,7 +3206,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
error:
shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
hwi_cleanup(phba);
- return -ENOMEM;
+ return status;
}
static int hwi_init_controller(struct beiscsi_hba *phba)
@@ -3236,7 +3227,9 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
}
iscsi_init_global_templates(phba);
- beiscsi_init_wrb_handle(phba);
+ if (beiscsi_init_wrb_handle(phba))
+ return -ENOMEM;
+
hwi_init_async_pdu_ctx(phba);
if (hwi_init_port(phba) != 0) {
shost_printk(KERN_ERR, phba->shost,
@@ -3288,7 +3281,7 @@ static int beiscsi_init_controller(struct beiscsi_hba *phba)
free_init:
beiscsi_free_mem(phba);
- return -ENOMEM;
+ return ret;
}
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
@@ -3475,8 +3468,8 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
{
- struct be_cmd_resp_get_boot_target *boot_resp;
- struct be_cmd_resp_get_session *session_resp;
+ struct be_cmd_get_boot_target_resp *boot_resp;
+ struct be_cmd_get_session_resp *session_resp;
struct be_mcc_wrb *wrb;
struct be_dma_mem nonemb_cmd;
unsigned int tag, wrb_num;
@@ -3484,9 +3477,9 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
int ret = -ENOMEM;
- tag = beiscsi_get_boot_target(phba);
+ tag = mgmt_get_boot_target(phba);
if (!tag) {
- SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
+ SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed\n");
return -EAGAIN;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -3496,7 +3489,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
- SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
+ SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed"
" status = %d extd_status = %d\n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
@@ -3522,8 +3515,8 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
}
memset(nonemb_cmd.va, 0, sizeof(*session_resp));
- tag = beiscsi_get_session_info(phba,
- boot_resp->boot_session_handle, &nonemb_cmd);
+ tag = mgmt_get_session_info(phba, boot_resp->boot_session_handle,
+ &nonemb_cmd);
if (!tag) {
SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
" Failed\n");
@@ -3696,6 +3689,57 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
kfree(phba->ep_array);
}
+static void beiscsi_cleanup_task(struct iscsi_task *task)
+{
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
+ - phba->fw_config.iscsi_cid_start];
+
+ if (io_task->cmd_bhs) {
+ pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+ io_task->bhs_pa.u.a64.address);
+ io_task->cmd_bhs = NULL;
+ }
+
+ if (task->sc) {
+ if (io_task->pwrb_handle) {
+ free_wrb_handle(phba, pwrb_context,
+ io_task->pwrb_handle);
+ io_task->pwrb_handle = NULL;
+ }
+
+ if (io_task->psgl_handle) {
+ spin_lock(&phba->io_sgl_lock);
+ free_io_sgl_handle(phba, io_task->psgl_handle);
+ spin_unlock(&phba->io_sgl_lock);
+ io_task->psgl_handle = NULL;
+ }
+ } else {
+ if (!beiscsi_conn->login_in_progress) {
+ if (io_task->pwrb_handle) {
+ free_wrb_handle(phba, pwrb_context,
+ io_task->pwrb_handle);
+ io_task->pwrb_handle = NULL;
+ }
+ if (io_task->psgl_handle) {
+ spin_lock(&phba->mgmt_sgl_lock);
+ free_mgmt_sgl_handle(phba,
+ io_task->psgl_handle);
+ spin_unlock(&phba->mgmt_sgl_lock);
+ io_task->psgl_handle = NULL;
+ }
+ }
+ }
+}
+
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params)
@@ -3704,12 +3748,19 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct iscsi_target_context_update_wrb *pwrb = NULL;
struct be_mem_descriptor *mem_descr;
struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_task *task = beiscsi_conn->task;
+ struct iscsi_session *session = task->conn->session;
u32 doorbell = 0;
/*
* We can always use 0 here because it is reserved by libiscsi for
* login/startup related tasks.
*/
+ beiscsi_conn->login_in_progress = 0;
+ spin_lock_bh(&session->lock);
+ beiscsi_cleanup_task(task);
+ spin_unlock_bh(&session->lock);
+
pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start));
pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
@@ -3823,7 +3874,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
task->hdr_max = sizeof(struct be_cmd_bhs);
io_task->psgl_handle = NULL;
- io_task->psgl_handle = NULL;
+ io_task->pwrb_handle = NULL;
if (task->sc) {
spin_lock(&phba->io_sgl_lock);
@@ -3865,6 +3916,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
io_task->pwrb_handle =
beiscsi_conn->plogin_wrb_handle;
}
+ beiscsi_conn->task = task;
} else {
spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@@ -3907,53 +3959,11 @@ free_hndls:
io_task->pwrb_handle = NULL;
pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
io_task->bhs_pa.u.a64.address);
+ io_task->cmd_bhs = NULL;
SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
return -ENOMEM;
}
-static void beiscsi_cleanup_task(struct iscsi_task *task)
-{
- struct beiscsi_io_task *io_task = task->dd_data;
- struct iscsi_conn *conn = task->conn;
- struct beiscsi_conn *beiscsi_conn = conn->dd_data;
- struct beiscsi_hba *phba = beiscsi_conn->phba;
- struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
- struct hwi_wrb_context *pwrb_context;
- struct hwi_controller *phwi_ctrlr;
-
- phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
- - phba->fw_config.iscsi_cid_start];
- if (io_task->pwrb_handle) {
- free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
- io_task->pwrb_handle = NULL;
- }
-
- if (io_task->cmd_bhs) {
- pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
- io_task->bhs_pa.u.a64.address);
- }
-
- if (task->sc) {
- if (io_task->psgl_handle) {
- spin_lock(&phba->io_sgl_lock);
- free_io_sgl_handle(phba, io_task->psgl_handle);
- spin_unlock(&phba->io_sgl_lock);
- io_task->psgl_handle = NULL;
- }
- } else {
- if (task->hdr &&
- ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
- return;
- if (io_task->psgl_handle) {
- spin_lock(&phba->mgmt_sgl_lock);
- free_mgmt_sgl_handle(phba, io_task->psgl_handle);
- spin_unlock(&phba->mgmt_sgl_lock);
- io_task->psgl_handle = NULL;
- }
- }
-}
-
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
unsigned int num_sg, unsigned int xferlen,
unsigned int writedir)
@@ -3993,7 +4003,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
&io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
- cpu_to_be16(*(unsigned short *)&io_task->cmd_bhs->iscsi_hdr.lun));
+ cpu_to_be16(*(unsigned short *)
+ &io_task->cmd_bhs->iscsi_hdr.lun));
AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
io_task->pwrb_handle->wrb_index);
@@ -4126,6 +4137,76 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
}
+/**
+ * beiscsi_bsg_request - handle bsg request from ISCSI transport
+ * @job: job to handle
+ */
+static int beiscsi_bsg_request(struct bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct beiscsi_hba *phba;
+ struct iscsi_bsg_request *bsg_req = job->request;
+ int rc = -EINVAL;
+ unsigned int tag;
+ struct be_dma_mem nonemb_cmd;
+ struct be_cmd_resp_hdr *resp;
+ struct iscsi_bsg_reply *bsg_reply = job->reply;
+ unsigned short status, extd_status;
+
+ shost = iscsi_job_to_shost(job);
+ phba = iscsi_host_priv(shost);
+
+ switch (bsg_req->msgcode) {
+ case ISCSI_BSG_HST_VENDOR:
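+		/*
+		 * Vendor pass-through: stage the bsg payload in a
+		 * non-embedded DMA buffer, fire the MCC command, wait on
+		 * the tagged completion, then copy the firmware response
+		 * back into the bsg reply payload.
+		 */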
+ nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ job->request_payload.payload_len,
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+ SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for "
+ "beiscsi_bsg_request\n");
+ return -EIO;
+ }
+ tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
+ &nonemb_cmd);
+ if (!tag) {
+			SE_DEBUG(DBG_LVL_1,
+				 "mgmt_vendor_specific_fw_cmd Failed\n");
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return -EAGAIN;
+		}
+
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+					 phba->ctrl.mcc_numtag[tag]);
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ free_mcc_tag(&phba->ctrl, tag);
+ resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ nonemb_cmd.va, (resp->response_length
+ + sizeof(*resp)));
+ bsg_reply->reply_payload_rcv_len = resp->response_length;
+ bsg_reply->result = status;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ if (status || extd_status) {
+			SE_DEBUG(DBG_LVL_1, "mgmt_vendor_specific_fw_cmd"
+				 " Failed: status = %d extd_status = %d\n",
+				 status, extd_status);
+ return -EIO;
+ }
+		rc = 0;	/* reply already delivered via bsg_job_done() */
+		break;
+
+ default:
+ SE_DEBUG(DBG_LVL_1, "Unsupported bsg command: 0x%x\n",
+ bsg_req->msgcode);
+ break;
+ }
+
+ return rc;
+}
+
static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
struct hwi_controller *phwi_ctrlr;
@@ -4183,6 +4264,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
return;
}
+ beiscsi_destroy_def_ifaces(phba);
beiscsi_quiesce(phba);
iscsi_boot_destroy_kset(phba->boot_kset);
iscsi_host_remove(phba->shost);
@@ -4267,8 +4349,11 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
phba->num_cpus = num_cpus;
SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
- if (enable_msix)
+ if (enable_msix) {
beiscsi_msix_enable(phba);
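+		/* no MSI-X: fall back to a single completion-processing path */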
+ if (!phba->msix_enabled)
+ phba->num_cpus = 1;
+ }
ret = be_ctrl_init(phba, pcidev);
if (ret) {
shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -4366,8 +4451,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
* iscsi boot.
*/
shost_printk(KERN_ERR, phba->shost, "Could not set up "
- "iSCSI boot info.");
+ "iSCSI boot info.\n");
+ beiscsi_create_def_ifaces(phba);
SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
return 0;
@@ -4418,6 +4504,8 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.bind_conn = beiscsi_conn_bind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = be2iscsi_attr_is_visible,
+ .set_iface_param = be2iscsi_iface_set_param,
+ .get_iface_param = be2iscsi_iface_get_param,
.set_param = beiscsi_set_param,
.get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
@@ -4435,6 +4523,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.ep_poll = beiscsi_ep_poll,
.ep_disconnect = beiscsi_ep_disconnect,
.session_recovery_timedout = iscsi_session_recovery_timedout,
+ .bsg_request = beiscsi_bsg_request,
};
static struct pci_driver beiscsi_pci_driver = {
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index b4a06d5e5f9e..40fea6ec879c 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -34,9 +34,9 @@
#include "be.h"
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "4.1.239.0"
-#define BE_NAME "ServerEngines BladeEngine2" \
- "Linux iSCSI Driver version" BUILD_STR
+#define BUILD_STR "4.2.162.0"
+#define BE_NAME "Emulex OneConnect " \
+	"Open-iSCSI Driver version " BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
#define BE_VENDOR_ID 0x19A2
@@ -316,6 +316,8 @@ struct beiscsi_hba {
struct iscsi_endpoint **ep_array;
struct iscsi_boot_kset *boot_kset;
struct Scsi_Host *shost;
+ struct iscsi_iface *ipv4_iface;
+ struct iscsi_iface *ipv6_iface;
struct {
/**
* group together since they are used most frequently
@@ -345,7 +347,7 @@ struct beiscsi_hba {
struct work_struct work_cqs; /* The work being queued */
struct be_ctrl_info ctrl;
unsigned int generation;
- unsigned int read_mac_address;
+ unsigned int interface_handle;
struct mgmt_session_info boot_sess;
struct invalidate_command_table inv_tbl[128];
@@ -525,8 +527,6 @@ struct hwi_async_pdu_context {
unsigned int free_entries;
unsigned int busy_entries;
- unsigned int buffer_size;
- unsigned int num_entries;
struct list_head free_list;
} async_header;
@@ -543,11 +543,12 @@ struct hwi_async_pdu_context {
unsigned int free_entries;
unsigned int busy_entries;
- unsigned int buffer_size;
struct list_head free_list;
- unsigned int num_entries;
} async_data;
+ unsigned int buffer_size;
+ unsigned int num_entries;
+
/**
* This is a varying size list! Do not add anything
* after this entry!!
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 44762cfa3e12..01bb04cd9e75 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -17,15 +17,17 @@
* Costa Mesa, CA 92626
*/
+#include <linux/bsg-lib.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
#include "be_mgmt.h"
#include "be_iscsi.h"
-#include <scsi/scsi_transport_iscsi.h>
-unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba)
+unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
- struct be_cmd_req_get_mac_addr *req;
+ struct be_cmd_get_boot_target_req *req;
unsigned int tag = 0;
SE_DEBUG(DBG_LVL_8, "In bescsi_get_boot_target\n");
@@ -42,22 +44,22 @@ unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba)
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
- sizeof(*req));
+ sizeof(struct be_cmd_get_boot_target_resp));
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
}
-unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba,
- u32 boot_session_handle,
- struct be_dma_mem *nonemb_cmd)
+unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
+ u32 boot_session_handle,
+ struct be_dma_mem *nonemb_cmd)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
unsigned int tag = 0;
- struct be_cmd_req_get_session *req;
- struct be_cmd_resp_get_session *resp;
+ struct be_cmd_get_session_req *req;
+ struct be_cmd_get_session_resp *resp;
struct be_sge *sge;
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_session_info\n");
@@ -187,6 +189,72 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
return status;
}
+unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba,
+ struct bsg_job *job,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_cmd_resp_hdr *resp;
+ struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
+ struct be_sge *mcc_sge = nonembedded_sgl(wrb);
+ unsigned int tag = 0;
+ struct iscsi_bsg_request *bsg_req = job->request;
+ struct be_bsg_vendor_cmd *req = nonemb_cmd->va;
+ unsigned short region, sector_size, sector, offset;
+
+ nonemb_cmd->size = job->request_payload.payload_len;
+ memset(nonemb_cmd->va, 0, nonemb_cmd->size);
+ resp = nonemb_cmd->va;
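+	/*
+	 * vendor_cmd[] layout from user space: [0] sub-opcode
+	 * (BEISCSI_WRITE_FLASH/BEISCSI_READ_FLASH), [1] flash region,
+	 * [2] sector size, [3] sector, [4] offset within the sector.
+	 */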
+ region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+ sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[4];
+ req->region = region;
+ req->sector = sector;
+ req->offset = offset;
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
+ case BEISCSI_WRITE_FLASH:
+ offset = sector * sector_size + offset;
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_WRITE_FLASH, sizeof(*req));
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ nonemb_cmd->va + offset, job->request_len);
+ break;
+ case BEISCSI_READ_FLASH:
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_READ_FLASH, sizeof(*req));
+ break;
+ default:
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Unsupported cmd = 0x%x\n",
+			     bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
+ spin_unlock(&ctrl->mbox_lock);
+ return -ENOSYS;
+ }
+
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
+ job->request_payload.sg_cnt);
+ mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ mcc_sge->len = cpu_to_le32(nonemb_cmd->size);
+ wrb->tag0 |= tag;
+
+ be_mcc_notify(phba);
+
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
@@ -328,7 +396,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr *dst_addr,
struct beiscsi_endpoint *beiscsi_ep,
struct be_dma_mem *nonemb_cmd)
-
{
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
@@ -374,17 +441,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
if (dst_addr->sa_family == PF_INET) {
__be32 s_addr = daddr_in->sin_addr.s_addr;
req->ip_address.ip_type = BE2_IPV4;
- req->ip_address.ip_address[0] = s_addr & 0x000000ff;
- req->ip_address.ip_address[1] = (s_addr & 0x0000ff00) >> 8;
- req->ip_address.ip_address[2] = (s_addr & 0x00ff0000) >> 16;
- req->ip_address.ip_address[3] = (s_addr & 0xff000000) >> 24;
+ req->ip_address.addr[0] = s_addr & 0x000000ff;
+ req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8;
+ req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16;
+ req->ip_address.addr[3] = (s_addr & 0xff000000) >> 24;
req->tcp_port = ntohs(daddr_in->sin_port);
beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
beiscsi_ep->ip_type = BE2_IPV4;
} else if (dst_addr->sa_family == PF_INET6) {
req->ip_address.ip_type = BE2_IPV6;
- memcpy(&req->ip_address.ip_address,
+ memcpy(&req->ip_address.addr,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
req->tcp_port = ntohs(daddr_in6->sin6_port);
beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
@@ -419,14 +486,399 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
return tag;
}
-unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
+unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_get_mac_addr *req;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_get_all_if_id_req *req = embedded_payload(wrb);
+ struct be_cmd_get_all_if_id_req *pbe_allid = req;
+ int status = 0;
+
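+	/* issued on the synchronous mailbox, so the response is valid
+	 * as soon as be_mbox_notify() returns
+	 */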
+ memset(wrb, 0, sizeof(*wrb));
+
+ spin_lock(&ctrl->mbox_lock);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
+ sizeof(*req));
+ status = be_mbox_notify(ctrl);
+ if (!status)
+ phba->interface_handle = pbe_allid->if_hndl_list[0];
+ else {
+ shost_printk(KERN_WARNING, phba->shost,
+ "Failed in mgmt_get_all_if_id\n");
+ }
+ spin_unlock(&ctrl->mbox_lock);
+
+ return status;
+}
+
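+/**
+ * mgmt_exec_nonemb_cmd() - issue a non-embedded MCC command and wait
+ *
+ * Maps @nonemb_cmd into the WRB SGL, waits on the tagged completion and
+ * optionally copies @resp_buf_len bytes of the response into @resp_buf.
+ * The DMA buffer is freed on all paths before returning.
+ */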
+static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
+ struct be_dma_mem *nonemb_cmd, void *resp_buf,
+ int resp_buf_len)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
+ unsigned short status, extd_status;
+ struct be_sge *sge;
+ unsigned int tag;
+ int rc = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ rc = -ENOMEM;
+ goto free_cmd;
+ }
+ memset(wrb, 0, sizeof(*wrb));
+ wrb->tag0 |= tag;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ if (status || extd_status) {
+		SE_DEBUG(DBG_LVL_1,
+			 "mgmt_exec_nonemb_cmd Failed status = %d "
+			 "extd_status = %d\n", status, extd_status);
+ rc = -EIO;
+ goto free_tag;
+ }
+
+ if (resp_buf)
+ memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
+
+free_tag:
+ free_mcc_tag(&phba->ctrl, tag);
+free_cmd:
+ pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
+ nonemb_cmd->va, nonemb_cmd->dma);
+ return rc;
+}
+
+static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
+ int iscsi_cmd, int size)
+{
+	cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
+ if (!cmd->va) {
+ SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
+ return -ENOMEM;
+ }
+	memset(cmd->va, 0, size);
+ cmd->size = size;
+ be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
+ return 0;
+}
+
+static int
+mgmt_static_ip_modify(struct beiscsi_hba *phba,
+ struct be_cmd_get_if_info_resp *if_info,
+ struct iscsi_iface_param_info *ip_param,
+ struct iscsi_iface_param_info *subnet_param,
+ uint32_t ip_action)
+{
+ struct be_cmd_set_ip_addr_req *req;
+ struct be_dma_mem nonemb_cmd;
+ uint32_t ip_type;
+ int rc;
+
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+ sizeof(*req));
+ if (rc)
+ return rc;
+
+ ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+		  BE2_IPV6 : BE2_IPV4;
+
+ req = nonemb_cmd.va;
+ req->ip_params.record_entry_count = 1;
+ req->ip_params.ip_record.action = ip_action;
+ req->ip_params.ip_record.interface_hndl =
+ phba->interface_handle;
+ req->ip_params.ip_record.ip_addr.size_of_structure =
+ sizeof(struct be_ip_addr_subnet_format);
+ req->ip_params.ip_record.ip_addr.ip_type = ip_type;
+
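+	/*
+	 * For IP_ACTION_DEL, echo the currently configured address and
+	 * subnet back to the firmware so the right record is removed.
+	 */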
+ if (ip_action == IP_ACTION_ADD) {
+ memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
+ ip_param->len);
+
+ if (subnet_param)
+ memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+ subnet_param->value, subnet_param->len);
+ } else {
+ memcpy(req->ip_params.ip_record.ip_addr.addr,
+ if_info->ip_addr.addr, ip_param->len);
+
+ memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+ if_info->ip_addr.subnet_mask, ip_param->len);
+ }
+
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ if (rc < 0)
+ shost_printk(KERN_WARNING, phba->shost,
+ "Failed to Modify existing IP Address\n");
+ return rc;
+}
+
+static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
+ uint32_t gtway_action, uint32_t param_len)
+{
+ struct be_cmd_set_def_gateway_req *req;
+ struct be_dma_mem nonemb_cmd;
+ int rt_val;
+
+
+ rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
+ sizeof(*req));
+ if (rt_val)
+ return rt_val;
+
+ req = nonemb_cmd.va;
+ req->action = gtway_action;
+ req->ip_addr.ip_type = BE2_IPV4;
+
+ memcpy(req->ip_addr.addr, gt_addr, param_len);
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+}
+
+int mgmt_set_ip(struct beiscsi_hba *phba,
+ struct iscsi_iface_param_info *ip_param,
+ struct iscsi_iface_param_info *subnet_param,
+ uint32_t boot_proto)
+{
+ struct be_cmd_get_def_gateway_resp gtway_addr_set;
+ struct be_cmd_get_if_info_resp if_info;
+ struct be_cmd_set_dhcp_req *dhcpreq;
+ struct be_cmd_rel_dhcp_req *reldhcp;
+ struct be_dma_mem nonemb_cmd;
+ uint8_t *gtway_addr;
+ uint32_t ip_type;
+ int rc;
+
+ if (mgmt_get_all_if_id(phba))
+ return -EIO;
+
+ memset(&if_info, 0, sizeof(if_info));
+ ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+		  BE2_IPV6 : BE2_IPV4;
+
+ rc = mgmt_get_if_info(phba, ip_type, &if_info);
+ if (rc)
+ return rc;
+
+ if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+ if (if_info.dhcp_state) {
+ shost_printk(KERN_WARNING, phba->shost,
+ "DHCP Already Enabled\n");
+ return 0;
+ }
+		/* ip_param->len is 1 in the DHCP case. Set the proper
+		 * address length here, since it is used below when
+		 * freeing the static IP.
+		 */
+ ip_param->len = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+ IP_V6_LEN : IP_V4_LEN;
+
+ } else {
+ if (if_info.dhcp_state) {
+
+ memset(&if_info, 0, sizeof(if_info));
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
+ sizeof(*reldhcp));
+
+ if (rc)
+ return rc;
+
+ reldhcp = nonemb_cmd.va;
+ reldhcp->interface_hndl = phba->interface_handle;
+ reldhcp->ip_type = ip_type;
+
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ if (rc < 0) {
+ shost_printk(KERN_WARNING, phba->shost,
+ "Failed to Delete existing dhcp\n");
+ return rc;
+ }
+ }
+ }
+
+ /* Delete the Static IP Set */
+ if (if_info.ip_addr.addr[0]) {
+ rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL,
+ IP_ACTION_DEL);
+ if (rc)
+ return rc;
+ }
+
+ /* Delete the Gateway settings if mode change is to DHCP */
+ if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+ memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
+ rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
+ if (rc) {
+ shost_printk(KERN_WARNING, phba->shost,
+ "Failed to Get Gateway Addr\n");
+ return rc;
+ }
+
+ if (gtway_addr_set.ip_addr.addr[0]) {
+ gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
+ rc = mgmt_modify_gateway(phba, gtway_addr,
+ IP_ACTION_DEL, IP_V4_LEN);
+
+ if (rc) {
+ shost_printk(KERN_WARNING, phba->shost,
+ "Failed to clear Gateway Addr Set\n");
+ return rc;
+ }
+ }
+ }
+
+ /* Set Adapter to DHCP/Static Mode */
+ if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
+ sizeof(*dhcpreq));
+ if (rc)
+ return rc;
+
+ dhcpreq = nonemb_cmd.va;
+ dhcpreq->flags = BLOCKING;
+ dhcpreq->retry_count = 1;
+ dhcpreq->interface_hndl = phba->interface_handle;
+ dhcpreq->ip_type = BE2_DHCP_V4;
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ } else {
+ return mgmt_static_ip_modify(phba, &if_info, ip_param,
+ subnet_param, IP_ACTION_ADD);
+ }
+}
+
+int mgmt_set_gateway(struct beiscsi_hba *phba,
+ struct iscsi_iface_param_info *gateway_param)
+{
+ struct be_cmd_get_def_gateway_resp gtway_addr_set;
+ uint8_t *gtway_addr;
+ int rt_val;
+
+ memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
+ rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
+ if (rt_val) {
+ shost_printk(KERN_WARNING, phba->shost,
+ "Failed to Get Gateway Addr\n");
+ return rt_val;
+ }
+
+ if (gtway_addr_set.ip_addr.addr[0]) {
+ gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
+ rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
+ gateway_param->len);
+ if (rt_val) {
+ shost_printk(KERN_WARNING, phba->shost,
+ "Failed to clear Gateway Addr Set\n");
+ return rt_val;
+ }
+ }
+
+ gtway_addr = (uint8_t *)&gateway_param->value;
+ rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_ADD,
+ gateway_param->len);
+
+ if (rt_val)
+ shost_printk(KERN_WARNING, phba->shost,
+ "Failed to Set Gateway Addr\n");
+
+ return rt_val;
+}
+
+int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
+ struct be_cmd_get_def_gateway_resp *gateway)
+{
+ struct be_cmd_get_def_gateway_req *req;
+ struct be_dma_mem nonemb_cmd;
+ int rc;
+
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
+ sizeof(*gateway));
+ if (rc)
+ return rc;
+
+ req = nonemb_cmd.va;
+ req->ip_type = ip_type;
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, gateway,
+ sizeof(*gateway));
+}
+
+int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
+ struct be_cmd_get_if_info_resp *if_info)
+{
+ struct be_cmd_get_if_info_req *req;
+ struct be_dma_mem nonemb_cmd;
+ int rc;
+
+ if (mgmt_get_all_if_id(phba))
+ return -EIO;
+
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
+ sizeof(*if_info));
+ if (rc)
+ return rc;
+
+ req = nonemb_cmd.va;
+ req->interface_hndl = phba->interface_handle;
+ req->ip_type = ip_type;
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info,
+ sizeof(*if_info));
+}
+
+int mgmt_get_nic_conf(struct beiscsi_hba *phba,
+ struct be_cmd_get_nic_conf_resp *nic)
+{
+ struct be_dma_mem nonemb_cmd;
+ int rc;
+
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
+ sizeof(*nic));
+ if (rc)
+ return rc;
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, nic, sizeof(*nic));
+}
+
+unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
+{
unsigned int tag = 0;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_hba_name *req;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
- SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n");
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
@@ -438,12 +890,38 @@ unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
req = embedded_payload(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
- OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
- sizeof(*req));
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_CFG_GET_HBA_NAME,
+ sizeof(*req));
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
}
+unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
+{
+ unsigned int tag = 0;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_ntwk_link_status_req *req;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+ sizeof(*req));
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 08428824ace2..5c2e37693ca8 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -20,11 +20,16 @@
#ifndef _BEISCSI_MGMT_
#define _BEISCSI_MGMT_
-#include <linux/types.h>
-#include <linux/list.h>
+#include <scsi/scsi_bsg_iscsi.h>
#include "be_iscsi.h"
#include "be_main.h"
+#define IP_ACTION_ADD 0x01
+#define IP_ACTION_DEL 0x02
+
+#define IP_V6_LEN 16
+#define IP_V4_LEN 4
+
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
@@ -98,6 +103,10 @@ unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
struct invalidate_command_table *inv_tbl,
unsigned int num_invalidate, unsigned int cid,
struct be_dma_mem *nonemb_cmd);
+unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba,
+ struct bsg_job *job,
+ struct be_dma_mem *nonemb_cmd);
struct iscsi_invalidate_connection_params_in {
struct be_cmd_req_hdr hdr;
@@ -204,6 +213,13 @@ struct be_mgmt_controller_attributes_resp {
struct mgmt_controller_attributes params;
} __packed;
+struct be_bsg_vendor_cmd {
+ struct be_cmd_req_hdr hdr;
+ unsigned short region;
+ unsigned short offset;
+ unsigned short sector;
+} __packed;
+
/* configuration management */
#define GET_MGMT_CONTROLLER_WS(phba) (phba->pmgmt_ws)
@@ -219,12 +235,15 @@ struct be_mgmt_controller_attributes_resp {
/* the CMD_RESPONSE_HEADER */
#define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\
- pa->lo = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
+ pa->lo = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
bus_address.u.a32.address_lo; \
- pa->hi = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
+ pa->hi = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
bus_address.u.a32.address_hi; \
}
+#define BEISCSI_WRITE_FLASH 0
+#define BEISCSI_READ_FLASH 1
+
struct beiscsi_endpoint {
struct beiscsi_hba *phba;
struct beiscsi_sess *sess;
@@ -248,4 +267,27 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
unsigned short issue_reset,
unsigned short savecfg_flag);
+int mgmt_set_ip(struct beiscsi_hba *phba,
+ struct iscsi_iface_param_info *ip_param,
+ struct iscsi_iface_param_info *subnet_param,
+ uint32_t boot_proto);
+
+unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
+
+unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
+ u32 boot_session_handle,
+ struct be_dma_mem *nonemb_cmd);
+
+int mgmt_get_nic_conf(struct beiscsi_hba *phba,
+ struct be_cmd_get_nic_conf_resp *mac);
+
+int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
+ struct be_cmd_get_if_info_resp *if_info);
+
+int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
+ struct be_cmd_get_def_gateway_resp *gateway);
+
+int mgmt_set_gateway(struct beiscsi_hba *phba,
+ struct iscsi_iface_param_info *gateway_param);
+
#endif
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index e75e07d25915..51c9e1345719 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -799,9 +799,6 @@ struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
enum bfa_lport_role roles,
struct bfad_vf_s *vf_drv,
struct bfad_vport_s *vp_drv);
-void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
- struct bfad_vf_s *vf_drv,
- struct bfad_vport_s *vp_drv);
/*
* vport callbacks
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 5d2a1307e5ce..937000db62a8 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -616,7 +616,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
__port_action[port->fabric->fab_type].online(port);
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
- BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
"Logical port online: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
@@ -639,12 +639,12 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
if (bfa_sm_cmp_state(port->fabric,
bfa_fcs_fabric_sm_online) == BFA_TRUE) {
- BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+ BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
} else {
- BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
"Logical port taken offline: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
@@ -709,14 +709,10 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
/* Base port will be deleted by the OS driver */
- if (port->vport) {
- bfa_fcb_lport_delete(port->fcs->bfad, port->port_cfg.roles,
- port->fabric->vf_drv,
- port->vport ? port->vport->vport_drv : NULL);
+ if (port->vport)
bfa_fcs_vport_delete_comp(port->vport);
- } else {
+ else
bfa_wc_down(&port->fabric->wc);
- }
}
@@ -5714,17 +5710,23 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
(struct bfad_vport_s *)vport->vport_drv;
bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);
+ bfa_lps_delete(vport->lps);
- if (vport_drv->comp_del)
+ if (vport_drv->comp_del) {
complete(vport_drv->comp_del);
- else
- kfree(vport_drv);
+ return;
+ }
- bfa_lps_delete(vport->lps);
+ /*
+ * We queue the vport delete work to the IM work_q from here.
+ * The memory for the bfad_vport_s is freed from the FC function
+ * template vport_delete entry point.
+ */
+ if (vport_drv)
+ bfad_im_port_delete(vport_drv->drv_port.bfad,
+ &vport_drv->drv_port);
}
-
-
/*
* fcs_vport_public FCS virtual port public interfaces
*/
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 404fd10ddb21..2e4b0be14a20 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -456,23 +456,6 @@ bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
return port_drv;
}
-void
-bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
- struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
-{
- struct bfad_port_s *port_drv;
-
- /* this will be only called from rmmod context */
- if (vp_drv && !vp_drv->comp_del) {
- port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
- ((vf_drv) ? (&(vf_drv)->base_port) :
- (&(bfad)->pport));
- bfa_trc(bfad, roles);
- if (roles & BFA_LPORT_ROLE_FCP_IM)
- bfad_im_port_delete(bfad, port_drv);
- }
-}
-
/*
* FCS RPORT alloc callback, after successful PLOGI by FCS
*/
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 7b1ecd2b3ffe..8b6c6bf7837e 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -497,6 +497,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
if (im_port->flags & BFAD_PORT_DELETE) {
bfad_scsi_host_free(bfad, im_port);
list_del(&vport->list_entry);
+ kfree(vport);
return 0;
}
@@ -758,25 +759,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
else if (!strcmp(model, "Brocade-804"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 8Gbps FC HBA for HP Bladesystem C-class");
- else if (!strcmp(model, "Brocade-902") ||
- !strcmp(model, "Brocade-1741"))
+ else if (!strcmp(model, "Brocade-1741"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 10Gbps CNA for Dell M-Series Blade Servers");
- else if (strstr(model, "Brocade-1560")) {
- if (nports == 1)
- snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "Brocade 16Gbps PCIe single port FC HBA");
- else
- snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "Brocade 16Gbps PCIe dual port FC HBA");
- } else if (strstr(model, "Brocade-1710")) {
- if (nports == 1)
- snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "Brocade 10Gbps single port CNA");
- else
- snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "Brocade 10Gbps dual port CNA");
- } else if (strstr(model, "Brocade-1860")) {
+ else if (strstr(model, "Brocade-1860")) {
if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 10Gbps single port CNA");
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 495a841645f9..25093a04123b 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,6 +1,6 @@
/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
*
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index 72118db89a20..dc0a08e69c82 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,6 +1,6 @@
/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
*
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 0bd70e80efe4..0c53c28dc3d3 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f9d6f4129093..ece47e502282 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 4927cca733d3..8b6816706ee5 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.7.0.3"
-#define DRV_MODULE_RELDATE "Jun 15, 2011"
+#define DRV_MODULE_VERSION "2.7.2.2"
+#define DRV_MODULE_RELDATE "Apr 25, 2012"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1a44b45e7bef..f8d516b53161 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
/*
* bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
@@ -2244,6 +2244,7 @@ static struct scsi_host_template bnx2i_host_template = {
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.change_queue_depth = iscsi_change_queue_depth,
+ .target_alloc = iscsi_target_alloc,
.can_queue = 2048,
.max_sectors = 127,
.cmd_per_lun = 128,
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index 83a77f7244d2..c61cf7a43658 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,6 +1,6 @@
/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2004 - 2011 Broadcom Corporation
+ * Copyright (c) 2004 - 2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 04c5cea47a22..fda9cdea0e60 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -55,11 +55,16 @@
#define ALUA_FAILOVER_TIMEOUT (60 * HZ)
#define ALUA_FAILOVER_RETRIES 5
+/* flags passed from user level */
+#define ALUA_OPTIMIZE_STPG 1
+
struct alua_dh_data {
int group_id;
int rel_port;
int tpgs;
int state;
+ int pref;
+ unsigned flags; /* used for optimizing STPG */
unsigned char inq[ALUA_INQUIRY_SIZE];
unsigned char *buff;
int bufflen;
@@ -554,14 +559,16 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
if (h->group_id == (ucp[2] << 8) + ucp[3]) {
h->state = ucp[0] & 0x0f;
+ h->pref = ucp[0] >> 7;
valid_states = ucp[1];
}
off = 8 + (ucp[7] * 4);
}
sdev_printk(KERN_INFO, sdev,
- "%s: port group %02x state %c supports %c%c%c%c%c%c%c\n",
+ "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
+ h->pref ? "preferred" : "non-preferred",
valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
@@ -621,6 +628,37 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
out:
return err;
}
+/*
+ * alua_set_params - set/unset the optimize flag
+ * @sdev: device on the path to be activated
+ * @params: parameters in the following format
+ * "no_of_params\0param1\0param2\0param3\0...\0"
+ * For example, to set the flag pass the following parameters
+ * from multipath.conf
+ * hardware_handler "2 alua 1"
+ */
+static int alua_set_params(struct scsi_device *sdev, const char *params)
+{
+ struct alua_dh_data *h = get_alua_data(sdev);
+ unsigned int optimize = 0, argc;
+ const char *p = params;
+ int result = SCSI_DH_OK;
+
+ if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
+ return -EINVAL;
+
+ while (*p++)
+ ;
+ if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
+ return -EINVAL;
+
+ if (optimize)
+ h->flags |= ALUA_OPTIMIZE_STPG;
+ else
+ h->flags &= ~ALUA_OPTIMIZE_STPG;
+
+ return result;
+}
/*
* alua_activate - activate a path
@@ -637,14 +675,37 @@ static int alua_activate(struct scsi_device *sdev,
{
struct alua_dh_data *h = get_alua_data(sdev);
int err = SCSI_DH_OK;
+ int stpg = 0;
err = alua_rtpg(sdev, h);
if (err != SCSI_DH_OK)
goto out;
- if (h->tpgs & TPGS_MODE_EXPLICIT &&
- h->state != TPGS_STATE_OPTIMIZED &&
- h->state != TPGS_STATE_LBA_DEPENDENT) {
+ if (h->tpgs & TPGS_MODE_EXPLICIT) {
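+		/*
+		 * Decide whether this state needs an explicit STPG. With
+		 * the user-set optimize flag, skip the STPG for a
+		 * non-preferred, non-optimized path when the array also
+		 * supports implicit transitions.
+		 */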
+ switch (h->state) {
+ case TPGS_STATE_NONOPTIMIZED:
+ stpg = 1;
+ if ((h->flags & ALUA_OPTIMIZE_STPG) &&
+ (!h->pref) &&
+ (h->tpgs & TPGS_MODE_IMPLICIT))
+ stpg = 0;
+ break;
+ case TPGS_STATE_STANDBY:
+ stpg = 1;
+ break;
+ case TPGS_STATE_UNAVAILABLE:
+ case TPGS_STATE_OFFLINE:
+ err = SCSI_DH_IO;
+ break;
+ case TPGS_STATE_TRANSITIONING:
+ err = SCSI_DH_RETRY;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (stpg) {
h->callback_fn = fn;
h->callback_data = data;
err = submit_stpg(h);
@@ -698,6 +759,7 @@ static struct scsi_device_handler alua_dh = {
.prep_fn = alua_prep_fn,
.check_sense = alua_check_sense,
.activate = alua_activate,
+ .set_params = alua_set_params,
.match = alua_match,
};
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 335e85192807..76e3d0b5bfa6 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -411,20 +411,18 @@ out:
}
/**
- * fcoe_interface_cleanup() - Clean up a FCoE interface
+ * fcoe_interface_remove() - remove FCoE interface from netdev
* @fcoe: The FCoE interface to be cleaned up
*
* Caller must be holding the RTNL mutex
*/
-static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+static void fcoe_interface_remove(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = &fcoe->ctlr;
u8 flogi_maddr[ETH_ALEN];
const struct net_device_ops *ops;
- rtnl_lock();
-
/*
* Don't listen for Ethernet packets anymore.
* synchronize_net() ensures that the packet handlers are not running
@@ -453,12 +451,28 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
" specific feature for LLD.\n");
}
+ fcoe->removed = 1;
+}
+
+
+/**
+ * fcoe_interface_cleanup() - Clean up a FCoE interface
+ * @fcoe: The FCoE interface to be cleaned up
+ */
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+{
+ struct net_device *netdev = fcoe->netdev;
+ struct fcoe_ctlr *fip = &fcoe->ctlr;
+ rtnl_lock();
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
rtnl_unlock();
/* Release the self-reference taken during fcoe_interface_create() */
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(fip);
+ scsi_host_put(fcoe->ctlr.lp->host);
kfree(fcoe);
dev_put(netdev);
module_put(THIS_MODULE);
@@ -522,13 +536,11 @@ static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
- rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
dev_uc_del(fcoe->netdev, port->data_src_addr);
if (!is_zero_ether_addr(addr))
dev_uc_add(fcoe->netdev, addr);
memcpy(port->data_src_addr, addr, ETH_ALEN);
- rtnl_unlock();
}
/**
@@ -941,6 +953,10 @@ static void fcoe_if_destroy(struct fc_lport *lport)
rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
dev_uc_del(netdev, port->data_src_addr);
+ if (lport->vport)
+ synchronize_net();
+ else
+ fcoe_interface_remove(fcoe);
rtnl_unlock();
/* Free queued packets for the per-CPU receive threads */
@@ -959,8 +975,12 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Free memory used by statistical counters */
fc_lport_free_stats(lport);
- /* Release the Scsi_Host */
- scsi_host_put(lport->host);
+ /*
+	 * Release the Scsi_Host for a vport, but hold on to the
+	 * master lport's host until its fcoe interface is fully cleaned up.
+ */
+ if (lport->vport)
+ scsi_host_put(lport->host);
}
/**
@@ -2274,10 +2294,9 @@ static void fcoe_percpu_clean(struct fc_lport *lport)
continue;
skb = dev_alloc_skb(0);
- if (!skb) {
- spin_unlock_bh(&pp->fcoe_rx_list.lock);
+ if (!skb)
continue;
- }
+
skb->destructor = fcoe_percpu_flush_done;
spin_lock_bh(&pp->fcoe_rx_list.lock);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 3c2733a12aa1..96ac938d39cc 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -71,7 +71,8 @@ do { \
* @ctlr: The FCoE controller (for FIP)
* @oem: The offload exchange manager for all local port
* instances associated with this port
- * This structure is 1:1 with a net devive.
+ * @removed: Indicates fcoe interface removed from net device
+ * This structure is 1:1 with a net device.
*/
struct fcoe_interface {
struct list_head list;
@@ -81,6 +82,7 @@ struct fcoe_interface {
struct packet_type fip_packet_type;
struct fcoe_ctlr ctlr;
struct fc_exch_mgr *oem;
+ u8 removed;
};
#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 249a106888d9..5a4c7250aa77 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1883,7 +1883,13 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
frame = (struct fip_frame *)skb->data;
memset(frame, 0, len);
memcpy(frame->eth.h_dest, dest, ETH_ALEN);
- memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+
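+	/* VN2VN beacons are sourced from the VN_Port MAC: the FC-MAP
+	 * followed by the local port ID
+	 */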
+ if (sub == FIP_SC_VN_BEACON) {
+ hton24(frame->eth.h_source, FIP_VN_FC_MAP);
+ hton24(frame->eth.h_source + 3, fip->port_id);
+ } else {
+ memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+ }
frame->eth.h_proto = htons(ETH_P_FIP);
frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 500e20dd56ec..796482badf13 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -159,6 +159,7 @@ static int hpsa_change_queue_depth(struct scsi_device *sdev,
int qdepth, int reason);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
+static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
@@ -171,7 +172,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
static void calc_bucket_map(int *bucket, int num_buckets,
int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
-static inline u32 next_command(struct ctlr_info *h);
+static inline u32 next_command(struct ctlr_info *h, u8 q);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
u64 *cfg_offset);
@@ -180,6 +181,7 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
void __iomem *vaddr, int wait_for_ready);
+static inline void finish_cmd(struct CommandList *c);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
@@ -234,6 +236,16 @@ static int check_for_unit_attention(struct ctlr_info *h,
return 1;
}
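+/* Return 1 if the command completed with BUSY or TASK SET FULL status. */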
+static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
+{
+ if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
+ (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
+ c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
+ return 0;
+	dev_warn(&h->pdev->dev, HPSA ": device busy\n");
+ return 1;
+}
+
static ssize_t host_store_rescan(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -368,7 +380,7 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
}
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
- "UNKNOWN"
+ "1(ADM)", "UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
@@ -497,6 +509,7 @@ static struct scsi_host_template hpsa_driver_template = {
.change_queue_depth = hpsa_change_queue_depth,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
+ .eh_abort_handler = hpsa_eh_abort_handler,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
.slave_alloc = hpsa_slave_alloc,
@@ -516,24 +529,28 @@ static inline void addQ(struct list_head *list, struct CommandList *c)
list_add_tail(&c->list, list);
}
-static inline u32 next_command(struct ctlr_info *h)
+static inline u32 next_command(struct ctlr_info *h, u8 q)
{
u32 a;
+ struct reply_pool *rq = &h->reply_queue[q];
+ unsigned long flags;
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
- return h->access.command_completed(h);
+ return h->access.command_completed(h, q);
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- a = *(h->reply_pool_head); /* Next cmd in ring buffer */
- (h->reply_pool_head)++;
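+	/* the low bit of each entry toggles on every pass around the
+	 * ring, so a match with rq->wraparound marks a valid new entry
+	 */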
+ if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+ a = rq->head[rq->current_entry];
+ rq->current_entry++;
+ spin_lock_irqsave(&h->lock, flags);
h->commands_outstanding--;
+ spin_unlock_irqrestore(&h->lock, flags);
} else {
a = FIFO_EMPTY;
}
/* Check for wraparound */
- if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
- h->reply_pool_head = h->reply_pool;
- h->reply_pool_wraparound ^= 1;
+ if (rq->current_entry == h->max_commands) {
+ rq->current_entry = 0;
+ rq->wraparound ^= 1;
}
return a;
}
@@ -544,8 +561,41 @@ static inline u32 next_command(struct ctlr_info *h)
*/
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
- if (likely(h->transMethod & CFGTBL_Trans_Performant))
+ if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
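+		/* steer the completion to a reply queue keyed off the
+		 * submitting CPU, spreading work across the MSI-X vectors
+		 */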
+ if (likely(h->msix_vector))
+ c->Header.ReplyQueue =
+ smp_processor_id() % h->nreply_queues;
+ }
+}
+
+static int is_firmware_flash_cmd(u8 *cdb)
+{
+ return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
+}
+
+/*
+ * During firmware flash, the heartbeat register may not update as frequently
+ * as it should. So we dial down lockup detection during firmware flash. and
+ * dial it back up when firmware flash completes.
+ */
+#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
+#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
+static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (!is_firmware_flash_cmd(c->Request.CDB))
+ return;
+ atomic_inc(&h->firmware_flash_in_progress);
+ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
+}
+
+static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (is_firmware_flash_cmd(c->Request.CDB) &&
+ atomic_dec_and_test(&h->firmware_flash_in_progress))
+ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
@@ -554,11 +604,12 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
unsigned long flags;
set_performant_mode(h, c);
+ dial_down_lockup_detection_during_fw_flash(h, c);
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
h->Qdepth++;
- start_io(h);
spin_unlock_irqrestore(&h->lock, flags);
+ start_io(h);
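+	/* start_io() is now called with h->lock dropped */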
}
static inline void removeQ(struct CommandList *c)
@@ -1193,7 +1244,7 @@ static void complete_scsi_command(struct CommandList *cp)
break;
}
/* Must be some other type of check condition */
- dev_warn(&h->pdev->dev, "cp %p has check condition: "
+ dev_dbg(&h->pdev->dev, "cp %p has check condition: "
"unknown type: "
"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
"Returning result: 0x%x, "
@@ -1370,16 +1421,24 @@ static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
}
}
+#define MAX_DRIVER_CMD_RETRIES 25
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
struct CommandList *c, int data_direction)
{
- int retry_count = 0;
+ int backoff_time = 10, retry_count = 0;
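+	/* after the first few immediate retries, back off 10ms and keep
+	 * doubling the delay up to roughly a second between attempts
+	 */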
do {
memset(c->err_info, 0, sizeof(*c->err_info));
hpsa_scsi_do_simple_cmd_core(h, c);
retry_count++;
- } while (check_for_unit_attention(h, c) && retry_count <= 3);
+ if (retry_count > 3) {
+ msleep(backoff_time);
+ if (backoff_time < 1000)
+ backoff_time *= 2;
+ }
+ } while ((check_for_unit_attention(h, c) ||
+ check_for_busy(h, c)) &&
+ retry_count <= MAX_DRIVER_CMD_RETRIES);
hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
@@ -2065,9 +2124,8 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
done(cmd);
return 0;
}
- /* Need a lock as this is being allocated from the pool */
- c = cmd_alloc(h);
spin_unlock_irqrestore(&h->lock, flags);
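+	/* cmd_alloc() now takes h->lock itself to protect the pool bitmap */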
+ c = cmd_alloc(h);
if (c == NULL) { /* trouble... */
dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
return SCSI_MLQUEUE_HOST_BUSY;
@@ -2334,6 +2392,261 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
return FAILED;
}
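+/* Some Smart Array firmware expects the abort tag with each 32-bit half
+ * byte-swapped; build that alternate form of the 8-byte tag in place.
+ */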
+static void swizzle_abort_tag(u8 *tag)
+{
+ u8 original_tag[8];
+
+ memcpy(original_tag, tag, 8);
+ tag[0] = original_tag[3];
+ tag[1] = original_tag[2];
+ tag[2] = original_tag[1];
+ tag[3] = original_tag[0];
+ tag[4] = original_tag[7];
+ tag[5] = original_tag[6];
+ tag[6] = original_tag[5];
+ tag[7] = original_tag[4];
+}
+
+static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
+ struct CommandList *abort, int swizzle)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+
+ fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
+ if (swizzle)
+ swizzle_abort_tag(&c->Request.CDB[4]);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
+ __func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
+ /* no unmap needed here because no data xfer. */
+
+ ei = c->err_info;
+ switch (ei->CommandStatus) {
+ case CMD_SUCCESS:
+ break;
+ case CMD_UNABORTABLE: /* Very common, don't make noise. */
+ rc = -1;
+ break;
+ default:
+ dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
+ __func__, abort->Header.Tag.upper,
+ abort->Header.Tag.lower);
+ hpsa_scsi_interpret_error(c);
+ rc = -1;
+ break;
+ }
+ cmd_special_free(h, c);
+ dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
+ abort->Header.Tag.upper, abort->Header.Tag.lower);
+ return rc;
+}
+
+/*
+ * hpsa_find_cmd_in_queue
+ *
+ * Used to determine whether a command (find) is still present
+ * in queue_head. Optionally excludes the last element of queue_head.
+ *
+ * This is used to avoid unnecessary aborts. Commands in h->reqQ have
+ * not yet been submitted, and so can be aborted by the driver without
+ * sending an abort to the hardware.
+ *
+ * Returns pointer to command if found in queue, NULL otherwise.
+ */
+static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
+ struct scsi_cmnd *find, struct list_head *queue_head)
+{
+ unsigned long flags;
+ struct CommandList *c = NULL; /* ptr into cmpQ */
+
+ if (!find)
+		return NULL;
+ spin_lock_irqsave(&h->lock, flags);
+ list_for_each_entry(c, queue_head, list) {
+ if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
+ continue;
+ if (c->scsi_cmd == find) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return c;
+ }
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+ return NULL;
+}
+
+static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
+ u8 *tag, struct list_head *queue_head)
+{
+ unsigned long flags;
+ struct CommandList *c;
+
+ spin_lock_irqsave(&h->lock, flags);
+ list_for_each_entry(c, queue_head, list) {
+ if (memcmp(&c->Header.Tag, tag, 8) != 0)
+ continue;
+ spin_unlock_irqrestore(&h->lock, flags);
+ return c;
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+ return NULL;
+}
+
+/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
+ * tell which kind we're dealing with, so we send the abort both ways. There
+ * shouldn't be any collisions between swizzled and unswizzled tags due to the
+ * way we construct our tags but we check anyway in case the assumptions which
+ * make this true someday become false.
+ */
+static int hpsa_send_abort_both_ways(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct CommandList *abort)
+{
+ u8 swizzled_tag[8];
+ struct CommandList *c;
+ int rc = 0, rc2 = 0;
+
+ /* we do not expect to find the swizzled tag in our queue, but
+ * check anyway just to be sure the assumptions which make this
+ * the case haven't become wrong.
+ */
+ memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
+ swizzle_abort_tag(swizzled_tag);
+ c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
+ if (c != NULL) {
+ dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
+ return hpsa_send_abort(h, scsi3addr, abort, 0);
+ }
+ rc = hpsa_send_abort(h, scsi3addr, abort, 0);
+
+ /* if the command is still in our queue, we can't conclude that it was
+ * aborted (it might have just completed normally) but in any case
+ * we don't need to try to abort it another way.
+ */
+ c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
+ if (c)
+ rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
+ return rc && rc2;
+}
+
+/* Send an abort for the specified command.
+ * If the device and controller support it,
+ * send a task abort request.
+ */
+static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
+{
+ int i, rc;
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *dev;
+ struct CommandList *abort; /* pointer to command to be aborted */
+ struct CommandList *found;
+ struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
+ char msg[256]; /* For debug messaging. */
+ int ml = 0;
+
+ /* Find the controller of the command to be aborted */
+ h = sdev_to_hba(sc->device);
+ if (WARN(h == NULL,
+ "ABORT REQUEST FAILED, Controller lookup failed.\n"))
+ return FAILED;
+
+ /* Check that controller supports some kind of task abort */
+ if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
+ !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+ return FAILED;
+
+ memset(msg, 0, sizeof(msg));
+ ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
+ h->scsi_host->host_no, sc->device->channel,
+ sc->device->id, sc->device->lun);
+
+ /* Find the device of the command to be aborted */
+ dev = sc->device->hostdata;
+ if (!dev) {
+ dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
+ msg);
+ return FAILED;
+ }
+
+ /* Get SCSI command to be aborted */
+ abort = (struct CommandList *) sc->host_scribble;
+ if (abort == NULL) {
+ dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
+ msg);
+ return FAILED;
+ }
+
+ ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
+ abort->Header.Tag.upper, abort->Header.Tag.lower);
+ as = (struct scsi_cmnd *) abort->scsi_cmd;
+ if (as != NULL)
+ ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
+ as->cmnd[0], as->serial_number);
+ dev_dbg(&h->pdev->dev, "%s\n", msg);
+ dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
+ h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
+
+ /* Search reqQ to see if the command is queued but not submitted,
+ * if so, complete the command with aborted status and remove
+ * it from the reqQ.
+ */
+ found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
+ if (found) {
+ found->err_info->CommandStatus = CMD_ABORTED;
+ finish_cmd(found);
+ dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
+ msg);
+ return SUCCESS;
+ }
+
+ /* not in reqQ; if also not in cmpQ, it must have already completed */
+ found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
+ if (!found) {
+ dev_dbg(&h->pdev->dev, "%s Request FAILED (not known to driver).\n",
+ msg);
+ return SUCCESS;
+ }
+
+ /*
+ * Command is in flight, or possibly already completed
+ * by the firmware (but not to the scsi mid layer) but we can't
+ * distinguish which. Send the abort down.
+ */
+ rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
+ if (rc != 0) {
+ dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
+ dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
+ h->scsi_host->host_no,
+ dev->bus, dev->target, dev->lun);
+ return FAILED;
+ }
+ dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
+
+ /* If the abort(s) above completed and actually aborted the
+ * command, then the command to be aborted should already be
+ * completed. If not, wait around a bit more to see if they
+ * manage to complete normally.
+ */
+#define ABORT_COMPLETE_WAIT_SECS 30
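+ /* Poll cmpQ every 100ms, for up to ABORT_COMPLETE_WAIT_SECS seconds. */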
+ for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
+ found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
+ if (!found)
+ return SUCCESS;
+ msleep(100);
+ }
+ dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
+ msg, ABORT_COMPLETE_WAIT_SECS);
+ return FAILED;
+}
+
+
/*
* For operations that cannot sleep, a command block is allocated at init,
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
@@ -2346,14 +2659,21 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
int i;
union u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
+ unsigned long flags;
+ spin_lock_irqsave(&h->lock, flags);
do {
i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
- if (i == h->nr_cmds)
+ if (i == h->nr_cmds) {
+ spin_unlock_irqrestore(&h->lock, flags);
return NULL;
+ }
} while (test_and_set_bit
(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+ h->nr_allocs++;
+ spin_unlock_irqrestore(&h->lock, flags);
+
c = h->cmd_pool + i;
memset(c, 0, sizeof(*c));
cmd_dma_handle = h->cmd_pool_dhandle
@@ -2362,7 +2682,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
memset(c->err_info, 0, sizeof(*c->err_info));
err_dma_handle = h->errinfo_pool_dhandle
+ i * sizeof(*c->err_info);
- h->nr_allocs++;
c->cmdindex = i;
@@ -2418,11 +2737,14 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
int i;
+ unsigned long flags;
i = c - h->cmd_pool;
+ spin_lock_irqsave(&h->lock, flags);
clear_bit(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG));
h->nr_frees++;
+ spin_unlock_irqrestore(&h->lock, flags);
}
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
@@ -2866,6 +3188,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
int cmd_type)
{
int pci_dir = XFER_NONE;
+ struct CommandList *a; /* for commands to be aborted */
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
@@ -2949,8 +3272,35 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[5] = 0x00;
c->Request.CDB[6] = 0x00;
c->Request.CDB[7] = 0x00;
+ break;
+ case HPSA_ABORT_MSG:
+ a = buff; /* point to command to be aborted */
+ dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
+ a->Header.Tag.upper, a->Header.Tag.lower,
+ c->Header.Tag.upper, c->Header.Tag.lower);
+ c->Request.CDBLen = 16;
+ c->Request.Type.Type = TYPE_MSG;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_WRITE;
+ c->Request.Timeout = 0; /* Don't time out */
+ c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
+ c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
+ c->Request.CDB[2] = 0x00; /* reserved */
+ c->Request.CDB[3] = 0x00; /* reserved */
+ /* Tag to abort goes in CDB[4]-CDB[11] */
+ c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
+ c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
+ c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
+ c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
+ c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
+ c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
+ c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
+ c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
+ c->Request.CDB[12] = 0x00; /* reserved */
+ c->Request.CDB[13] = 0x00; /* reserved */
+ c->Request.CDB[14] = 0x00; /* reserved */
+ c->Request.CDB[15] = 0x00; /* reserved */
break;
-
default:
dev_warn(&h->pdev->dev, "unknown message type %d\n",
cmd);
@@ -2998,7 +3348,9 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
static void start_io(struct ctlr_info *h)
{
struct CommandList *c;
+ unsigned long flags;
+ spin_lock_irqsave(&h->lock, flags);
while (!list_empty(&h->reqQ)) {
c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
@@ -3011,17 +3363,28 @@ static void start_io(struct ctlr_info *h)
removeQ(c);
h->Qdepth--;
- /* Tell the controller execute command */
- h->access.submit_command(h, c);
-
/* Put job onto the completed Q */
addQ(&h->cmpQ, c);
+
+ /* Must increment commands_outstanding before unlocking
+ * and submitting to avoid a race when checking for the fifo-full
+ * condition.
+ */
+ h->commands_outstanding++;
+ if (h->commands_outstanding > h->max_outstanding)
+ h->max_outstanding = h->commands_outstanding;
+
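+ /* Drop h->lock across the MMIO submit rather than holding a spinlock
+ * over hardware access; it is reacquired before reqQ is examined again.
+ */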
+ /* Tell the controller to execute the command */
+ spin_unlock_irqrestore(&h->lock, flags);
+ h->access.submit_command(h, c);
+ spin_lock_irqsave(&h->lock, flags);
}
+ spin_unlock_irqrestore(&h->lock, flags);
}
-static inline unsigned long get_next_completion(struct ctlr_info *h)
+static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
- return h->access.command_completed(h);
+ return h->access.command_completed(h, q);
}
static inline bool interrupt_pending(struct ctlr_info *h)
@@ -3045,9 +3408,14 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
return 0;
}
-static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
+static inline void finish_cmd(struct CommandList *c)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->h->lock, flags);
removeQ(c);
+ spin_unlock_irqrestore(&c->h->lock, flags);
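+ /* Restore normal lockup detection once a firmware flash command
+ * completes (see the dial-up/dial-down helpers elsewhere in this
+ * patch).
+ */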
+ dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
if (likely(c->cmd_type == CMD_SCSI))
complete_scsi_command(c);
else if (c->cmd_type == CMD_IOCTL_PEND)
@@ -3075,36 +3443,38 @@ static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
}
/* process completion of an indexed ("direct lookup") command */
-static inline u32 process_indexed_cmd(struct ctlr_info *h,
+static inline void process_indexed_cmd(struct ctlr_info *h,
u32 raw_tag)
{
u32 tag_index;
struct CommandList *c;
tag_index = hpsa_tag_to_index(raw_tag);
- if (bad_tag(h, tag_index, raw_tag))
- return next_command(h);
- c = h->cmd_pool + tag_index;
- finish_cmd(c, raw_tag);
- return next_command(h);
+ if (!bad_tag(h, tag_index, raw_tag)) {
+ c = h->cmd_pool + tag_index;
+ finish_cmd(c);
+ }
}
/* process completion of a non-indexed command */
-static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
+static inline void process_nonindexed_cmd(struct ctlr_info *h,
u32 raw_tag)
{
u32 tag;
struct CommandList *c = NULL;
+ unsigned long flags;
tag = hpsa_tag_discard_error_bits(h, raw_tag);
+ spin_lock_irqsave(&h->lock, flags);
list_for_each_entry(c, &h->cmpQ, list) {
if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
- finish_cmd(c, raw_tag);
- return next_command(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+ finish_cmd(c);
+ return;
}
}
+ spin_unlock_irqrestore(&h->lock, flags);
bad_tag(h, h->nr_cmds + 1, raw_tag);
- return next_command(h);
}
/* Some controllers, like p400, will give us one interrupt
@@ -3126,10 +3496,20 @@ static int ignore_bogus_interrupt(struct ctlr_info *h)
return 1;
}
-static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
+/*
+ * Convert &h->q[x] (passed to interrupt handlers) back to h.
+ * Relies on (h->q[x] == x) being true for x such that
+ * 0 <= x < MAX_REPLY_QUEUES.
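+ * For example, queue == &h->q[3] implies *queue == 3, so
+ * (queue - *queue) == &h->q[0], from which container_of()
+ * recovers h.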
+ */
+static struct ctlr_info *queue_to_hba(u8 *queue)
{
- struct ctlr_info *h = dev_id;
- unsigned long flags;
+ return container_of((queue - *queue), struct ctlr_info, q[0]);
+}
+
+static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
+{
+ struct ctlr_info *h = queue_to_hba(queue);
+ u8 q = *(u8 *) queue;
u32 raw_tag;
if (ignore_bogus_interrupt(h))
@@ -3137,74 +3517,68 @@ static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
if (interrupt_not_for_us(h))
return IRQ_NONE;
- spin_lock_irqsave(&h->lock, flags);
h->last_intr_timestamp = get_jiffies_64();
while (interrupt_pending(h)) {
- raw_tag = get_next_completion(h);
+ raw_tag = get_next_completion(h, q);
while (raw_tag != FIFO_EMPTY)
- raw_tag = next_command(h);
+ raw_tag = next_command(h, q);
}
- spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
-static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
+static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
- struct ctlr_info *h = dev_id;
- unsigned long flags;
+ struct ctlr_info *h = queue_to_hba(queue);
u32 raw_tag;
+ u8 q = *(u8 *) queue;
if (ignore_bogus_interrupt(h))
return IRQ_NONE;
- spin_lock_irqsave(&h->lock, flags);
h->last_intr_timestamp = get_jiffies_64();
- raw_tag = get_next_completion(h);
+ raw_tag = get_next_completion(h, q);
while (raw_tag != FIFO_EMPTY)
- raw_tag = next_command(h);
- spin_unlock_irqrestore(&h->lock, flags);
+ raw_tag = next_command(h, q);
return IRQ_HANDLED;
}
-static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
+static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
- struct ctlr_info *h = dev_id;
- unsigned long flags;
+ struct ctlr_info *h = queue_to_hba((u8 *) queue);
u32 raw_tag;
+ u8 q = *(u8 *) queue;
if (interrupt_not_for_us(h))
return IRQ_NONE;
- spin_lock_irqsave(&h->lock, flags);
h->last_intr_timestamp = get_jiffies_64();
while (interrupt_pending(h)) {
- raw_tag = get_next_completion(h);
+ raw_tag = get_next_completion(h, q);
while (raw_tag != FIFO_EMPTY) {
- if (hpsa_tag_contains_index(raw_tag))
- raw_tag = process_indexed_cmd(h, raw_tag);
+ if (likely(hpsa_tag_contains_index(raw_tag)))
+ process_indexed_cmd(h, raw_tag);
else
- raw_tag = process_nonindexed_cmd(h, raw_tag);
+ process_nonindexed_cmd(h, raw_tag);
+ raw_tag = next_command(h, q);
}
}
- spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
-static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
+static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
- struct ctlr_info *h = dev_id;
- unsigned long flags;
+ struct ctlr_info *h = queue_to_hba(queue);
u32 raw_tag;
+ u8 q = *(u8 *) queue;
- spin_lock_irqsave(&h->lock, flags);
h->last_intr_timestamp = get_jiffies_64();
- raw_tag = get_next_completion(h);
+ raw_tag = get_next_completion(h, q);
while (raw_tag != FIFO_EMPTY) {
- if (hpsa_tag_contains_index(raw_tag))
- raw_tag = process_indexed_cmd(h, raw_tag);
+ if (likely(hpsa_tag_contains_index(raw_tag)))
+ process_indexed_cmd(h, raw_tag);
else
- raw_tag = process_nonindexed_cmd(h, raw_tag);
+ process_nonindexed_cmd(h, raw_tag);
+ raw_tag = next_command(h, q);
}
- spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
@@ -3638,10 +4012,13 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
- int err;
- struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
- {0, 2}, {0, 3}
- };
+ int err, i;
+ struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
+
+ for (i = 0; i < MAX_REPLY_QUEUES; i++) {
+ hpsa_msix_entries[i].vector = 0;
+ hpsa_msix_entries[i].entry = i;
+ }
/* Some boards advertise MSI but don't really support it */
if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
@@ -3649,12 +4026,11 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
goto default_int_mode;
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
dev_info(&h->pdev->dev, "MSIX\n");
- err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
+ err = pci_enable_msix(h->pdev, hpsa_msix_entries,
+ MAX_REPLY_QUEUES);
if (!err) {
- h->intr[0] = hpsa_msix_entries[0].vector;
- h->intr[1] = hpsa_msix_entries[1].vector;
- h->intr[2] = hpsa_msix_entries[2].vector;
- h->intr[3] = hpsa_msix_entries[3].vector;
+ for (i = 0; i < MAX_REPLY_QUEUES; i++)
+ h->intr[i] = hpsa_msix_entries[i].vector;
h->msix_vector = 1;
return;
}
@@ -3705,14 +4081,6 @@ static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
-static inline bool hpsa_board_disabled(struct pci_dev *pdev)
-{
- u16 command;
-
- (void) pci_read_config_word(pdev, PCI_COMMAND, &command);
- return ((command & PCI_COMMAND_MEMORY) == 0);
-}
-
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar)
{
@@ -3838,14 +4206,14 @@ static void __devinit hpsa_find_board_params(struct ctlr_info *h)
h->maxsgentries = 31; /* default to traditional values */
h->chainsize = 0;
}
+
+ /* Find out what task management functions are supported and cache */
+ h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
}
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
- if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
- (readb(&h->cfgtable->Signature[1]) != 'I') ||
- (readb(&h->cfgtable->Signature[2]) != 'S') ||
- (readb(&h->cfgtable->Signature[3]) != 'S')) {
+ if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
return false;
}
@@ -3932,11 +4300,6 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
h->product_name = products[prod_index].product_name;
h->access = *(products[prod_index].access);
- if (hpsa_board_disabled(h->pdev)) {
- dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
- return -ENODEV;
- }
-
pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
@@ -3946,6 +4309,9 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
return err;
}
+ /* Enable bus mastering (pci_disable_device may disable this) */
+ pci_set_master(h->pdev);
+
err = pci_request_regions(h->pdev, HPSA);
if (err) {
dev_err(&h->pdev->dev,
@@ -3987,10 +4353,7 @@ err_out_free_res:
iounmap(h->cfgtable);
if (h->vaddr)
iounmap(h->vaddr);
- /*
- * Deliberately omit pci_disable_device(): it does something nasty to
- * Smart Array controllers that pci_enable_device does not undo
- */
+ pci_disable_device(h->pdev);
pci_release_regions(h->pdev);
return err;
}
@@ -4081,14 +4444,33 @@ static int hpsa_request_irq(struct ctlr_info *h,
irqreturn_t (*msixhandler)(int, void *),
irqreturn_t (*intxhandler)(int, void *))
{
- int rc;
+ int rc, i;
- if (h->msix_vector || h->msi_vector)
- rc = request_irq(h->intr[h->intr_mode], msixhandler,
- 0, h->devname, h);
- else
- rc = request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_SHARED, h->devname, h);
+ /*
+ * initialize h->q[x] = x so that interrupt handlers know which
+ * queue to process.
+ */
+ for (i = 0; i < MAX_REPLY_QUEUES; i++)
+ h->q[i] = (u8) i;
+
+ if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
+ /* If performant mode and MSI-X, use multiple reply queues */
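+ /* &h->q[i] is passed as the dev_id cookie so each handler can
+ * recover both h and its queue number via queue_to_hba().
+ */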
+ for (i = 0; i < MAX_REPLY_QUEUES; i++)
+ rc = request_irq(h->intr[i], msixhandler,
+ 0, h->devname,
+ &h->q[i]);
+ } else {
+ /* Use single reply pool */
+ if (h->msix_vector || h->msi_vector) {
+ rc = request_irq(h->intr[h->intr_mode],
+ msixhandler, 0, h->devname,
+ &h->q[h->intr_mode]);
+ } else {
+ rc = request_irq(h->intr[h->intr_mode],
+ intxhandler, IRQF_SHARED, h->devname,
+ &h->q[h->intr_mode]);
+ }
+ }
if (rc) {
dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
@@ -4121,15 +4503,38 @@ static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
return 0;
}
-static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
+static void free_irqs(struct ctlr_info *h)
{
- free_irq(h->intr[h->intr_mode], h);
+ int i;
+
+ if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+ /* Single reply queue, only one irq to free */
+ i = h->intr_mode;
+ free_irq(h->intr[i], &h->q[i]);
+ return;
+ }
+
+ for (i = 0; i < MAX_REPLY_QUEUES; i++)
+ free_irq(h->intr[i], &h->q[i]);
+}
+
+static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
+{
+ free_irqs(h);
#ifdef CONFIG_PCI_MSI
- if (h->msix_vector)
- pci_disable_msix(h->pdev);
- else if (h->msi_vector)
- pci_disable_msi(h->pdev);
+ if (h->msix_vector) {
+ if (h->pdev->msix_enabled)
+ pci_disable_msix(h->pdev);
+ } else if (h->msi_vector) {
+ if (h->pdev->msi_enabled)
+ pci_disable_msi(h->pdev);
+ }
#endif /* CONFIG_PCI_MSI */
+}
+
+static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
+{
+ hpsa_free_irqs_and_disable_msix(h);
hpsa_free_sg_chain_blocks(h);
hpsa_free_cmd_pool(h);
kfree(h->blockFetchTable);
@@ -4165,7 +4570,7 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
while (!list_empty(list)) {
c = list_entry(list->next, struct CommandList, list);
c->err_info->CommandStatus = CMD_HARDWARE_ERR;
- finish_cmd(c, c->Header.Tag.lower);
+ finish_cmd(c);
}
}
@@ -4188,9 +4593,6 @@ static void controller_lockup_detected(struct ctlr_info *h)
spin_unlock_irqrestore(&h->lock, flags);
}
-#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
-#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
-
static void detect_controller_lockup(struct ctlr_info *h)
{
u64 now;
@@ -4201,7 +4603,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
now = get_jiffies_64();
/* If we've received an interrupt recently, we're ok. */
if (time_after64(h->last_intr_timestamp +
- (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+ (h->heartbeat_sample_interval), now))
return;
/*
@@ -4210,7 +4612,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
* otherwise don't care about signals in this thread.
*/
if (time_after64(h->last_heartbeat_timestamp +
- (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+ (h->heartbeat_sample_interval), now))
return;
/* If heartbeat has not changed since we last looked, we're not ok. */
@@ -4252,6 +4654,7 @@ static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
{
unsigned long flags;
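+ /* Start at the default sampling interval; the firmware-flash path
+ * can lengthen it temporarily (see the dial-up/dial-down helpers
+ * elsewhere in this patch) to avoid false lockup detection.
+ */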
+ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
spin_lock_irqsave(&lockup_detector_lock, flags);
list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
spin_unlock_irqrestore(&lockup_detector_lock, flags);
@@ -4391,7 +4794,7 @@ reinit_after_soft_reset:
spin_lock_irqsave(&h->lock, flags);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
spin_unlock_irqrestore(&h->lock, flags);
- free_irq(h->intr[h->intr_mode], h);
+ free_irqs(h);
rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
hpsa_intx_discard_completions);
if (rc) {
@@ -4441,7 +4844,7 @@ reinit_after_soft_reset:
clean4:
hpsa_free_sg_chain_blocks(h);
hpsa_free_cmd_pool(h);
- free_irq(h->intr[h->intr_mode], h);
+ free_irqs(h);
clean2:
clean1:
kfree(h);
@@ -4484,13 +4887,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
*/
hpsa_flush_cache(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
- free_irq(h->intr[h->intr_mode], h);
-#ifdef CONFIG_PCI_MSI
- if (h->msix_vector)
- pci_disable_msix(h->pdev);
- else if (h->msi_vector)
- pci_disable_msi(h->pdev);
-#endif /* CONFIG_PCI_MSI */
+ hpsa_free_irqs_and_disable_msix(h);
}
static void __devexit hpsa_free_device_info(struct ctlr_info *h)
@@ -4529,10 +4926,7 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
kfree(h->cmd_pool_bits);
kfree(h->blockFetchTable);
kfree(h->hba_inquiry_data);
- /*
- * Deliberately omit pci_disable_device(): it does something nasty to
- * Smart Array controllers that pci_enable_device does not undo
- */
+ pci_disable_device(pdev);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
kfree(h);
@@ -4627,11 +5021,8 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
* 10 = 6 s/g entry or 24k
*/
- h->reply_pool_wraparound = 1; /* spec: init to 1 */
-
/* Controller spec: zero out this buffer. */
memset(h->reply_pool, 0, h->reply_pool_size);
- h->reply_pool_head = h->reply_pool;
bft[7] = SG_ENTRIES_IN_CMD + 4;
calc_bucket_map(bft, ARRAY_SIZE(bft),
@@ -4641,12 +5032,19 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
/* size of controller ring buffer */
writel(h->max_commands, &h->transtable->RepQSize);
- writel(1, &h->transtable->RepQCount);
+ writel(h->nreply_queues, &h->transtable->RepQCount);
writel(0, &h->transtable->RepQCtrAddrLow32);
writel(0, &h->transtable->RepQCtrAddrHigh32);
- writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
- writel(0, &h->transtable->RepQAddr0High32);
- writel(CFGTBL_Trans_Performant | use_short_tags,
+
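+ /* Each reply queue gets its own max_commands-entry slice of the
+ * contiguous reply_pool allocation.
+ */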
+ for (i = 0; i < h->nreply_queues; i++) {
+ writel(0, &h->transtable->RepQAddr[i].upper);
+ writel(h->reply_pool_dhandle +
+ (h->max_commands * sizeof(u64) * i),
+ &h->transtable->RepQAddr[i].lower);
+ }
+
+ writel(CFGTBL_Trans_Performant | use_short_tags |
+ CFGTBL_Trans_enable_directed_msix,
&(h->cfgtable->HostWrite.TransportRequest));
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
@@ -4664,6 +5062,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
+ int i;
if (hpsa_simple_mode)
return;
@@ -4672,12 +5071,20 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
if (!(trans_support & PERFORMANT_MODE))
return;
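+ /* Multiple reply queues only make sense with MSI-X (one queue per
+ * vector); otherwise fall back to a single queue.
+ */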
+ h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */
- h->reply_pool_size = h->max_commands * sizeof(u64);
+ h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
&(h->reply_pool_dhandle));
+ for (i = 0; i < h->nreply_queues; i++) {
+ h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
+ h->reply_queue[i].size = h->max_commands;
+ h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
+ h->reply_queue[i].current_entry = 0;
+ }
+
/* Need a block fetch table for performant mode */
h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
sizeof(u32)), GFP_KERNEL);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 7b28d54fa878..981647989bfd 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -34,7 +34,7 @@ struct access_method {
void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
unsigned long (*fifo_full)(struct ctlr_info *h);
bool (*intr_pending)(struct ctlr_info *h);
- unsigned long (*command_completed)(struct ctlr_info *h);
+ unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
struct hpsa_scsi_dev_t {
@@ -48,6 +48,13 @@ struct hpsa_scsi_dev_t {
unsigned char raid_level; /* from inquiry page 0xC1 */
};
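+/* Per-queue state for one slice of the performant-mode reply ring. */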
+struct reply_pool {
+ u64 *head;
+ size_t size;
+ u8 wraparound;
+ u32 current_entry;
+};
+
struct ctlr_info {
int ctlr;
char devname[8];
@@ -68,7 +75,7 @@ struct ctlr_info {
# define DOORBELL_INT 1
# define SIMPLE_MODE_INT 2
# define MEMQ_MODE_INT 3
- unsigned int intr[4];
+ unsigned int intr[MAX_REPLY_QUEUES];
unsigned int msix_vector;
unsigned int msi_vector;
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
@@ -78,7 +85,6 @@ struct ctlr_info {
struct list_head reqQ;
struct list_head cmpQ;
unsigned int Qdepth;
- unsigned int maxQsinceinit;
unsigned int maxSG;
spinlock_t lock;
int maxsgentries;
@@ -111,20 +117,45 @@ struct ctlr_info {
unsigned long transMethod;
/*
- * Performant mode completion buffer
+ * Performant mode completion buffers
*/
u64 *reply_pool;
- dma_addr_t reply_pool_dhandle;
- u64 *reply_pool_head;
size_t reply_pool_size;
- unsigned char reply_pool_wraparound;
+ struct reply_pool reply_queue[MAX_REPLY_QUEUES];
+ u8 nreply_queues;
+ dma_addr_t reply_pool_dhandle;
u32 *blockFetchTable;
unsigned char *hba_inquiry_data;
u64 last_intr_timestamp;
u32 last_heartbeat;
u64 last_heartbeat_timestamp;
+ u32 heartbeat_sample_interval;
+ atomic_t firmware_flash_in_progress;
u32 lockup_detected;
struct list_head lockup_list;
+ /* Address of h->q[x] is passed to intr handler to know which queue */
+ u8 q[MAX_REPLY_QUEUES];
+ u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
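+/* Bits 1-9 cover task management on physical devices; bits 17-25 are
+ * the corresponding logical-device flags.
+ */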
+#define HPSATMF_BITS_SUPPORTED (1 << 0)
+#define HPSATMF_PHYS_LUN_RESET (1 << 1)
+#define HPSATMF_PHYS_NEX_RESET (1 << 2)
+#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
+#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
+#define HPSATMF_PHYS_CLEAR_ACA (1 << 5)
+#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
+#define HPSATMF_PHYS_QRY_TASK (1 << 7)
+#define HPSATMF_PHYS_QRY_TSET (1 << 8)
+#define HPSATMF_PHYS_QRY_ASYNC (1 << 9)
+#define HPSATMF_MASK_SUPPORTED (1 << 16)
+#define HPSATMF_LOG_LUN_RESET (1 << 17)
+#define HPSATMF_LOG_NEX_RESET (1 << 18)
+#define HPSATMF_LOG_TASK_ABORT (1 << 19)
+#define HPSATMF_LOG_TSET_ABORT (1 << 20)
+#define HPSATMF_LOG_CLEAR_ACA (1 << 21)
+#define HPSATMF_LOG_CLEAR_TSET (1 << 22)
+#define HPSATMF_LOG_QRY_TASK (1 << 23)
+#define HPSATMF_LOG_QRY_TSET (1 << 24)
+#define HPSATMF_LOG_QRY_ASYNC (1 << 25)
};
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
@@ -216,9 +247,6 @@ static void SA5_submit_command(struct ctlr_info *h,
c->Header.Tag.lower);
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
- h->commands_outstanding++;
- if (h->commands_outstanding > h->max_outstanding)
- h->max_outstanding = h->commands_outstanding;
}
/*
@@ -254,16 +282,17 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
}
}
-static unsigned long SA5_performant_completed(struct ctlr_info *h)
+static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
- unsigned long register_value = FIFO_EMPTY;
+ struct reply_pool *rq = &h->reply_queue[q];
+ unsigned long flags, register_value = FIFO_EMPTY;
- /* flush the controller write of the reply queue by reading
- * outbound doorbell status register.
- */
- register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
/* msi auto clears the interrupt pending bit. */
if (!(h->msi_vector || h->msix_vector)) {
+ /* flush the controller write of the reply queue by reading
+ * outbound doorbell status register.
+ */
+ register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
/* Do a read in order to flush the write to the controller
* (as per spec.)
@@ -271,19 +300,20 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h)
register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
}
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- register_value = *(h->reply_pool_head);
- (h->reply_pool_head)++;
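+ /* The producer toggles the low bit of each entry on every pass
+ * through the ring, so an entry whose parity matches rq->wraparound
+ * is newly written.
+ */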
+ if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+ register_value = rq->head[rq->current_entry];
+ rq->current_entry++;
+ spin_lock_irqsave(&h->lock, flags);
h->commands_outstanding--;
+ spin_unlock_irqrestore(&h->lock, flags);
} else {
register_value = FIFO_EMPTY;
}
/* Check for wraparound */
- if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
- h->reply_pool_head = h->reply_pool;
- h->reply_pool_wraparound ^= 1;
+ if (rq->current_entry == h->max_commands) {
+ rq->current_entry = 0;
+ rq->wraparound ^= 1;
}
-
return register_value;
}
@@ -303,13 +333,18 @@ static unsigned long SA5_fifo_full(struct ctlr_info *h)
* returns value read from hardware.
* returns FIFO_EMPTY if there is nothing to read
*/
-static unsigned long SA5_completed(struct ctlr_info *h)
+static unsigned long SA5_completed(struct ctlr_info *h,
+ __attribute__((unused)) u8 q)
{
unsigned long register_value
= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
+ unsigned long flags;
- if (register_value != FIFO_EMPTY)
+ if (register_value != FIFO_EMPTY) {
+ spin_lock_irqsave(&h->lock, flags);
h->commands_outstanding--;
+ spin_unlock_irqrestore(&h->lock, flags);
+ }
#ifdef HPSA_DEBUG
if (register_value != FIFO_EMPTY)
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 8049815d8c1e..a894f2eca7ac 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -82,6 +82,29 @@
#define TYPE_CMD 0x00
#define TYPE_MSG 0x01
+/* Message Types */
+#define HPSA_TASK_MANAGEMENT 0x00
+#define HPSA_RESET 0x01
+#define HPSA_SCAN 0x02
+#define HPSA_NOOP 0x03
+
+#define HPSA_CTLR_RESET_TYPE 0x00
+#define HPSA_BUS_RESET_TYPE 0x01
+#define HPSA_TARGET_RESET_TYPE 0x03
+#define HPSA_LUN_RESET_TYPE 0x04
+#define HPSA_NEXUS_RESET_TYPE 0x05
+
+/* Task Management Functions */
+#define HPSA_TMF_ABORT_TASK 0x00
+#define HPSA_TMF_ABORT_TASK_SET 0x01
+#define HPSA_TMF_CLEAR_ACA 0x02
+#define HPSA_TMF_CLEAR_TASK_SET 0x03
+#define HPSA_TMF_QUERY_TASK 0x04
+#define HPSA_TMF_QUERY_TASK_SET 0x05
+#define HPSA_TMF_QUERY_ASYNCEVENT 0x06
+
/* config space register offsets */
#define CFG_VENDORID 0x00
#define CFG_DEVICEID 0x02
@@ -106,6 +129,7 @@
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
#define CFGTBL_Trans_use_short_tags 0x20000000l
+#define CFGTBL_Trans_enable_directed_msix (1 << 30)
#define CFGTBL_BusType_Ultra2 0x00000001l
#define CFGTBL_BusType_Ultra3 0x00000002l
@@ -162,6 +186,7 @@ struct SenseSubsystem_info {
#define BMIC_WRITE 0x27
#define BMIC_CACHE_FLUSH 0xc2
#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
+#define BMIC_FLASH_FIRMWARE 0xF7
/* Command List Structure */
union SCSI3Addr {
@@ -337,11 +362,17 @@ struct CfgTable {
u32 MaxPhysicalDevices;
u32 MaxPhysicalDrivesPerLogicalUnit;
u32 MaxPerformantModeCommands;
- u8 reserved[0x78 - 0x58];
+ u32 MaxBlockFetch;
+ u32 PowerConservationSupport;
+ u32 PowerConservationEnable;
+ u32 TMFSupportFlags;
+ u8 TMFTagMask[8];
+ u8 reserved[0x78 - 0x70];
u32 misc_fw_support; /* offset 0x78 */
#define MISC_FW_DOORBELL_RESET (0x02)
#define MISC_FW_DOORBELL_RESET2 (0x010)
u8 driver_version[32];
};
#define NUM_BLOCKFETCH_ENTRIES 8
@@ -351,8 +382,8 @@ struct TransTable_struct {
u32 RepQCount;
u32 RepQCtrAddrLow32;
u32 RepQCtrAddrHigh32;
- u32 RepQAddr0Low32;
- u32 RepQAddr0High32;
+#define MAX_REPLY_QUEUES 8
+ struct vals32 RepQAddr[MAX_REPLY_QUEUES];
};
struct hpsa_pci_info {
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d4bf9c12ecd4..45385f531649 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -192,22 +192,27 @@ static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
static bool sci_controller_isr(struct isci_host *ihost)
{
- if (sci_controller_completion_queue_has_entries(ihost)) {
+ if (sci_controller_completion_queue_has_entries(ihost))
return true;
- } else {
- /*
- * we have a spurious interrupt it could be that we have already
- * emptied the completion queue from a previous interrupt */
- writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
- /*
- * There is a race in the hardware that could cause us not to be notified
- * of an interrupt completion if we do not take this step. We will mask
- * then unmask the interrupts so if there is another interrupt pending
- * the clearing of the interrupt source we get the next interrupt message. */
+ /* we have a spurious interrupt; it could be that we have already
+ * emptied the completion queue from a previous interrupt
+ * FIXME: really!?
+ */
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+ /* There is a race in the hardware that could cause us not to be
+ * notified of an interrupt completion if we do not take this
+ * step. We will mask then unmask the interrupts so that if
+ * another interrupt is pending after the clearing of the
+ * interrupt source, we get the next interrupt message.
+ */
+ spin_lock(&ihost->scic_lock);
+ if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
writel(0, &ihost->smu_registers->interrupt_mask);
}
+ spin_unlock(&ihost->scic_lock);
return false;
}
@@ -642,7 +647,6 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co
if (completion_status != SCI_SUCCESS)
dev_info(&ihost->pdev->dev,
"controller start timed out, continuing...\n");
- isci_host_change_state(ihost, isci_ready);
clear_bit(IHOST_START_PENDING, &ihost->flags);
wake_up(&ihost->eventq);
}
@@ -657,12 +661,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
sas_drain_work(ha);
- dev_dbg(&ihost->pdev->dev,
- "%s: ihost->status = %d, time = %ld\n",
- __func__, isci_host_get_state(ihost), time);
-
return 1;
-
}
/**
@@ -704,14 +703,15 @@ static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
static void sci_controller_enable_interrupts(struct isci_host *ihost)
{
- BUG_ON(ihost->smu_registers == NULL);
+ set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
writel(0, &ihost->smu_registers->interrupt_mask);
}
void sci_controller_disable_interrupts(struct isci_host *ihost)
{
- BUG_ON(ihost->smu_registers == NULL);
+ clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+ readl(&ihost->smu_registers->interrupt_mask); /* flush */
}
static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
@@ -822,7 +822,7 @@ static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *
&ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
}
-static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
{
if (ihost->sm.current_state_id == SCIC_STARTING) {
/*
@@ -849,6 +849,7 @@ static bool is_phy_starting(struct isci_phy *iphy)
case SCI_PHY_SUB_AWAIT_SATA_POWER:
case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
case SCI_PHY_SUB_FINAL:
return true;
@@ -857,6 +858,39 @@ static bool is_phy_starting(struct isci_phy *iphy)
}
}
+bool is_controller_start_complete(struct isci_host *ihost)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct isci_phy *iphy = &ihost->phys[i];
+ u32 state = iphy->sm.current_state_id;
+
+ /* in apc mode we need to check every phy, in
+ * mpc mode we only need to check phys that have
+ * been configured into a port
+ */
+ if (is_port_config_apc(ihost))
+ /* pass */;
+ else if (!phy_get_non_dummy_port(iphy))
+ continue;
+
+ /* The controller start operation is complete iff:
+ * - all links have been given an opportunity to start
+ * - each link either has no indication of a connected device, or
+ *   has an indication of a connected device that has finished the
+ *   link training process.
+ */
+ if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+ (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+ (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+ (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
+ return false;
+ }
+
+ return true;
+}
+
/**
* sci_controller_start_next_phy - start phy
* @scic: controller
@@ -877,36 +911,7 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
return status;
if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
- bool is_controller_start_complete = true;
- u32 state;
- u8 index;
-
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- iphy = &ihost->phys[index];
- state = iphy->sm.current_state_id;
-
- if (!phy_get_non_dummy_port(iphy))
- continue;
-
- /* The controller start operation is complete iff:
- * - all links have been given an opportunity to start
- * - have no indication of a connected device
- * - have an indication of a connected device and it has
- * finished the link training process.
- */
- if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
- (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
- (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
- (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
- is_controller_start_complete = false;
- break;
- }
- }
-
- /*
- * The controller has successfully finished the start process.
- * Inform the SCI Core user and transition to the READY state. */
- if (is_controller_start_complete == true) {
+ if (is_controller_start_complete(ihost)) {
sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
sci_del_timer(&ihost->phy_timer);
ihost->phy_startup_timer_pending = false;
@@ -987,9 +992,8 @@ static enum sci_status sci_controller_start(struct isci_host *ihost,
u16 index;
if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
- dev_warn(&ihost->pdev->dev,
- "SCIC Controller start operation requested in "
- "invalid state\n");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
@@ -1053,9 +1057,8 @@ void isci_host_scan_start(struct Scsi_Host *shost)
spin_unlock_irq(&ihost->scic_lock);
}
-static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
+static void isci_host_stop_complete(struct isci_host *ihost)
{
- isci_host_change_state(ihost, isci_stopped);
sci_controller_disable_interrupts(ihost);
clear_bit(IHOST_STOP_PENDING, &ihost->flags);
wake_up(&ihost->eventq);
@@ -1074,6 +1077,32 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
writel(0, &ihost->smu_registers->interrupt_mask);
}
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
+{
+ task->lldd_task = NULL;
+ if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
+ !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Normal - ireq/task = %p/%p\n",
+ __func__, ireq, task);
+
+ task->task_done(task);
+ } else {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Error - ireq/task = %p/%p\n",
+ __func__, ireq, task);
+
+ sas_task_abort(task);
+ }
+ }
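+ /* Abort-path waiters block on ihost->eventq until
+ * IREQ_ABORT_PATH_ACTIVE clears; wake them now that this request is
+ * done.
+ */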
+ if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+ wake_up_all(&ihost->eventq);
+
+ if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+ isci_free_tag(ihost, ireq->io_tag);
+}
/**
* isci_host_completion_routine() - This function is the delayed service
* routine that calls the sci core library's completion handler. It's
@@ -1082,107 +1111,15 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
* @data: This parameter specifies the ISCI host object
*
*/
-static void isci_host_completion_routine(unsigned long data)
+void isci_host_completion_routine(unsigned long data)
{
struct isci_host *ihost = (struct isci_host *)data;
- struct list_head completed_request_list;
- struct list_head errored_request_list;
- struct list_head *current_position;
- struct list_head *next_position;
- struct isci_request *request;
- struct isci_request *next_request;
- struct sas_task *task;
u16 active;
- INIT_LIST_HEAD(&completed_request_list);
- INIT_LIST_HEAD(&errored_request_list);
-
spin_lock_irq(&ihost->scic_lock);
-
sci_controller_completion_handler(ihost);
-
- /* Take the lists of completed I/Os from the host. */
-
- list_splice_init(&ihost->requests_to_complete,
- &completed_request_list);
-
- /* Take the list of errored I/Os from the host. */
- list_splice_init(&ihost->requests_to_errorback,
- &errored_request_list);
-
spin_unlock_irq(&ihost->scic_lock);
- /* Process any completions in the lists. */
- list_for_each_safe(current_position, next_position,
- &completed_request_list) {
-
- request = list_entry(current_position, struct isci_request,
- completed_node);
- task = isci_request_access_task(request);
-
- /* Normal notification (task_done) */
- dev_dbg(&ihost->pdev->dev,
- "%s: Normal - request/task = %p/%p\n",
- __func__,
- request,
- task);
-
- /* Return the task to libsas */
- if (task != NULL) {
-
- task->lldd_task = NULL;
- if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-
- /* If the task is already in the abort path,
- * the task_done callback cannot be called.
- */
- task->task_done(task);
- }
- }
-
- spin_lock_irq(&ihost->scic_lock);
- isci_free_tag(ihost, request->io_tag);
- spin_unlock_irq(&ihost->scic_lock);
- }
- list_for_each_entry_safe(request, next_request, &errored_request_list,
- completed_node) {
-
- task = isci_request_access_task(request);
-
- /* Use sas_task_abort */
- dev_warn(&ihost->pdev->dev,
- "%s: Error - request/task = %p/%p\n",
- __func__,
- request,
- task);
-
- if (task != NULL) {
-
- /* Put the task into the abort path if it's not there
- * already.
- */
- if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
- sas_task_abort(task);
-
- } else {
- /* This is a case where the request has completed with a
- * status such that it needed further target servicing,
- * but the sas_task reference has already been removed
- * from the request. Since it was errored, it was not
- * being aborted, so there is nothing to do except free
- * it.
- */
-
- spin_lock_irq(&ihost->scic_lock);
- /* Remove the request from the remote device's list
- * of pending requests.
- */
- list_del_init(&request->dev_node);
- isci_free_tag(ihost, request->io_tag);
- spin_unlock_irq(&ihost->scic_lock);
- }
- }
-
/* the coalesence timeout doubles at each encoding step, so
* update it based on the ilog2 value of the outstanding requests
*/
@@ -1213,9 +1150,8 @@ static void isci_host_completion_routine(unsigned long data)
static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
{
if (ihost->sm.current_state_id != SCIC_READY) {
- dev_warn(&ihost->pdev->dev,
- "SCIC Controller stop operation requested in "
- "invalid state\n");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
@@ -1241,7 +1177,7 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost)
switch (ihost->sm.current_state_id) {
case SCIC_RESET:
case SCIC_READY:
- case SCIC_STOPPED:
+ case SCIC_STOPPING:
case SCIC_FAILED:
/*
* The reset operation is not a graceful cleanup, just
@@ -1250,13 +1186,50 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost)
sci_change_state(&ihost->sm, SCIC_RESETTING);
return SCI_SUCCESS;
default:
- dev_warn(&ihost->pdev->dev,
- "SCIC Controller reset operation requested in "
- "invalid state\n");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
}
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status status;
+ enum sci_status phy_status;
+
+ status = SCI_SUCCESS;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ phy_status = sci_phy_stop(&ihost->phys[index]);
+
+ if (phy_status != SCI_SUCCESS &&
+ phy_status != SCI_FAILURE_INVALID_STATE) {
+ status = SCI_FAILURE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed to stop "
+ "phy %d because of status %d.\n",
+ __func__,
+ ihost->phys[index].phy_index, phy_status);
+ }
+ }
+
+ return status;
+}
+
+
+/**
+ * isci_host_deinit - shutdown frame reception and dma
+ * @ihost: host to take down
+ *
+ * This is called in either the driver shutdown or the suspend path. In
+ * the shutdown case libsas went through port teardown and normal device
+ * removal (i.e. physical links stayed up to service scsi_device removal
+ * commands). In the suspend case we disable the hardware without
+ * notifying libsas of the link down events since we want libsas to
+ * remember the domain across the suspend/resume cycle.
+ */
void isci_host_deinit(struct isci_host *ihost)
{
int i;
@@ -1265,17 +1238,6 @@ void isci_host_deinit(struct isci_host *ihost)
for (i = 0; i < isci_gpio_count(ihost); i++)
writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
- isci_host_change_state(ihost, isci_stopping);
- for (i = 0; i < SCI_MAX_PORTS; i++) {
- struct isci_port *iport = &ihost->ports[i];
- struct isci_remote_device *idev, *d;
-
- list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
- if (test_bit(IDEV_ALLOCATED, &idev->flags))
- isci_remote_device_stop(ihost, idev);
- }
- }
-
set_bit(IHOST_STOP_PENDING, &ihost->flags);
spin_lock_irq(&ihost->scic_lock);
@@ -1284,12 +1246,21 @@ void isci_host_deinit(struct isci_host *ihost)
wait_for_stop(ihost);
+ /* phy stop is after controller stop to allow port and device to
+ * go idle before shutting down the phys, but the expectation is
+ * that i/o has been shut off well before we reach this
+ * function.
+ */
+ sci_controller_stop_phys(ihost);
+
/* disable sgpio: where the above wait should give time for the
* enclosure to sample the gpios going inactive
*/
writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
+ spin_lock_irq(&ihost->scic_lock);
sci_controller_reset(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
/* Cancel any/all outstanding port timers */
for (i = 0; i < ihost->logical_port_entries; i++) {
@@ -1328,29 +1299,6 @@ static void __iomem *smu_base(struct isci_host *isci_host)
return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}
-static void isci_user_parameters_get(struct sci_user_parameters *u)
-{
- int i;
-
- for (i = 0; i < SCI_MAX_PHYS; i++) {
- struct sci_phy_user_params *u_phy = &u->phys[i];
-
- u_phy->max_speed_generation = phy_gen;
-
- /* we are not exporting these for now */
- u_phy->align_insertion_frequency = 0x7f;
- u_phy->in_connection_align_insertion_frequency = 0xff;
- u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
- }
-
- u->stp_inactivity_timeout = stp_inactive_to;
- u->ssp_inactivity_timeout = ssp_inactive_to;
- u->stp_max_occupancy_timeout = stp_max_occ_to;
- u->ssp_max_occupancy_timeout = ssp_max_occ_to;
- u->no_outbound_task_timeout = no_outbound_task_to;
- u->max_concurr_spinup = max_concurr_spinup;
-}
-
static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
@@ -1510,32 +1458,6 @@ static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}
-static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
-{
- u32 index;
- enum sci_status status;
- enum sci_status phy_status;
-
- status = SCI_SUCCESS;
-
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- phy_status = sci_phy_stop(&ihost->phys[index]);
-
- if (phy_status != SCI_SUCCESS &&
- phy_status != SCI_FAILURE_INVALID_STATE) {
- status = SCI_FAILURE;
-
- dev_warn(&ihost->pdev->dev,
- "%s: Controller stop operation failed to stop "
- "phy %d because of status %d.\n",
- __func__,
- ihost->phys[index].phy_index, phy_status);
- }
- }
-
- return status;
-}
-
static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
{
u32 index;
@@ -1595,10 +1517,11 @@ static void sci_controller_stopping_state_enter(struct sci_base_state_machine *s
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
- /* Stop all of the components for this controller */
- sci_controller_stop_phys(ihost);
- sci_controller_stop_ports(ihost);
sci_controller_stop_devices(ihost);
+ sci_controller_stop_ports(ihost);
+
+ if (!sci_controller_has_remote_devices_stopping(ihost))
+ isci_host_stop_complete(ihost);
}
static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
@@ -1624,6 +1547,9 @@ static void sci_controller_reset_hardware(struct isci_host *ihost)
/* The write to the UFQGP clears the UFQPR */
writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+
+ /* clear all interrupts */
+ writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
}
static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
@@ -1655,59 +1581,9 @@ static const struct sci_base_state sci_controller_state_table[] = {
.enter_state = sci_controller_stopping_state_enter,
.exit_state = sci_controller_stopping_state_exit,
},
- [SCIC_STOPPED] = {},
[SCIC_FAILED] = {}
};
-static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
-{
- /* these defaults are overridden by the platform / firmware */
- u16 index;
-
- /* Default to APC mode. */
- ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
-
- /* Default to APC mode. */
- ihost->oem_parameters.controller.max_concurr_spin_up = 1;
-
- /* Default to no SSC operation. */
- ihost->oem_parameters.controller.do_enable_ssc = false;
-
- /* Default to short cables on all phys. */
- ihost->oem_parameters.controller.cable_selection_mask = 0;
-
- /* Initialize all of the port parameter information to narrow ports. */
- for (index = 0; index < SCI_MAX_PORTS; index++) {
- ihost->oem_parameters.ports[index].phy_mask = 0;
- }
-
- /* Initialize all of the phy parameter information. */
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- /* Default to 3G (i.e. Gen 2). */
- ihost->user_parameters.phys[index].max_speed_generation =
- SCIC_SDS_PARM_GEN2_SPEED;
-
- /* the frequencies cannot be 0 */
- ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
- ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
- ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
-
- /*
- * Previous Vitesse based expanders had a arbitration issue that
- * is worked around by having the upper 32-bits of SAS address
- * with a value greater then the Vitesse company identifier.
- * Hence, usage of 0x5FCFFFFF. */
- ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
- ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
- }
-
- ihost->user_parameters.stp_inactivity_timeout = 5;
- ihost->user_parameters.ssp_inactivity_timeout = 5;
- ihost->user_parameters.stp_max_occupancy_timeout = 5;
- ihost->user_parameters.ssp_max_occupancy_timeout = 20;
- ihost->user_parameters.no_outbound_task_timeout = 2;
-}
-
static void controller_timeout(unsigned long data)
{
struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1724,7 +1600,7 @@ static void controller_timeout(unsigned long data)
sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
else if (sm->current_state_id == SCIC_STOPPING) {
sci_change_state(sm, SCIC_FAILED);
- isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
+ isci_host_stop_complete(ihost);
} else /* / @todo Now what do we want to do in this case? */
dev_err(&ihost->pdev->dev,
"%s: Controller timer fired when controller was not "
@@ -1764,9 +1640,6 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
sci_init_timer(&ihost->timer, controller_timeout);
- /* Initialize the User and OEM parameters to default values. */
- sci_controller_set_default_config_parameters(ihost);
-
return sci_controller_reset(ihost);
}
@@ -1846,27 +1719,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
return 0;
}
-static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
-{
- u32 state = ihost->sm.current_state_id;
- struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
-
- if (state == SCIC_RESET ||
- state == SCIC_INITIALIZING ||
- state == SCIC_INITIALIZED) {
- u8 oem_version = pci_info->orom ? pci_info->orom->hdr.version :
- ISCI_ROM_VER_1_0;
-
- if (sci_oem_parameters_validate(&ihost->oem_parameters,
- oem_version))
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
- return SCI_SUCCESS;
- }
-
- return SCI_FAILURE_INVALID_STATE;
-}
-
static u8 max_spin_up(struct isci_host *ihost)
{
if (ihost->user_parameters.max_concurr_spinup)
@@ -1914,7 +1766,7 @@ static void power_control_timeout(unsigned long data)
ihost->power_control.phys_granted_power++;
sci_phy_consume_power_handler(iphy);
- if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ if (iphy->protocol == SAS_PROTOCOL_SSP) {
u8 j;
for (j = 0; j < SCI_MAX_PHYS; j++) {
@@ -1988,7 +1840,7 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
sizeof(current_phy->frame_rcvd.iaf.sas_addr));
if (current_phy->sm.current_state_id == SCI_PHY_READY &&
- current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+ current_phy->protocol == SAS_PROTOCOL_SSP &&
other == 0) {
sci_phy_consume_power_handler(iphy);
break;
@@ -2279,9 +2131,8 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost)
unsigned long i, state, val;
if (ihost->sm.current_state_id != SCIC_RESET) {
- dev_warn(&ihost->pdev->dev,
- "SCIC Controller initialize operation requested "
- "in invalid state\n");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
@@ -2384,96 +2235,76 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost)
return result;
}
-static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
- struct sci_user_parameters *sci_parms)
-{
- u32 state = ihost->sm.current_state_id;
-
- if (state == SCIC_RESET ||
- state == SCIC_INITIALIZING ||
- state == SCIC_INITIALIZED) {
- u16 index;
-
- /*
- * Validate the user parameters. If they are not legal, then
- * return a failure.
- */
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- struct sci_phy_user_params *user_phy;
-
- user_phy = &sci_parms->phys[index];
-
- if (!((user_phy->max_speed_generation <=
- SCIC_SDS_PARM_MAX_SPEED) &&
- (user_phy->max_speed_generation >
- SCIC_SDS_PARM_NO_SPEED)))
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
- if (user_phy->in_connection_align_insertion_frequency <
- 3)
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
- if ((user_phy->in_connection_align_insertion_frequency <
- 3) ||
- (user_phy->align_insertion_frequency == 0) ||
- (user_phy->
- notify_enable_spin_up_insertion_frequency ==
- 0))
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
- }
-
- if ((sci_parms->stp_inactivity_timeout == 0) ||
- (sci_parms->ssp_inactivity_timeout == 0) ||
- (sci_parms->stp_max_occupancy_timeout == 0) ||
- (sci_parms->ssp_max_occupancy_timeout == 0) ||
- (sci_parms->no_outbound_task_timeout == 0))
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
- memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
-
- return SCI_SUCCESS;
- }
-
- return SCI_FAILURE_INVALID_STATE;
-}
-
-static int sci_controller_mem_init(struct isci_host *ihost)
+static int sci_controller_dma_alloc(struct isci_host *ihost)
{
struct device *dev = &ihost->pdev->dev;
- dma_addr_t dma;
size_t size;
- int err;
+ int i;
+
+ /* detect re-initialization */
+ if (ihost->completion_queue)
+ return 0;
size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
- ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
+ GFP_KERNEL);
if (!ihost->completion_queue)
return -ENOMEM;
- writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
- writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
-
size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
- ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
+ ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
GFP_KERNEL);
+
if (!ihost->remote_node_context_table)
return -ENOMEM;
- writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
- writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
-
size = ihost->task_context_entries * sizeof(struct scu_task_context),
- ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
+ GFP_KERNEL);
if (!ihost->task_context_table)
return -ENOMEM;
- ihost->task_context_dma = dma;
- writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
- writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
+ size = SCI_UFI_TOTAL_SIZE;
+ ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
+ if (!ihost->ufi_buf)
+ return -ENOMEM;
+
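+ /* Pre-allocate a DMA-coherent isci_request per possible tag so the
+ * I/O path never allocates; each request is bound to its task context
+ * entry up front.
+ */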
+ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+ struct isci_request *ireq;
+ dma_addr_t dma;
+
+ ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
+ if (!ireq)
+ return -ENOMEM;
+
+ ireq->tc = &ihost->task_context_table[i];
+ ireq->owning_controller = ihost;
+ ireq->request_daddr = dma;
+ ireq->isci_host = ihost;
+ ihost->reqs[i] = ireq;
+ }
+
+ return 0;
+}
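The allocation path above is deliberately idempotent: the early return on a non-NULL completion_queue lets the routine run again across a controller reset or resume without re-allocating the devm-managed DMA buffers. A minimal userspace sketch of that pattern (the ctx struct and ctx_buffers_alloc name are illustrative, not driver API):

    #include <stdint.h>
    #include <stdlib.h>

    struct ctx {
            uint32_t *completion_queue;
            size_t cq_entries;
    };

    /* Safe to call repeatedly; only the first call allocates. */
    static int ctx_buffers_alloc(struct ctx *c)
    {
            if (c->completion_queue)        /* detect re-initialization */
                    return 0;

            c->completion_queue = calloc(c->cq_entries,
                                         sizeof(*c->completion_queue));
            return c->completion_queue ? 0 : -1;
    }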
+
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+ int err = sci_controller_dma_alloc(ihost);
- err = sci_unsolicited_frame_control_construct(ihost);
if (err)
return err;
+ writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
+ writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
+
+ writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
+ writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
+
+ writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
+ writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
+
+ sci_unsolicited_frame_control_construct(ihost);
+
/*
* Inform the silicon as to the location of the UF headers and
* address table.
@@ -2491,22 +2322,22 @@ static int sci_controller_mem_init(struct isci_host *ihost)
return 0;
}
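With allocation split out, sci_controller_mem_init() only programs base addresses, and each table base is written as a lower/upper register pair because the SMU registers are 32 bits wide while the coherent DMA addresses can be 64-bit. A sketch of the split, with plain struct fields standing in for the real iomem accessors:

    #include <stdint.h>

    struct base_pair {
            uint32_t lower;
            uint32_t upper;
    };

    /* mirrors the kernel's lower_32_bits()/upper_32_bits() helpers */
    static void program_base(struct base_pair *reg, uint64_t dma)
    {
            reg->lower = (uint32_t)dma;
            reg->upper = (uint32_t)(dma >> 32);
    }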
+/**
+ * isci_host_init - (re-)initialize hardware and internal (private) state
+ * @ihost: host to init
+ *
+ * Any public facing objects (like asd_sas_port and asd_sas_phys), or
+ * one-time initialization objects like locks and waitqueues, are
+ * not touched (they are initialized in isci_host_alloc)
+ */
int isci_host_init(struct isci_host *ihost)
{
- int err = 0, i;
+ int i, err;
enum sci_status status;
- struct sci_user_parameters sci_user_params;
- struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
-
- spin_lock_init(&ihost->state_lock);
- spin_lock_init(&ihost->scic_lock);
- init_waitqueue_head(&ihost->eventq);
-
- isci_host_change_state(ihost, isci_starting);
-
- status = sci_controller_construct(ihost, scu_base(ihost),
- smu_base(ihost));
+ spin_lock_irq(&ihost->scic_lock);
+ status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
+ spin_unlock_irq(&ihost->scic_lock);
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev,
"%s: sci_controller_construct failed - status = %x\n",
@@ -2515,48 +2346,6 @@ int isci_host_init(struct isci_host *ihost)
return -ENODEV;
}
- ihost->sas_ha.dev = &ihost->pdev->dev;
- ihost->sas_ha.lldd_ha = ihost;
-
- /*
- * grab initial values stored in the controller object for OEM and USER
- * parameters
- */
- isci_user_parameters_get(&sci_user_params);
- status = sci_user_parameters_set(ihost, &sci_user_params);
- if (status != SCI_SUCCESS) {
- dev_warn(&ihost->pdev->dev,
- "%s: sci_user_parameters_set failed\n",
- __func__);
- return -ENODEV;
- }
-
- /* grab any OEM parameters specified in orom */
- if (pci_info->orom) {
- status = isci_parse_oem_parameters(&ihost->oem_parameters,
- pci_info->orom,
- ihost->id);
- if (status != SCI_SUCCESS) {
- dev_warn(&ihost->pdev->dev,
- "parsing firmware oem parameters failed\n");
- return -EINVAL;
- }
- }
-
- status = sci_oem_parameters_set(ihost);
- if (status != SCI_SUCCESS) {
- dev_warn(&ihost->pdev->dev,
- "%s: sci_oem_parameters_set failed\n",
- __func__);
- return -ENODEV;
- }
-
- tasklet_init(&ihost->completion_tasklet,
- isci_host_completion_routine, (unsigned long)ihost);
-
- INIT_LIST_HEAD(&ihost->requests_to_complete);
- INIT_LIST_HEAD(&ihost->requests_to_errorback);
-
spin_lock_irq(&ihost->scic_lock);
status = sci_controller_initialize(ihost);
spin_unlock_irq(&ihost->scic_lock);
@@ -2572,43 +2361,12 @@ int isci_host_init(struct isci_host *ihost)
if (err)
return err;
- for (i = 0; i < SCI_MAX_PORTS; i++)
- isci_port_init(&ihost->ports[i], ihost, i);
-
- for (i = 0; i < SCI_MAX_PHYS; i++)
- isci_phy_init(&ihost->phys[i], ihost, i);
-
/* enable sgpio */
writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
for (i = 0; i < isci_gpio_count(ihost); i++)
writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
- for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
- struct isci_remote_device *idev = &ihost->devices[i];
-
- INIT_LIST_HEAD(&idev->reqs_in_process);
- INIT_LIST_HEAD(&idev->node);
- }
-
- for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
- struct isci_request *ireq;
- dma_addr_t dma;
-
- ireq = dmam_alloc_coherent(&ihost->pdev->dev,
- sizeof(struct isci_request), &dma,
- GFP_KERNEL);
- if (!ireq)
- return -ENOMEM;
-
- ireq->tc = &ihost->task_context_table[i];
- ireq->owning_controller = ihost;
- spin_lock_init(&ireq->state_lock);
- ireq->request_daddr = dma;
- ireq->isci_host = ihost;
- ihost->reqs[i] = ireq;
- }
-
return 0;
}
@@ -2654,7 +2412,7 @@ void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
}
}
-static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
{
u32 index;
@@ -2680,7 +2438,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost,
}
if (!sci_controller_has_remote_devices_stopping(ihost))
- sci_change_state(&ihost->sm, SCIC_STOPPED);
+ isci_host_stop_complete(ihost);
}
void sci_controller_post_request(struct isci_host *ihost, u32 request)
@@ -2842,7 +2600,8 @@ enum sci_status sci_controller_start_io(struct isci_host *ihost,
enum sci_status status;
if (ihost->sm.current_state_id != SCIC_READY) {
- dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
@@ -2866,22 +2625,26 @@ enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
enum sci_status status;
if (ihost->sm.current_state_id != SCIC_READY) {
- dev_warn(&ihost->pdev->dev,
- "invalid state to terminate request\n");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
-
status = sci_io_request_terminate(ireq);
- if (status != SCI_SUCCESS)
- return status;
- /*
- * Utilize the original post context command and or in the POST_TC_ABORT
- * request sub-type.
- */
- sci_controller_post_request(ihost,
- ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
- return SCI_SUCCESS;
+ dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
+ __func__, status, ireq, ireq->flags);
+
+ if ((status == SCI_SUCCESS) &&
+ !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
+ !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
+ /* Utilize the original post context command and or in the
+ * POST_TC_ABORT request sub-type.
+ */
+ sci_controller_post_request(
+ ihost, ireq->post_context |
+ SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+ }
+ return status;
}
/**
@@ -2915,7 +2678,8 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
clear_bit(IREQ_ACTIVE, &ireq->flags);
return SCI_SUCCESS;
default:
- dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
@@ -2926,7 +2690,8 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
struct isci_host *ihost = ireq->owning_controller;
if (ihost->sm.current_state_id != SCIC_READY) {
- dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index adbad69d1069..9ab58e0540e7 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -55,6 +55,7 @@
#ifndef _SCI_HOST_H_
#define _SCI_HOST_H_
+#include <scsi/sas_ata.h>
#include "remote_device.h"
#include "phy.h"
#include "isci.h"
@@ -108,6 +109,8 @@ struct sci_port_configuration_agent;
typedef void (*port_config_fn)(struct isci_host *,
struct sci_port_configuration_agent *,
struct isci_port *, struct isci_phy *);
+bool is_port_config_apc(struct isci_host *ihost);
+bool is_controller_start_complete(struct isci_host *ihost);
struct sci_port_configuration_agent {
u16 phy_configured_mask;
@@ -157,13 +160,17 @@ struct isci_host {
struct sci_power_control power_control;
u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
struct scu_task_context *task_context_table;
- dma_addr_t task_context_dma;
+ dma_addr_t tc_dma;
union scu_remote_node_context *remote_node_context_table;
+ dma_addr_t rnc_dma;
u32 *completion_queue;
+ dma_addr_t cq_dma;
u32 completion_queue_get;
u32 logical_port_entries;
u32 remote_node_entries;
u32 task_context_entries;
+ void *ufi_buf;
+ dma_addr_t ufi_dma;
struct sci_unsolicited_frame_control uf_control;
/* phy startup */
@@ -190,17 +197,13 @@ struct isci_host {
struct asd_sas_port sas_ports[SCI_MAX_PORTS];
struct sas_ha_struct sas_ha;
- spinlock_t state_lock;
struct pci_dev *pdev;
- enum isci_status status;
#define IHOST_START_PENDING 0
#define IHOST_STOP_PENDING 1
+ #define IHOST_IRQ_ENABLED 2
unsigned long flags;
wait_queue_head_t eventq;
- struct Scsi_Host *shost;
struct tasklet_struct completion_tasklet;
- struct list_head requests_to_complete;
- struct list_head requests_to_errorback;
spinlock_t scic_lock;
struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
@@ -274,13 +277,6 @@ enum sci_controller_states {
SCIC_STOPPING,
/**
- * This state indicates that the controller has successfully been stopped.
- * In this state no new IO operations are permitted.
- * This state is entered from the STOPPING state.
- */
- SCIC_STOPPED,
-
- /**
* This state indicates that the controller could not successfully be
* initialized. In this state no new IO operations are permitted.
* This state is entered from the INITIALIZING state.
@@ -309,32 +305,16 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
return pci_get_drvdata(pdev);
}
+static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
+{
+ return ihost->sas_ha.core.shost;
+}
+
#define for_each_isci_host(id, ihost, pdev) \
for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
ihost = to_pci_info(pdev)->hosts[++id])
-static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
-{
- return isci_host->status;
-}
-
-static inline void isci_host_change_state(struct isci_host *isci_host,
- enum isci_status status)
-{
- unsigned long flags;
-
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_host = %p, state = 0x%x",
- __func__,
- isci_host,
- status);
- spin_lock_irqsave(&isci_host->state_lock, flags);
- isci_host->status = status;
- spin_unlock_irqrestore(&isci_host->state_lock, flags);
-
-}
-
static inline void wait_for_start(struct isci_host *ihost)
{
wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
@@ -360,6 +340,11 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
return dev->port->ha->lldd_ha;
}
+static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev)
+{
+ return dev_to_ihost(idev->domain_dev);
+}
+
/* we always use protocol engine group zero */
#define ISCI_PEG 0
@@ -378,8 +363,7 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
{
struct domain_device *dev = idev->domain_dev;
- if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
- !idev->is_direct_attached)
+ if (dev_is_sata(dev) && dev->parent)
return SCU_STP_REMOTE_NODE_COUNT;
return SCU_SSP_REMOTE_NODE_COUNT;
}
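The rewritten test reads: an STP-capable device needs the larger remote node allocation only when it sits behind an expander, and dev->parent being non-NULL is exactly the libsas marker for expander attachment (a direct-attached SATA disk has no parent). The decision in isolation, with assumed node counts (see SCU_STP_REMOTE_NODE_COUNT / SCU_SSP_REMOTE_NODE_COUNT in the driver):

    #include <stdbool.h>

    /* assumed values for illustration only */
    enum { STP_NODE_COUNT = 3, SSP_NODE_COUNT = 1 };

    static int remote_node_count(bool is_sata, bool expander_attached)
    {
            if (is_sata && expander_attached)
                    return STP_NODE_COUNT;
            return SSP_NODE_COUNT;
    }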
@@ -475,36 +459,17 @@ void sci_controller_free_remote_node_context(
struct isci_remote_device *idev,
u16 node_id);
-struct isci_request *sci_request_by_tag(struct isci_host *ihost,
- u16 io_tag);
-
-void sci_controller_power_control_queue_insert(
- struct isci_host *ihost,
- struct isci_phy *iphy);
-
-void sci_controller_power_control_queue_remove(
- struct isci_host *ihost,
- struct isci_phy *iphy);
-
-void sci_controller_link_up(
- struct isci_host *ihost,
- struct isci_port *iport,
- struct isci_phy *iphy);
-
-void sci_controller_link_down(
- struct isci_host *ihost,
- struct isci_port *iport,
- struct isci_phy *iphy);
-
-void sci_controller_remote_device_stopped(
- struct isci_host *ihost,
- struct isci_remote_device *idev);
-
-void sci_controller_copy_task_context(
- struct isci_host *ihost,
- struct isci_request *ireq);
-
-void sci_controller_register_setup(struct isci_host *ihost);
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag);
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+ struct isci_phy *iphy);
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+ struct isci_phy *iphy);
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy);
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy);
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+ struct isci_remote_device *idev);
enum sci_status sci_controller_continue_io(struct isci_request *ireq);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
@@ -512,29 +477,14 @@ void isci_host_scan_start(struct Scsi_Host *);
u16 isci_alloc_tag(struct isci_host *ihost);
enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
void isci_tci_free(struct isci_host *ihost, u16 tci);
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task);
int isci_host_init(struct isci_host *);
-
-void isci_host_init_controller_names(
- struct isci_host *isci_host,
- unsigned int controller_idx);
-
-void isci_host_deinit(
- struct isci_host *);
-
-void isci_host_port_link_up(
- struct isci_host *,
- struct isci_port *,
- struct isci_phy *);
-int isci_host_dev_found(struct domain_device *);
-
-void isci_host_remote_device_start_complete(
- struct isci_host *,
- struct isci_remote_device *,
- enum sci_status);
-
-void sci_controller_disable_interrupts(
- struct isci_host *ihost);
+void isci_host_completion_routine(unsigned long data);
+void isci_host_deinit(struct isci_host *);
+void sci_controller_disable_interrupts(struct isci_host *ihost);
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost);
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status);
enum sci_status sci_controller_start_io(
struct isci_host *ihost,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 5137db5a5d85..47e28b555029 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -271,13 +271,12 @@ static void isci_unregister(struct isci_host *isci_host)
if (!isci_host)
return;
- shost = isci_host->shost;
-
sas_unregister_ha(&isci_host->sas_ha);
- sas_remove_host(isci_host->shost);
- scsi_remove_host(isci_host->shost);
- scsi_host_put(isci_host->shost);
+ shost = to_shost(isci_host);
+ sas_remove_host(shost);
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
}
static int __devinit isci_pci_init(struct pci_dev *pdev)
@@ -397,38 +396,199 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
return err;
}
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct sci_phy_user_params *u_phy = &u->phys[i];
+
+ u_phy->max_speed_generation = phy_gen;
+
+ /* we are not exporting these for now */
+ u_phy->align_insertion_frequency = 0x7f;
+ u_phy->in_connection_align_insertion_frequency = 0xff;
+ u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+ }
+
+ u->stp_inactivity_timeout = stp_inactive_to;
+ u->ssp_inactivity_timeout = ssp_inactive_to;
+ u->stp_max_occupancy_timeout = stp_max_occ_to;
+ u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+ u->no_outbound_task_timeout = no_outbound_task_to;
+ u->max_concurr_spinup = max_concurr_spinup;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+ struct sci_user_parameters *sci_parms)
+{
+ u16 index;
+
+ /*
+ * Validate the user parameters. If they are not legal, then
+ * return a failure.
+ */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct sci_phy_user_params *u;
+
+ u = &sci_parms->phys[index];
+
+ if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) &&
+ (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ if (u->in_connection_align_insertion_frequency < 3)
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ if ((u->in_connection_align_insertion_frequency < 3) ||
+ (u->align_insertion_frequency == 0) ||
+ (u->notify_enable_spin_up_insertion_frequency == 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+ }
+
+ if ((sci_parms->stp_inactivity_timeout == 0) ||
+ (sci_parms->ssp_inactivity_timeout == 0) ||
+ (sci_parms->stp_max_occupancy_timeout == 0) ||
+ (sci_parms->ssp_max_occupancy_timeout == 0) ||
+ (sci_parms->no_outbound_task_timeout == 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+ return SCI_SUCCESS;
+}
+
+static void sci_oem_defaults(struct isci_host *ihost)
+{
+ /* these defaults are overridden by the platform / firmware */
+ struct sci_user_parameters *user = &ihost->user_parameters;
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ int i;
+
+ /* Default to APC mode. */
+ oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+	/* Default to one concurrent spin-up. */
+	oem->controller.max_concurr_spin_up = 1;
+
+ /* Default to no SSC operation. */
+ oem->controller.do_enable_ssc = false;
+
+ /* Default to short cables on all phys. */
+ oem->controller.cable_selection_mask = 0;
+
+ /* Initialize all of the port parameter information to narrow ports. */
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ oem->ports[i].phy_mask = 0;
+
+ /* Initialize all of the phy parameter information. */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ /* Default to 3G (i.e. Gen 2). */
+ user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED;
+
+ /* the frequencies cannot be 0 */
+ user->phys[i].align_insertion_frequency = 0x7f;
+ user->phys[i].in_connection_align_insertion_frequency = 0xff;
+ user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33;
+
+		/* Previous Vitesse based expanders had an arbitration issue that
+		 * is worked around by having the upper 32-bits of SAS address
+		 * with a value greater than the Vitesse company identifier.
+ * Hence, usage of 0x5FCFFFFF.
+ */
+ oem->phys[i].sas_address.low = 0x1 + ihost->id;
+ oem->phys[i].sas_address.high = 0x5FCFFFFF;
+ }
+
+ user->stp_inactivity_timeout = 5;
+ user->ssp_inactivity_timeout = 5;
+ user->stp_max_occupancy_timeout = 5;
+ user->ssp_max_occupancy_timeout = 20;
+ user->no_outbound_task_timeout = 2;
+}
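The default address keeps the high dword at 0x5FCFFFFF, above the Vitesse company identifier, while the low dword folds in the controller id so each host is unique. Composing the two halves into one 64-bit address looks like this (a sketch, not driver code):

    #include <stdint.h>

    static uint64_t default_sas_address(int controller_id)
    {
            uint32_t high = 0x5FCFFFFF;         /* > Vitesse OUI */
            uint32_t low = 0x1 + controller_id; /* unique per host */

            return ((uint64_t)high << 32) | low;
    }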
+
static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
{
- struct isci_host *isci_host;
+ struct isci_orom *orom = to_pci_info(pdev)->orom;
+ struct sci_user_parameters sci_user_params;
+ u8 oem_version = ISCI_ROM_VER_1_0;
+ struct isci_host *ihost;
struct Scsi_Host *shost;
- int err;
+ int err, i;
- isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
- if (!isci_host)
+ ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL);
+ if (!ihost)
return NULL;
- isci_host->pdev = pdev;
- isci_host->id = id;
+ ihost->pdev = pdev;
+ ihost->id = id;
+ spin_lock_init(&ihost->scic_lock);
+ init_waitqueue_head(&ihost->eventq);
+ ihost->sas_ha.dev = &ihost->pdev->dev;
+ ihost->sas_ha.lldd_ha = ihost;
+ tasklet_init(&ihost->completion_tasklet,
+ isci_host_completion_routine, (unsigned long)ihost);
+
+ /* validate module parameters */
+ /* TODO: kill struct sci_user_parameters and reference directly */
+ sci_oem_defaults(ihost);
+ isci_user_parameters_get(&sci_user_params);
+ if (sci_user_parameters_set(ihost, &sci_user_params)) {
+ dev_warn(&pdev->dev,
+ "%s: sci_user_parameters_set failed\n", __func__);
+ return NULL;
+ }
+
+ /* sanity check platform (or 'firmware') oem parameters */
+ if (orom) {
+ if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) {
+ dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n");
+ return NULL;
+ }
+ ihost->oem_parameters = orom->ctrl[id];
+ oem_version = orom->hdr.version;
+ }
+
+ /* validate oem parameters (platform, firmware, or built-in defaults) */
+ if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) {
+ dev_warn(&pdev->dev, "oem parameter validation failed\n");
+ return NULL;
+ }
+
+ for (i = 0; i < SCI_MAX_PORTS; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ INIT_LIST_HEAD(&iport->remote_dev_list);
+ iport->isci_host = ihost;
+ }
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ isci_phy_init(&ihost->phys[i], ihost, i);
+
+ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+ struct isci_remote_device *idev = &ihost->devices[i];
+
+ INIT_LIST_HEAD(&idev->node);
+ }
shost = scsi_host_alloc(&isci_sht, sizeof(void *));
if (!shost)
return NULL;
- isci_host->shost = shost;
dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
"{%s, %s, %s, %s}\n",
- (is_cable_select_overridden() ? "* " : ""), isci_host->id,
- lookup_cable_names(decode_cable_selection(isci_host, 3)),
- lookup_cable_names(decode_cable_selection(isci_host, 2)),
- lookup_cable_names(decode_cable_selection(isci_host, 1)),
- lookup_cable_names(decode_cable_selection(isci_host, 0)));
+ (is_cable_select_overridden() ? "* " : ""), ihost->id,
+ lookup_cable_names(decode_cable_selection(ihost, 3)),
+ lookup_cable_names(decode_cable_selection(ihost, 2)),
+ lookup_cable_names(decode_cable_selection(ihost, 1)),
+ lookup_cable_names(decode_cable_selection(ihost, 0)));
- err = isci_host_init(isci_host);
+ err = isci_host_init(ihost);
if (err)
goto err_shost;
- SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
- isci_host->sas_ha.core.shost = shost;
+ SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
+ ihost->sas_ha.core.shost = shost;
shost->transportt = isci_transport_template;
shost->max_id = ~0;
@@ -439,11 +599,11 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
if (err)
goto err_shost;
- err = isci_register_sas_ha(isci_host);
+ err = isci_register_sas_ha(ihost);
if (err)
goto err_shost_remove;
- return isci_host;
+ return ihost;
err_shost_remove:
scsi_remove_host(shost);
@@ -476,7 +636,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
if (!orom)
orom = isci_request_oprom(pdev);
- for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+ for (i = 0; orom && i < num_controllers(pdev); i++) {
if (sci_oem_parameters_validate(&orom->ctrl[i],
orom->hdr.version)) {
dev_warn(&pdev->dev,
@@ -525,11 +685,11 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
pci_info->hosts[i] = h;
/* turn on DIF support */
- scsi_host_set_prot(h->shost,
+ scsi_host_set_prot(to_shost(h),
SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION);
- scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC);
+ scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
}
err = isci_setup_interrupts(pdev);
@@ -537,7 +697,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
goto err_host_alloc;
for_each_isci_host(i, isci_host, pdev)
- scsi_scan_host(isci_host->shost);
+ scsi_scan_host(to_shost(isci_host));
return 0;
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index fab3586840b5..18f43d4c30ba 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -580,7 +580,7 @@ static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
- iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
+ iphy->protocol = SAS_PROTOCOL_SSP;
}
static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
@@ -591,7 +591,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
*/
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
- iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+ iphy->protocol = SAS_PROTOCOL_SATA;
}
/**
@@ -668,6 +668,19 @@ static const char *phy_event_name(u32 event_code)
phy_to_host(iphy)->id, iphy->phy_index, \
phy_state_name(state), phy_event_name(code), code)
+
+void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
+{
+ u32 val;
+
+ /* Extend timeout */
+ val = readl(&iphy->link_layer_registers->transmit_comsas_signal);
+ val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK);
+ val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout);
+
+ writel(val, &iphy->link_layer_registers->transmit_comsas_signal);
+}
+
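scu_link_layer_set_txcomsas_timeout() is a textbook read-modify-write on a single register field: read the register, clear the NEGTIME bits through the mask, OR in the new value, and write it back. The same pattern in self-contained form, with a plain variable standing in for readl()/writel() on the mapped register:

    #include <stdint.h>

    #define NEGTIME_SHIFT 0
    #define NEGTIME_MASK  0x3FFu

    static uint32_t tx_comsas_reg;  /* stand-in for the iomem register */

    static void set_txcomsas_timeout(uint32_t timeout)
    {
            uint32_t val = tx_comsas_reg;                     /* readl() */

            val &= ~(NEGTIME_MASK << NEGTIME_SHIFT);          /* clear field */
            val |= (timeout & NEGTIME_MASK) << NEGTIME_SHIFT; /* set field */
            tx_comsas_reg = val;                              /* writel() */
    }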
enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
{
enum sci_phy_states state = iphy->sm.current_state_id;
@@ -683,6 +696,13 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_phy_start_sata_link_training(iphy);
iphy->is_in_link_training = true;
break;
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ /* Extend timeout value */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
default:
phy_event_dbg(iphy, state, event_code);
return SCI_FAILURE;
@@ -717,9 +737,19 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_phy_start_sata_link_training(iphy);
break;
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ /* Extend the timeout value */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
default:
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
@@ -740,7 +770,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_phy_start_sata_link_training(iphy);
break;
case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ /* Extend the timeout value */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
case SCU_EVENT_LINK_FAILURE:
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
case SCU_EVENT_HARD_RESET_RECEIVED:
/* Start the oob/sn state machine over again */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
@@ -753,6 +790,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
case SCI_PHY_SUB_AWAIT_SAS_POWER:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
@@ -764,6 +804,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
case SCI_PHY_SUB_AWAIT_SATA_POWER:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
@@ -788,6 +831,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
@@ -797,7 +843,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
*/
break;
case SCU_EVENT_SATA_PHY_DETECTED:
- iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+ iphy->protocol = SAS_PROTOCOL_SATA;
/* We have received the SATA PHY notification change state */
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
@@ -836,6 +882,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
@@ -859,6 +908,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
break;
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
@@ -871,16 +923,26 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
case SCI_PHY_READY:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
+ /* Set default timeout */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
case SCU_EVENT_BROADCAST_CHANGE:
+ case SCU_EVENT_BROADCAST_SES:
+ case SCU_EVENT_BROADCAST_RESERVED0:
+ case SCU_EVENT_BROADCAST_RESERVED1:
+ case SCU_EVENT_BROADCAST_EXPANDER:
+ case SCU_EVENT_BROADCAST_AEN:
/* Broadcast change received. Notify the port. */
if (phy_get_non_dummy_port(iphy) != NULL)
sci_port_broadcast_change_received(iphy->owning_port, iphy);
else
iphy->bcn_received_while_port_unassigned = true;
break;
+ case SCU_EVENT_BROADCAST_RESERVED3:
+ case SCU_EVENT_BROADCAST_RESERVED4:
default:
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE_INVALID_STATE;
@@ -1215,7 +1277,7 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
scu_link_layer_start_oob(iphy);
/* We don't know what kind of phy we are going to be just yet */
- iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+ iphy->protocol = SAS_PROTOCOL_NONE;
iphy->bcn_received_while_port_unassigned = false;
if (iphy->sm.previous_state_id == SCI_PHY_READY)
@@ -1250,7 +1312,7 @@ static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
*/
sci_port_deactivate_phy(iphy->owning_port, iphy, false);
- if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ if (iphy->protocol == SAS_PROTOCOL_SSP) {
scu_link_layer_tx_hard_reset(iphy);
} else {
/* The SCU does not need to have a discrete reset state so
@@ -1316,7 +1378,7 @@ void sci_phy_construct(struct isci_phy *iphy,
iphy->owning_port = iport;
iphy->phy_index = phy_index;
iphy->bcn_received_while_port_unassigned = false;
- iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+ iphy->protocol = SAS_PROTOCOL_NONE;
iphy->link_layer_registers = NULL;
iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
@@ -1380,12 +1442,14 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
switch (func) {
case PHY_FUNC_DISABLE:
spin_lock_irqsave(&ihost->scic_lock, flags);
+ scu_link_layer_start_oob(iphy);
sci_phy_stop(iphy);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
break;
case PHY_FUNC_LINK_RESET:
spin_lock_irqsave(&ihost->scic_lock, flags);
+ scu_link_layer_start_oob(iphy);
sci_phy_stop(iphy);
sci_phy_start(iphy);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 0e45833ba06d..45fecfa36a98 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -76,13 +76,6 @@
*/
#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
-enum sci_phy_protocol {
- SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
- SCIC_SDS_PHY_PROTOCOL_SAS,
- SCIC_SDS_PHY_PROTOCOL_SATA,
- SCIC_SDS_MAX_PHY_PROTOCOLS
-};
-
/**
* isci_phy - hba local phy infrastructure
* @sm:
@@ -95,7 +88,7 @@ struct isci_phy {
struct sci_base_state_machine sm;
struct isci_port *owning_port;
enum sas_linkrate max_negotiated_speed;
- enum sci_phy_protocol protocol;
+ enum sas_protocol protocol;
u8 phy_index;
bool bcn_received_while_port_unassigned;
bool is_in_link_training;
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 5fada73b71ff..2fb85bf75449 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -184,7 +184,7 @@ static void isci_port_link_up(struct isci_host *isci_host,
sci_port_get_properties(iport, &properties);
- if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
+ if (iphy->protocol == SAS_PROTOCOL_SATA) {
u64 attached_sas_address;
iphy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -204,7 +204,7 @@ static void isci_port_link_up(struct isci_host *isci_host,
memcpy(&iphy->sas_phy.attached_sas_addr,
&attached_sas_address, sizeof(attached_sas_address));
- } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ } else if (iphy->protocol == SAS_PROTOCOL_SSP) {
iphy->sas_phy.oob_mode = SAS_OOB_MODE;
iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
@@ -251,10 +251,10 @@ static void isci_port_link_down(struct isci_host *isci_host,
if (isci_phy->sas_phy.port &&
isci_phy->sas_phy.port->num_phys == 1) {
/* change the state for all devices on this port. The
- * next task sent to this device will be returned as
- * SAS_TASK_UNDELIVERED, and the scsi mid layer will
- * remove the target
- */
+ * next task sent to this device will be returned as
+ * SAS_TASK_UNDELIVERED, and the scsi mid layer will
+ * remove the target
+ */
list_for_each_entry(isci_device,
&isci_port->remote_dev_list,
node) {
@@ -517,7 +517,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a
*/
iphy = sci_port_get_a_connected_phy(iport);
if (iphy) {
- if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
+ if (iphy->protocol != SAS_PROTOCOL_SATA) {
sci_phy_get_attached_sas_address(iphy, sas);
} else {
sci_phy_get_sas_address(iphy, sas);
@@ -624,7 +624,7 @@ static void sci_port_activate_phy(struct isci_port *iport,
{
struct isci_host *ihost = iport->owning_controller;
- if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
+ if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
sci_phy_resume(iphy);
iport->active_phy_mask |= 1 << iphy->phy_index;
@@ -751,12 +751,10 @@ static bool sci_port_is_wide(struct isci_port *iport)
* wide ports and direct attached phys. Since there are no wide ported SATA
* devices this could become an invalid port configuration.
*/
-bool sci_port_link_detected(
- struct isci_port *iport,
- struct isci_phy *iphy)
+bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
{
if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
- (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+ (iphy->protocol == SAS_PROTOCOL_SATA)) {
if (sci_port_is_wide(iport)) {
sci_port_invalid_link_up(iport, iphy);
return false;
@@ -1201,6 +1199,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
enum sci_status status;
enum sci_port_states state;
+ sci_port_bcn_enable(iport);
+
state = iport->sm.current_state_id;
switch (state) {
case SCI_PORT_STOPPED: {
@@ -1548,6 +1548,29 @@ static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
}
+void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
+{
+ int phy_index;
+ u32 phy_mask = iport->active_phy_mask;
+
+ if (timeout)
+ ++iport->hang_detect_users;
+ else if (iport->hang_detect_users > 1)
+ --iport->hang_detect_users;
+ else
+ iport->hang_detect_users = 0;
+
+ if (timeout || (iport->hang_detect_users == 0)) {
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+ if ((phy_mask >> phy_index) & 1) {
+ writel(timeout,
+ &iport->phy_table[phy_index]
+ ->link_layer_registers
+ ->link_layer_hang_detection_timeout);
+ }
+ }
+ }
+}
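The hang_detect_users counter makes the enable/disable calls nest: each non-zero timeout takes a reference, a zero timeout drops one, and the phy registers are rewritten only when enabling or when the last user disables. The accounting on its own (sketch; one variable stands in for the per-phy registers):

    static unsigned int hang_detect_users;
    static unsigned int hw_timeout;  /* stand-in for the phy registers */

    static void set_hang_detection_timeout(unsigned int timeout)
    {
            if (timeout)
                    ++hang_detect_users;
            else if (hang_detect_users > 1)
                    --hang_detect_users;
            else
                    hang_detect_users = 0;

            /* program hardware on enable, or on the final disable */
            if (timeout || hang_detect_users == 0)
                    hw_timeout = timeout;
    }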
/* --------------------------------------------------------------------------- */
static const struct sci_base_state sci_port_state_table[] = {
@@ -1596,6 +1619,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
iport->started_request_count = 0;
iport->assigned_device_count = 0;
+ iport->hang_detect_users = 0;
iport->reserved_rni = SCU_DUMMY_INDEX;
iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
@@ -1608,13 +1632,6 @@ void sci_port_construct(struct isci_port *iport, u8 index,
iport->phy_table[index] = NULL;
}
-void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
-{
- INIT_LIST_HEAD(&iport->remote_dev_list);
- INIT_LIST_HEAD(&iport->domain_dev_list);
- iport->isci_host = ihost;
-}
-
void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
{
struct isci_host *ihost = iport->owning_controller;
@@ -1671,17 +1688,6 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
__func__, iport, status);
}
-
- /* If the hard reset for the port has failed, consider this
- * the same as link failures on all phys in the port.
- */
- if (ret != TMF_RESP_FUNC_COMPLETE) {
-
- dev_err(&ihost->pdev->dev,
- "%s: iport = %p; hard reset failed "
- "(0x%x) - driving explicit link fail for all phys\n",
- __func__, iport, iport->hard_reset_status);
- }
return ret;
}
@@ -1740,7 +1746,7 @@ void isci_port_formed(struct asd_sas_phy *phy)
struct isci_host *ihost = phy->ha->lldd_ha;
struct isci_phy *iphy = to_iphy(phy);
struct asd_sas_port *port = phy->port;
- struct isci_port *iport;
+ struct isci_port *iport = NULL;
unsigned long flags;
int i;
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 6b56240c2051..861e8f72811b 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -97,7 +97,6 @@ enum isci_status {
struct isci_port {
struct isci_host *isci_host;
struct list_head remote_dev_list;
- struct list_head domain_dev_list;
#define IPORT_RESET_PENDING 0
unsigned long state;
enum sci_status hard_reset_status;
@@ -112,6 +111,7 @@ struct isci_port {
u16 reserved_tag;
u32 started_request_count;
u32 assigned_device_count;
+ u32 hang_detect_users;
u32 not_ready_reason;
struct isci_phy *phy_table[SCI_MAX_PHYS];
struct isci_host *owning_controller;
@@ -270,14 +270,13 @@ void sci_port_get_attached_sas_address(
struct isci_port *iport,
struct sci_sas_address *sas_address);
+void sci_port_set_hang_detection_timeout(
+ struct isci_port *isci_port,
+ u32 timeout);
+
void isci_port_formed(struct asd_sas_phy *);
void isci_port_deformed(struct asd_sas_phy *);
-void isci_port_init(
- struct isci_port *port,
- struct isci_host *host,
- int index);
-
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
struct isci_phy *iphy);
int isci_ata_check_ready(struct domain_device *dev);
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 6d1e9544cbe5..cd962da4a57a 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000)
enum SCIC_SDS_APC_ACTIVITY {
SCIC_SDS_APC_SKIP_PHY,
@@ -472,13 +472,9 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
* down event or a link up event where we can not yet tell to which a phy
* belongs.
*/
-static void sci_apc_agent_start_timer(
- struct sci_port_configuration_agent *port_agent,
- u32 timeout)
+static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent,
+ u32 timeout)
{
- if (port_agent->timer_pending)
- sci_del_timer(&port_agent->timer);
-
port_agent->timer_pending = true;
sci_mod_timer(&port_agent->timer, timeout);
}
@@ -697,6 +693,9 @@ static void apc_agent_timeout(unsigned long data)
&ihost->phys[index], false);
}
+ if (is_controller_start_complete(ihost))
+ sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+
done:
spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
@@ -732,6 +731,11 @@ void sci_port_configuration_agent_construct(
}
}
+bool is_port_config_apc(struct isci_host *ihost)
+{
+ return ihost->port_agent.link_up_handler == sci_apc_agent_link_up;
+}
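Rather than storing a separate mode flag, is_port_config_apc() infers the port-configuration mode from which link-up handler was installed when the agent was constructed. The same function-pointer test in miniature (names are illustrative):

    struct agent {
            void (*link_up_handler)(struct agent *);
    };

    static void apc_link_up(struct agent *a) { (void)a; }

    static int is_apc(struct agent *a)
    {
            return a->link_up_handler == apc_link_up;
    }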
+
enum sci_status sci_port_configuration_agent_initialize(
struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent)
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 9b8117b9d756..4d95654c3fd4 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -112,18 +112,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
return rom;
}
-enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
- struct isci_orom *orom, int scu_index)
-{
- /* check for valid inputs */
- if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
- scu_index > orom->hdr.num_elements || !oem)
- return -EINVAL;
-
- *oem = orom->ctrl[scu_index];
- return 0;
-}
-
struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
{
struct isci_orom *orom = NULL, *data;
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index bb0e9d4d97c9..e08b578241f8 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -156,8 +156,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
struct isci_orom;
struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
-enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
- struct isci_orom *orom, int scu_index);
struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 7eb0ccd45fe6..97f3ceb8d724 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -1239,6 +1239,14 @@ struct scu_transport_layer_registers {
#define SCU_SAS_LLCTL_GEN_BIT(name) \
SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT (0xF0)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED (0x1FF)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT (0)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK (0x3FF)
+
+#define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value)
+
/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
#define SCU_PSZGCR_OFFSET 0x00E4
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 8f501b0a81d6..c3aa6c5457b9 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -72,46 +72,11 @@ const char *dev_state_name(enum sci_remote_device_states state)
}
#undef C
-/**
- * isci_remote_device_not_ready() - This function is called by the ihost when
- * the remote device is not ready. We mark the isci device as ready (not
- * "ready_for_io") and signal the waiting proccess.
- * @isci_host: This parameter specifies the isci host object.
- * @isci_device: This parameter specifies the remote device
- *
- * sci_lock is held on entrance to this function.
- */
-static void isci_remote_device_not_ready(struct isci_host *ihost,
- struct isci_remote_device *idev, u32 reason)
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+ enum sci_remote_node_suspension_reasons reason)
{
- struct isci_request *ireq;
-
- dev_dbg(&ihost->pdev->dev,
- "%s: isci_device = %p\n", __func__, idev);
-
- switch (reason) {
- case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
- set_bit(IDEV_GONE, &idev->flags);
- break;
- case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
- set_bit(IDEV_IO_NCQERROR, &idev->flags);
-
- /* Kill all outstanding requests for the device. */
- list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
-
- dev_dbg(&ihost->pdev->dev,
- "%s: isci_device = %p request = %p\n",
- __func__, idev, ireq);
-
- sci_controller_terminate_request(ihost,
- idev,
- ireq);
- }
- /* Fall through into the default case... */
- default:
- clear_bit(IDEV_IO_READY, &idev->flags);
- break;
- }
+ return sci_remote_node_context_suspend(&idev->rnc, reason,
+ SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
}
/**
@@ -133,18 +98,29 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote
wake_up(&ihost->eventq);
}
-/* called once the remote node context is ready to be freed.
- * The remote device can now report that its stop operation is complete. none
- */
-static void rnc_destruct_done(void *_dev)
+static enum sci_status sci_remote_device_terminate_req(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ int check_abort,
+ struct isci_request *ireq)
{
- struct isci_remote_device *idev = _dev;
+ if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
+ (ireq->target_device != idev) ||
+ (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
+ return SCI_SUCCESS;
- BUG_ON(idev->started_request_count != 0);
- sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
+ __func__, idev, idev->flags, ireq, ireq->target_device);
+
+ set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
+
+ return sci_controller_terminate_request(ihost, idev, ireq);
}
-static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
+static enum sci_status sci_remote_device_terminate_reqs_checkabort(
+ struct isci_remote_device *idev,
+ int chk)
{
struct isci_host *ihost = idev->owning_port->owning_controller;
enum sci_status status = SCI_SUCCESS;
@@ -154,18 +130,210 @@ static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_d
struct isci_request *ireq = ihost->reqs[i];
enum sci_status s;
- if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
- ireq->target_device != idev)
- continue;
-
- s = sci_controller_terminate_request(ihost, idev, ireq);
+ s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
if (s != SCI_SUCCESS)
status = s;
}
+ return status;
+}
+
+static bool isci_compare_suspendcount(
+ struct isci_remote_device *idev,
+ u32 localcount)
+{
+ smp_rmb();
+
+ /* Check for a change in the suspend count, or the RNC
+ * being destroyed.
+ */
+ return (localcount != idev->rnc.suspend_count)
+ || sci_remote_node_context_is_being_destroyed(&idev->rnc);
+}
+
+static bool isci_check_reqterm(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq,
+ u32 localcount)
+{
+ unsigned long flags;
+ bool res;
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ res = isci_compare_suspendcount(idev, localcount)
+ && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return res;
+}
+
+static bool isci_check_devempty(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u32 localcount)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ res = isci_compare_suspendcount(idev, localcount)
+ && idev->started_request_count == 0;
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return res;
+}
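Both helpers re-sample their condition under scic_lock, and the smp_rmb() in isci_compare_suspendcount() pairs with the writer's updates so a stale suspend_count cannot satisfy the wait. The shape of such a wait_event_timeout() condition helper, sketched with a pthread mutex in place of the spinlock:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int suspend_count;
    static unsigned int started_requests;

    /* condition helper: re-evaluated under the lock on every wakeup */
    static bool check_devempty(unsigned int old_count)
    {
            bool res;

            pthread_mutex_lock(&lock);
            res = (suspend_count != old_count) && started_requests == 0;
            pthread_mutex_unlock(&lock);

            return res;
    }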
+
+enum sci_status isci_remote_device_terminate_requests(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status = SCI_SUCCESS;
+ unsigned long flags;
+ u32 rnc_suspend_count;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (isci_get_device(idev) == NULL) {
+ dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
+ __func__, idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ status = SCI_FAILURE;
+ } else {
+ /* If already suspended, don't wait for another suspension. */
+ smp_rmb();
+ rnc_suspend_count
+ = sci_remote_node_context_is_suspended(&idev->rnc)
+ ? 0 : idev->rnc.suspend_count;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev=%p, ireq=%p; started_request_count=%d, "
+			"rnc_suspend_count=%d, rnc.suspend_count=%d; "
+ "about to wait\n",
+ __func__, idev, ireq, idev->started_request_count,
+ rnc_suspend_count, idev->rnc.suspend_count);
+
+ #define MAX_SUSPEND_MSECS 10000
+ if (ireq) {
+ /* Terminate a specific TC. */
+ set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
+ sci_remote_device_terminate_req(ihost, idev, 0, ireq);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ if (!wait_event_timeout(ihost->eventq,
+ isci_check_reqterm(ihost, idev, ireq,
+ rnc_suspend_count),
+ msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
+
+ dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
+ __func__, ihost->id);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: ******* Timeout waiting for "
+ "suspend; idev=%p, current state %s; "
+ "started_request_count=%d, flags=%lx\n\t"
+ "rnc_suspend_count=%d, rnc.suspend_count=%d "
+ "RNC: current state %s, current "
+ "suspend_type %x dest state %d;\n"
+ "ireq=%p, ireq->flags = %lx\n",
+ __func__, idev,
+ dev_state_name(idev->sm.current_state_id),
+ idev->started_request_count, idev->flags,
+ rnc_suspend_count, idev->rnc.suspend_count,
+ rnc_state_name(idev->rnc.sm.current_state_id),
+ idev->rnc.suspend_type,
+ idev->rnc.destination_state,
+ ireq, ireq->flags);
+ }
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
+ if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+ isci_free_tag(ihost, ireq->io_tag);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ } else {
+ /* Terminate all TCs. */
+ sci_remote_device_terminate_requests(idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ if (!wait_event_timeout(ihost->eventq,
+ isci_check_devempty(ihost, idev,
+ rnc_suspend_count),
+ msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
+
+ dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
+ __func__, ihost->id);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: ******* Timeout waiting for "
+ "suspend; idev=%p, current state %s; "
+ "started_request_count=%d, flags=%lx\n\t"
+ "rnc_suspend_count=%d, "
+ "RNC: current state %s, "
+ "rnc.suspend_count=%d, current "
+ "suspend_type %x dest state %d\n",
+ __func__, idev,
+ dev_state_name(idev->sm.current_state_id),
+ idev->started_request_count, idev->flags,
+ rnc_suspend_count,
+ rnc_state_name(idev->rnc.sm.current_state_id),
+ idev->rnc.suspend_count,
+ idev->rnc.suspend_type,
+ idev->rnc.destination_state);
+ }
+ }
+ dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
+ __func__, idev);
+ isci_put_device(idev);
+ }
return status;
}
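The overall protocol of isci_remote_device_terminate_requests() is: take a device reference, snapshot the RNC suspend count (zero if the RNC is already suspended, so the wait reduces to "no started requests"), post the terminations under scic_lock, then sleep on eventq until the condition helper succeeds or ten seconds pass. A condensed userspace analogue of that bounded wait, assuming the condition from the sketch above (hypothetical names; MAX_SUSPEND_MSECS mirrors the driver):

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    #define MAX_SUSPEND_MSECS 10000

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t eventq = PTHREAD_COND_INITIALIZER;
    static unsigned int suspend_count;
    static unsigned int started_requests;

    /* returns true when the device drained, false on timeout */
    static bool wait_for_devempty(unsigned int old_count)
    {
            struct timespec deadline;
            bool ok = true;

            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += MAX_SUSPEND_MSECS / 1000;

            pthread_mutex_lock(&lock);
            while (!(suspend_count != old_count && started_requests == 0)) {
                    if (pthread_cond_timedwait(&eventq, &lock, &deadline)) {
                            ok = false;  /* timed out (or other error) */
                            break;
                    }
            }
            pthread_mutex_unlock(&lock);

            return ok;
    }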
+/**
+ * isci_remote_device_not_ready() - This function is called by the ihost when
+ *    the remote device is not ready. We mark the isci device as not ready for
+ *    I/O (clear "ready_for_io") and signal any waiting process.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
+ * @reason: This parameter specifies why the device is not ready.
+ *
+ * sci_lock is held on entrance to this function.
+ */
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u32 reason)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
+
+ switch (reason) {
+ case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+ set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+ /* Suspend the remote device so the I/O can be terminated. */
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
+
+ /* Kill all outstanding requests for the device. */
+ sci_remote_device_terminate_requests(idev);
+
+ /* Fall through into the default case... */
+ default:
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ break;
+ }
+}
+
+/* Called once the remote node context is ready to be freed.
+ * The remote device can now report that its stop operation is complete.
+ */
+static void rnc_destruct_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ BUG_ON(idev->started_request_count != 0);
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+enum sci_status sci_remote_device_terminate_requests(
+ struct isci_remote_device *idev)
+{
+ return sci_remote_device_terminate_reqs_checkabort(idev, 0);
+}
+
enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
u32 timeout)
{
@@ -201,13 +369,16 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
case SCI_SMP_DEV_IDLE:
case SCI_SMP_DEV_CMD:
sci_change_state(sm, SCI_DEV_STOPPING);
- if (idev->started_request_count == 0) {
+ if (idev->started_request_count == 0)
sci_remote_node_context_destruct(&idev->rnc,
- rnc_destruct_done, idev);
- return SCI_SUCCESS;
- } else
- return sci_remote_device_terminate_requests(idev);
- break;
+ rnc_destruct_done,
+ idev);
+ else {
+ sci_remote_device_suspend(
+ idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+ sci_remote_device_terminate_requests(idev);
+ }
+ return SCI_SUCCESS;
case SCI_DEV_STOPPING:
/* All requests should have been terminated, but if there is an
* attempt to stop a device already in the stopping state, then
@@ -265,22 +436,6 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev
return SCI_SUCCESS;
}
-enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
- u32 suspend_type)
-{
- struct sci_base_state_machine *sm = &idev->sm;
- enum sci_remote_device_states state = sm->current_state_id;
-
- if (state != SCI_STP_DEV_CMD) {
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
- __func__, dev_state_name(state));
- return SCI_FAILURE_INVALID_STATE;
- }
-
- return sci_remote_node_context_suspend(&idev->rnc,
- suspend_type, NULL, NULL);
-}
-
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
u32 frame_index)
{
@@ -412,9 +567,9 @@ static void atapi_remote_device_resume_done(void *_dev)
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
u32 event_code)
{
+ enum sci_status status;
struct sci_base_state_machine *sm = &idev->sm;
enum sci_remote_device_states state = sm->current_state_id;
- enum sci_status status;
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TYPE_RNC_OPS_MISC:
@@ -427,9 +582,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
status = SCI_SUCCESS;
/* Suspend the associated RNC */
- sci_remote_node_context_suspend(&idev->rnc,
- SCI_SOFTWARE_SUSPENSION,
- NULL, NULL);
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
dev_dbg(scirdev_to_dev(idev),
"%s: device: %p event code: %x: %s\n",
@@ -455,6 +608,10 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
if (status != SCI_SUCCESS)
return status;
+ /* Decode device-specific states that may require an RNC resume during
+ * normal operation. When the abort path is active, these resumes are
+ * managed when the abort path exits.
+ */
if (state == SCI_STP_DEV_ATAPI_ERROR) {
/* For ATAPI error state resume the RNC right away. */
if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
@@ -743,10 +900,6 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
if (status != SCI_SUCCESS)
return status;
- status = sci_remote_node_context_start_task(&idev->rnc, ireq);
- if (status != SCI_SUCCESS)
- goto out;
-
status = sci_request_start(ireq);
if (status != SCI_SUCCESS)
goto out;
@@ -765,11 +918,11 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
* the correct action when the remote node context is suspended
* and later resumed.
*/
- sci_remote_node_context_suspend(&idev->rnc,
- SCI_SOFTWARE_SUSPENSION, NULL, NULL);
- sci_remote_node_context_resume(&idev->rnc,
- sci_remote_device_continue_request,
- idev);
+ sci_remote_device_suspend(idev,
+ SCI_SW_SUSPEND_LINKHANG_DETECT);
+
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq,
+ sci_remote_device_continue_request, idev);
out:
sci_remote_device_start_request(idev, ireq, status);
@@ -783,7 +936,9 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
if (status != SCI_SUCCESS)
return status;
- status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+ /* Resume the RNC as needed: */
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq,
+ NULL, NULL);
if (status != SCI_SUCCESS)
break;
@@ -892,7 +1047,7 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_
* here should go through isci_remote_device_nuke_requests.
* If we hit this condition, we will need a way to complete
* io requests in process */
- BUG_ON(!list_empty(&idev->reqs_in_process));
+ BUG_ON(idev->started_request_count > 0);
sci_remote_device_destruct(idev);
list_del_init(&idev->node);
@@ -954,14 +1109,21 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
- sci_remote_node_context_suspend(
- &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
}
static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}
@@ -1113,33 +1275,20 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
{
enum sci_status status;
struct sci_port_properties properties;
- struct domain_device *dev = idev->domain_dev;
sci_remote_device_construct(iport, idev);
- /*
- * This information is request to determine how many remote node context
- * entries will be needed to store the remote node.
- */
- idev->is_direct_attached = true;
-
sci_port_get_properties(iport, &properties);
/* Get accurate port width from port's phy mask for a DA device. */
idev->device_port_width = hweight32(properties.phy_mask);
status = sci_controller_allocate_remote_node_context(iport->owning_controller,
- idev,
- &idev->rnc.remote_node_index);
+ idev,
+ &idev->rnc.remote_node_index);
if (status != SCI_SUCCESS)
return status;
- if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
- (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
- /* pass */;
- else
- return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
-
idev->connection_rate = sci_port_get_max_allowed_speed(iport);
return SCI_SUCCESS;
@@ -1171,19 +1320,13 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
if (status != SCI_SUCCESS)
return status;
- if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
- (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
- /* pass */;
- else
- return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
-
- /*
- * For SAS-2 the physical link rate is actually a logical link
+ /* For SAS-2 the physical link rate is actually a logical link
* rate that incorporates multiplexing. The SCU doesn't
* incorporate multiplexing and for the purposes of the
* connection the logical link rate is that same as the
* physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
- * one another, so this code works for both situations. */
+ * one another, so this code works for both situations.
+ */
idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
dev->linkrate);
@@ -1193,6 +1336,105 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
return SCI_SUCCESS;
}
+enum sci_status sci_remote_device_resume(
+ struct isci_remote_device *idev,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum sci_status status;
+
+ status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
+ if (status != SCI_SUCCESS)
+ dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
+ __func__, status);
+ return status;
+}
+
+static void isci_remote_device_resume_from_abort_complete(void *cbparam)
+{
+ struct isci_remote_device *idev = cbparam;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ scics_sds_remote_node_context_callback abort_resume_cb =
+ idev->abort_resume_cb;
+
+ dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
+ __func__, abort_resume_cb);
+
+ if (abort_resume_cb != NULL) {
+ idev->abort_resume_cb = NULL;
+ abort_resume_cb(idev->abort_resume_cbparam);
+ }
+ clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+ wake_up(&ihost->eventq);
+}
+
+static bool isci_remote_device_test_resume_done(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ unsigned long flags;
+ bool done;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
+ || test_bit(IDEV_STOP_PENDING, &idev->flags)
+ || sci_remote_node_context_is_being_destroyed(&idev->rnc);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return done;
+}
+
+void isci_remote_device_wait_for_resume_from_abort(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
+ __func__, idev);
+
+ #define MAX_RESUME_MSECS 10000
+ if (!wait_event_timeout(ihost->eventq,
+ isci_remote_device_test_resume_done(ihost, idev),
+ msecs_to_jiffies(MAX_RESUME_MSECS))) {
+
+ dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
+ "resume: %p\n", __func__, idev);
+ }
+ clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+
+ dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
+ __func__, idev);
+}
+
+enum sci_status isci_remote_device_resume_from_abort(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ unsigned long flags;
+ enum sci_status status = SCI_SUCCESS;
+ int destroyed;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ /* Preserve any current resume callbacks, for instance from other
+ * resumptions.
+ */
+ idev->abort_resume_cb = idev->rnc.user_callback;
+ idev->abort_resume_cbparam = idev->rnc.user_cookie;
+ set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+ clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
+ destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
+ if (!destroyed)
+ status = sci_remote_device_resume(
+ idev, isci_remote_device_resume_from_abort_complete,
+ idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ if (!destroyed && (status == SCI_SUCCESS))
+ isci_remote_device_wait_for_resume_from_abort(ihost, idev);
+ else
+ clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+
+ return status;
+}
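
For orientation, a minimal sketch of how an error-handler caller might drive the two halves of the abort path added here; the function name and the -EIO mapping are illustrative, not part of this patch:

	/* Sketch: suspend and terminate first; the patch leaves RNC resumption
	 * to the caller (see the NOTE in isci_remote_device_suspend_terminate()
	 * later in this file), so resume explicitly before returning to libsas.
	 */
	static int example_abort_path(struct isci_host *ihost,
				      struct isci_remote_device *idev,
				      struct isci_request *ireq)
	{
		enum sci_status status;

		status = isci_remote_device_suspend_terminate(ihost, idev, ireq);
		if (status == SCI_SUCCESS)
			status = isci_remote_device_resume_from_abort(ihost, idev);
		return status == SCI_SUCCESS ? 0 : -EIO;
	}
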
+
/**
* sci_remote_device_start() - This method will start the supplied remote
* device. This method enables normal IO requests to flow through to the
@@ -1207,7 +1449,7 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
* the device when there have been no phys added to it.
*/
static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
- u32 timeout)
+ u32 timeout)
{
struct sci_base_state_machine *sm = &idev->sm;
enum sci_remote_device_states state = sm->current_state_id;
@@ -1219,9 +1461,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
return SCI_FAILURE_INVALID_STATE;
}
- status = sci_remote_node_context_resume(&idev->rnc,
- remote_device_resume_done,
- idev);
+ status = sci_remote_device_resume(idev, remote_device_resume_done,
+ idev);
if (status != SCI_SUCCESS)
return status;
@@ -1259,20 +1500,6 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
return status;
}
-void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
-{
- DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
-
- dev_dbg(&ihost->pdev->dev,
- "%s: idev = %p\n", __func__, idev);
-
- /* Cleanup all requests pending for this device. */
- isci_terminate_pending_requests(ihost, idev);
-
- dev_dbg(&ihost->pdev->dev,
- "%s: idev = %p, done\n", __func__, idev);
-}
-
/**
* This function builds the isci_remote_device when a libsas dev_found message
* is received.
@@ -1297,10 +1524,6 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
return NULL;
}
-
- if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
- return NULL;
-
if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
return NULL;
@@ -1342,14 +1565,8 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
spin_lock_irqsave(&ihost->scic_lock, flags);
idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
set_bit(IDEV_GONE, &idev->flags);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- /* Kill all outstanding requests. */
- isci_remote_device_nuke_requests(ihost, idev);
set_bit(IDEV_STOP_PENDING, &idev->flags);
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
status = sci_remote_device_stop(idev, 50);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
@@ -1359,6 +1576,9 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
else
wait_for_device_stop(ihost, idev);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p, waiting done.\n", __func__, idev);
+
return status;
}
@@ -1434,3 +1654,73 @@ int isci_remote_device_found(struct domain_device *dev)
return status == SCI_SUCCESS ? 0 : -ENODEV;
}
+
+enum sci_status isci_remote_device_suspend_terminate(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ unsigned long flags;
+ enum sci_status status;
+
+ /* Put the device into suspension. */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Terminate and wait for the completions. */
+ status = isci_remote_device_terminate_requests(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_remote_device_terminate_requests(%p) "
+ "returned %d!\n",
+ __func__, idev, status);
+
+ /* NOTE: RNC resumption is left to the caller! */
+ return status;
+}
+
+int isci_remote_device_is_safe_to_abort(
+ struct isci_remote_device *idev)
+{
+ return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
+}
+
+enum sci_status sci_remote_device_abort_requests_pending_abort(
+ struct isci_remote_device *idev)
+{
+ return sci_remote_device_terminate_reqs_checkabort(idev, 1);
+}
+
+enum sci_status isci_remote_device_reset_complete(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ unsigned long flags;
+ enum sci_status status;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_reset_complete(idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return status;
+}
+
+void isci_dev_set_hang_detection_timeout(
+ struct isci_remote_device *idev,
+ u32 timeout)
+{
+ if (dev_is_sata(idev->domain_dev)) {
+ if (timeout) {
+ if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
+ &idev->flags))
+ return; /* Already enabled. */
+ } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
+ &idev->flags))
+ return; /* Not enabled. */
+
+ sci_port_set_hang_detection_timeout(idev->owning_port,
+ timeout);
+ }
+}
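
The test_and_set/test_and_clear pairing above makes the enable and disable paths idempotent, so the shared port register is only written on an actual state change. For reference, the two call patterns this patch introduces (both appear verbatim in later hunks):

	/* Enable link-layer hang detection ahead of a software suspend: */
	isci_dev_set_hang_detection_timeout(idev, 0x00000001);

	/* Disable it again when leaving the SCI_RNC_AWAIT_SUSPENSION state: */
	isci_dev_set_hang_detection_timeout(idev, 0);
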
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 58637ee08f55..7674caae1d88 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -85,27 +85,38 @@ struct isci_remote_device {
#define IDEV_GONE 3
#define IDEV_IO_READY 4
#define IDEV_IO_NCQERROR 5
+ #define IDEV_RNC_LLHANG_ENABLED 6
+ #define IDEV_ABORT_PATH_ACTIVE 7
+ #define IDEV_ABORT_PATH_RESUME_PENDING 8
unsigned long flags;
struct kref kref;
struct isci_port *isci_port;
struct domain_device *domain_dev;
struct list_head node;
- struct list_head reqs_in_process;
struct sci_base_state_machine sm;
u32 device_port_width;
enum sas_linkrate connection_rate;
- bool is_direct_attached;
struct isci_port *owning_port;
struct sci_remote_node_context rnc;
/* XXX unify with device reference counting and delete */
u32 started_request_count;
struct isci_request *working_request;
u32 not_ready_reason;
+ scics_sds_remote_node_context_callback abort_resume_cb;
+ void *abort_resume_cbparam;
};
#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
/* device reference routines must be called under sci_lock */
+static inline struct isci_remote_device *isci_get_device(
+ struct isci_remote_device *idev)
+{
+ if (idev)
+ kref_get(&idev->kref);
+ return idev;
+}
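
A hedged sketch of the reference pattern this inline supports; isci_put_device is assumed to be the driver's existing kref_put counterpart and is not part of this hunk:

	static void example_device_user(struct isci_host *ihost,
					struct domain_device *dev)
	{
		struct isci_remote_device *idev;
		unsigned long flags;

		/* Per the comment above, reference routines run under sci_lock;
		 * isci_lookup_device() is expected to take a reference via
		 * isci_get_device() when it finds a live device.
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);
		idev = isci_lookup_device(dev);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		if (!idev)
			return;
		/* ... use idev ... */
		isci_put_device(idev);	/* assumed counterpart, not shown here */
	}
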
+
static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
{
struct isci_remote_device *idev = dev->lldd_dev;
@@ -302,6 +313,8 @@ static inline void sci_remote_device_decrement_request_count(struct isci_remote_
idev->started_request_count--;
}
+void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout);
+
enum sci_status sci_remote_device_frame_handler(
struct isci_remote_device *idev,
u32 frame_index);
@@ -325,12 +338,50 @@ enum sci_status sci_remote_device_complete_io(
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_status sci_remote_device_suspend(
- struct isci_remote_device *idev,
- u32 suspend_type);
-
void sci_remote_device_post_request(
struct isci_remote_device *idev,
u32 request);
+enum sci_status sci_remote_device_terminate_requests(
+ struct isci_remote_device *idev);
+
+int isci_remote_device_is_safe_to_abort(
+ struct isci_remote_device *idev);
+
+enum sci_status
+sci_remote_device_abort_requests_pending_abort(
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_suspend(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status sci_remote_device_resume(
+ struct isci_remote_device *idev,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+
+enum sci_status isci_remote_device_resume_from_abort(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_reset(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_reset_complete(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_suspend_terminate(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status isci_remote_device_terminate_requests(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+				enum sci_remote_node_suspension_reasons reason);
#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 3a9463481f38..1910100638a2 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -52,7 +52,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
+#include <scsi/sas_ata.h>
#include "host.h"
#include "isci.h"
#include "remote_device.h"
@@ -90,6 +90,15 @@ bool sci_remote_node_context_is_ready(
return false;
}
+bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
+{
+ u32 current_state = sci_rnc->sm.current_state_id;
+
+ if (current_state == SCI_RNC_TX_RX_SUSPENDED)
+ return true;
+ return false;
+}
+
static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
if (id < ihost->remote_node_entries &&
@@ -131,7 +140,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
rnc->ssp.arbitration_wait_time = 0;
- if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ if (dev_is_sata(dev)) {
rnc->ssp.connection_occupancy_timeout =
ihost->user_parameters.stp_max_occupancy_timeout;
rnc->ssp.connection_inactivity_timeout =
@@ -151,7 +160,6 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
rnc->ssp.oaf_source_zone_group = 0;
rnc->ssp.oaf_more_compatibility_features = 0;
}
-
/**
*
* @sci_rnc:
@@ -165,23 +173,30 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
static void sci_remote_node_context_setup_to_resume(
struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
- void *callback_parameter)
+ void *callback_parameter,
+ enum sci_remote_node_context_destination_state dest_param)
{
- if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
- sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
- sci_rnc->user_callback = callback;
- sci_rnc->user_cookie = callback_parameter;
+ if (sci_rnc->destination_state != RNC_DEST_FINAL) {
+ sci_rnc->destination_state = dest_param;
+ if (callback != NULL) {
+ sci_rnc->user_callback = callback;
+ sci_rnc->user_cookie = callback_parameter;
+ }
}
}
-static void sci_remote_node_context_setup_to_destory(
+static void sci_remote_node_context_setup_to_destroy(
struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
void *callback_parameter)
{
- sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
+ struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));
+
+ sci_rnc->destination_state = RNC_DEST_FINAL;
sci_rnc->user_callback = callback;
sci_rnc->user_cookie = callback_parameter;
+
+ wake_up(&ihost->eventq);
}
/**
@@ -203,9 +218,19 @@ static void sci_remote_node_context_notify_user(
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
- if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+ switch (rnc->destination_state) {
+ case RNC_DEST_READY:
+ case RNC_DEST_SUSPENDED_RESUME:
+ rnc->destination_state = RNC_DEST_READY;
+ /* Fall through... */
+ case RNC_DEST_FINAL:
sci_remote_node_context_resume(rnc, rnc->user_callback,
- rnc->user_cookie);
+ rnc->user_cookie);
+ break;
+ default:
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
+ break;
+ }
}
static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
@@ -219,13 +244,12 @@ static void sci_remote_node_context_validate_context_buffer(struct sci_remote_no
rnc_buffer->ssp.is_valid = true;
- if (!idev->is_direct_attached &&
- (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
+ if (dev_is_sata(dev) && dev->parent) {
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
} else {
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
- if (idev->is_direct_attached)
+ if (!dev->parent)
sci_port_setup_transports(idev->owning_port,
sci_rnc->remote_node_index);
}
@@ -248,13 +272,18 @@ static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_
static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev = rnc_to_dev(rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
/* Check to see if we have gotten back to the initial state because
* someone requested to destroy the remote node context object.
*/
if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
- rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
sci_remote_node_context_notify_user(rnc);
+
+ smp_wmb();
+ wake_up(&ihost->eventq);
}
}
@@ -269,6 +298,8 @@ static void sci_remote_node_context_invalidating_state_enter(struct sci_base_sta
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ /* Terminate all outstanding requests. */
+ sci_remote_device_terminate_requests(rnc_to_dev(rnc));
sci_remote_node_context_invalidate_context_buffer(rnc);
}
@@ -287,10 +318,8 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m
* resume because of a target reset we also need to update
* the STPTLDARNI register with the RNi of the device
*/
- if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
- idev->is_direct_attached)
- sci_port_setup_transports(idev->owning_port,
- rnc->remote_node_index);
+ if (dev_is_sata(dev) && !dev->parent)
+ sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}
@@ -298,10 +327,22 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m
static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ enum sci_remote_node_context_destination_state dest_select;
+ int tell_user = 1;
+
+ dest_select = rnc->destination_state;
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
- rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+ if ((dest_select == RNC_DEST_SUSPENDED) ||
+ (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
+ sci_remote_node_context_suspend(
+ rnc, rnc->suspend_reason,
+ SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
- if (rnc->user_callback)
+ if (dest_select == RNC_DEST_SUSPENDED_RESUME)
+ tell_user = 0; /* Wait until ready again. */
+ }
+ if (tell_user)
sci_remote_node_context_notify_user(rnc);
}
@@ -315,10 +356,34 @@ static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_sta
static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev = rnc_to_dev(rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ u32 new_count = rnc->suspend_count + 1;
+
+ if (new_count == 0)
+ rnc->suspend_count = 1;
+ else
+ rnc->suspend_count = new_count;
+ smp_wmb();
+ /* Terminate outstanding requests pending abort. */
+ sci_remote_device_abort_requests_pending_abort(idev);
+
+ wake_up(&ihost->eventq);
sci_remote_node_context_continue_state_transitions(rnc);
}
+static void sci_remote_node_context_await_suspend_state_exit(
+ struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc
+ = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev = rnc_to_dev(rnc);
+
+ if (dev_is_sata(idev->domain_dev))
+ isci_dev_set_hang_detection_timeout(idev, 0);
+}
+
static const struct sci_base_state sci_remote_node_context_state_table[] = {
[SCI_RNC_INITIAL] = {
.enter_state = sci_remote_node_context_initial_state_enter,
@@ -341,7 +406,9 @@ static const struct sci_base_state sci_remote_node_context_state_table[] = {
[SCI_RNC_TX_RX_SUSPENDED] = {
.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
},
- [SCI_RNC_AWAIT_SUSPENSION] = { },
+ [SCI_RNC_AWAIT_SUSPENSION] = {
+ .exit_state = sci_remote_node_context_await_suspend_state_exit,
+ },
};
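
For readers new to this table: each entry wires optional enter/exit hooks into the driver's generic state machine. The dispatch implied by sci_change_state() is approximately the following (the helper lives in the common state-machine code, so treat this as a sketch):

	/* sci_change_state(sm, new):
	 *   1. run the exit hook of sm->current_state_id, if any
	 *      (e.g. the new SCI_RNC_AWAIT_SUSPENSION exit hook above);
	 *   2. record sm->previous_state_id and set current_state_id = new;
	 *   3. run the enter hook of the new state, if any
	 *      (e.g. sci_remote_node_context_tx_rx_suspended_state_enter).
	 */
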
void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
@@ -350,7 +417,7 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
memset(rnc, 0, sizeof(struct sci_remote_node_context));
rnc->remote_node_index = remote_node_index;
- rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}
@@ -359,6 +426,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
u32 event_code)
{
enum scis_sds_remote_node_context_states state;
+ u32 next_state;
state = sci_rnc->sm.current_state_id;
switch (state) {
@@ -373,18 +441,18 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
break;
case SCI_RNC_INVALIDATING:
if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
- if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
- state = SCI_RNC_INITIAL;
+ if (sci_rnc->destination_state == RNC_DEST_FINAL)
+ next_state = SCI_RNC_INITIAL;
else
- state = SCI_RNC_POSTING;
- sci_change_state(&sci_rnc->sm, state);
+ next_state = SCI_RNC_POSTING;
+ sci_change_state(&sci_rnc->sm, next_state);
} else {
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
* the device since it's being invalidated anyway */
- dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
"suspeneded by hardware while being "
"invalidated.\n", __func__, sci_rnc);
@@ -403,7 +471,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
			/* We really don't care if the hardware is going to suspend
* the device since it's being resumed anyway */
- dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
"suspeneded by hardware while being resumed.\n",
__func__, sci_rnc);
@@ -417,11 +485,11 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TL_RNC_SUSPEND_TX:
sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
- sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ sci_rnc->suspend_type = scu_get_event_type(event_code);
break;
case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
- sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ sci_rnc->suspend_type = scu_get_event_type(event_code);
break;
default:
goto out;
@@ -430,27 +498,29 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
case SCI_RNC_AWAIT_SUSPENSION:
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TL_RNC_SUSPEND_TX:
- sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
- sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ next_state = SCI_RNC_TX_SUSPENDED;
break;
case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
- sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
- sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ next_state = SCI_RNC_TX_RX_SUSPENDED;
break;
default:
goto out;
}
+ if (sci_rnc->suspend_type == scu_get_event_type(event_code))
+ sci_change_state(&sci_rnc->sm, next_state);
break;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state: %s\n", __func__,
+ rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
return SCI_SUCCESS;
out:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: code: %#x state: %d\n", __func__, event_code, state);
+ "%s: code: %#x state: %s\n", __func__, event_code,
+ rnc_state_name(state));
return SCI_FAILURE;
}
@@ -464,20 +534,23 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
state = sci_rnc->sm.current_state_id;
switch (state) {
case SCI_RNC_INVALIDATING:
- sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
return SCI_SUCCESS;
case SCI_RNC_POSTING:
case SCI_RNC_RESUMING:
case SCI_RNC_READY:
case SCI_RNC_TX_SUSPENDED:
case SCI_RNC_TX_RX_SUSPENDED:
- case SCI_RNC_AWAIT_SUSPENSION:
- sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
return SCI_SUCCESS;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+ return SCI_SUCCESS;
case SCI_RNC_INITIAL:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state: %s\n", __func__,
+ rnc_state_name(state));
/* We have decided that the destruct request on the remote node context
		 * cannot fail since it is either in the initial/destroyed state or
		 * can be destroyed.
@@ -485,35 +558,101 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
return SCI_SUCCESS;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
-enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
- u32 suspend_type,
- scics_sds_remote_node_context_callback cb_fn,
- void *cb_p)
+enum sci_status sci_remote_node_context_suspend(
+ struct sci_remote_node_context *sci_rnc,
+ enum sci_remote_node_suspension_reasons suspend_reason,
+ u32 suspend_type)
{
- enum scis_sds_remote_node_context_states state;
+ enum scis_sds_remote_node_context_states state
+ = sci_rnc->sm.current_state_id;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ enum sci_status status = SCI_FAILURE_INVALID_STATE;
+ enum sci_remote_node_context_destination_state dest_param =
+ RNC_DEST_UNSPECIFIED;
+
+ dev_dbg(scirdev_to_dev(idev),
+		"%s: current state %s, current suspend_type %x, dest state %d,"
+		" arg suspend_reason %d, arg suspend_type %x\n",
+ __func__, rnc_state_name(state), sci_rnc->suspend_type,
+ sci_rnc->destination_state, suspend_reason,
+ suspend_type);
+
+ /* Disable automatic state continuations if explicitly suspending. */
+ if ((suspend_reason == SCI_HW_SUSPEND) ||
+ (sci_rnc->destination_state == RNC_DEST_FINAL))
+ dest_param = sci_rnc->destination_state;
- state = sci_rnc->sm.current_state_id;
- if (state != SCI_RNC_READY) {
+ switch (state) {
+ case SCI_RNC_READY:
+ break;
+ case SCI_RNC_INVALIDATING:
+ if (sci_rnc->destination_state == RNC_DEST_FINAL) {
+ dev_warn(scirdev_to_dev(idev),
+ "%s: already destroying %p\n",
+ __func__, sci_rnc);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ /* Fall through and handle like SCI_RNC_POSTING */
+ case SCI_RNC_RESUMING:
+ /* Fall through and handle like SCI_RNC_POSTING */
+ case SCI_RNC_POSTING:
+		/* Set the destination state to SUSPENDED - this signals the
+		 * SCI_RNC_READY entry handler that a suspension needs to be
+		 * done immediately.
+ */
+ if (sci_rnc->destination_state != RNC_DEST_FINAL)
+ sci_rnc->destination_state = RNC_DEST_SUSPENDED;
+ sci_rnc->suspend_type = suspend_type;
+ sci_rnc->suspend_reason = suspend_reason;
+ return SCI_SUCCESS;
+
+ case SCI_RNC_TX_SUSPENDED:
+ if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
+ status = SCI_SUCCESS;
+ break;
+ case SCI_RNC_TX_RX_SUSPENDED:
+ if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
+ status = SCI_SUCCESS;
+ break;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
+ || (suspend_type == sci_rnc->suspend_type))
+ return SCI_SUCCESS;
+ break;
+ default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
+ sci_rnc->destination_state = dest_param;
+ sci_rnc->suspend_type = suspend_type;
+ sci_rnc->suspend_reason = suspend_reason;
+
+ if (status == SCI_SUCCESS) { /* Already in the destination state? */
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ wake_up_all(&ihost->eventq); /* Let observers look. */
+ return SCI_SUCCESS;
+ }
+ if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
+ (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {
- sci_rnc->user_callback = cb_fn;
- sci_rnc->user_cookie = cb_p;
- sci_rnc->suspension_code = suspend_type;
+ if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
+ isci_dev_set_hang_detection_timeout(idev, 0x00000001);
- if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
- sci_remote_device_post_request(rnc_to_dev(sci_rnc),
- SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
+ sci_remote_device_post_request(
+ idev, SCI_SOFTWARE_SUSPEND_CMD);
}
+ if (state != SCI_RNC_AWAIT_SUSPENSION)
+ sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
- sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
return SCI_SUCCESS;
}
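
To make the new reason/type plumbing concrete, a hedged trace of a plain software suspend from the ready state, following the code above:

	/* sci_remote_node_context_suspend(rnc, SCI_SW_SUSPEND_NORMAL,
	 *				   SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT):
	 *   - state == SCI_RNC_READY, so the switch falls out with dest_param
	 *     left at RNC_DEST_UNSPECIFIED;
	 *   - suspend_type and suspend_reason are recorded on the RNC;
	 *   - SCI_SOFTWARE_SUSPEND_CMD (a TX/RX suspend) is posted to the SCU;
	 *   - the RNC moves to SCI_RNC_AWAIT_SUSPENSION and waits for the
	 *     matching SCU_EVENT_TL_RNC_SUSPEND_TX_RX before the event handler
	 *     advances it to SCI_RNC_TX_RX_SUSPENDED.
	 */
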
@@ -522,56 +661,86 @@ enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *s
void *cb_p)
{
enum scis_sds_remote_node_context_states state;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
state = sci_rnc->sm.current_state_id;
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
+ "dev resume path %s\n",
+ __func__, rnc_state_name(state), cb_fn, cb_p,
+ sci_rnc->destination_state,
+ test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
+ ? "<abort active>" : "<normal>");
+
switch (state) {
case SCI_RNC_INITIAL:
if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
return SCI_FAILURE_INVALID_STATE;
- sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
- sci_remote_node_context_construct_buffer(sci_rnc);
- sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
+ RNC_DEST_READY);
+ if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
+ sci_remote_node_context_construct_buffer(sci_rnc);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+ }
return SCI_SUCCESS;
+
case SCI_RNC_POSTING:
case SCI_RNC_INVALIDATING:
case SCI_RNC_RESUMING:
- if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
- return SCI_FAILURE_INVALID_STATE;
-
- sci_rnc->user_callback = cb_fn;
- sci_rnc->user_cookie = cb_p;
+ /* We are still waiting to post when a resume was
+ * requested.
+ */
+ switch (sci_rnc->destination_state) {
+ case RNC_DEST_SUSPENDED:
+ case RNC_DEST_SUSPENDED_RESUME:
+ /* Previously waiting to suspend after posting.
+ * Now continue onto resumption.
+ */
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p,
+ RNC_DEST_SUSPENDED_RESUME);
+ break;
+ default:
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p,
+ RNC_DEST_READY);
+ break;
+ }
return SCI_SUCCESS;
- case SCI_RNC_TX_SUSPENDED: {
- struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
- struct domain_device *dev = idev->domain_dev;
-
- sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
-
- /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
- if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
- sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
- else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
- if (idev->is_direct_attached) {
- /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
- sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
- } else {
- sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ {
+ struct domain_device *dev = idev->domain_dev;
+ /* If this is an expander attached SATA device we must
+ * invalidate and repost the RNC since this is the only
+ * way to clear the TCi to NCQ tag mapping table for
+ * the RNi. All other device types we can just resume.
+ */
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p, RNC_DEST_READY);
+
+ if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
+ if ((dev_is_sata(dev) && dev->parent) ||
+ (sci_rnc->destination_state == RNC_DEST_FINAL))
+ sci_change_state(&sci_rnc->sm,
+ SCI_RNC_INVALIDATING);
+ else
+ sci_change_state(&sci_rnc->sm,
+ SCI_RNC_RESUMING);
}
- } else
- return SCI_FAILURE;
+ }
return SCI_SUCCESS;
- }
- case SCI_RNC_TX_RX_SUSPENDED:
- sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
- sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
- return SCI_FAILURE_INVALID_STATE;
+
case SCI_RNC_AWAIT_SUSPENSION:
- sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
return SCI_SUCCESS;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@@ -590,35 +759,51 @@ enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context
case SCI_RNC_TX_RX_SUSPENDED:
case SCI_RNC_AWAIT_SUSPENSION:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
default:
- break;
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
}
- dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: requested to start IO while still resuming, %d\n",
- __func__, state);
- return SCI_FAILURE_INVALID_STATE;
}
-enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
- struct isci_request *ireq)
+enum sci_status sci_remote_node_context_start_task(
+ struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum sci_status status = sci_remote_node_context_resume(sci_rnc,
+ cb_fn, cb_p);
+ if (status != SCI_SUCCESS)
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: resume failed: %d\n", __func__, status);
+ return status;
+}
+
+int sci_remote_node_context_is_safe_to_abort(
+ struct sci_remote_node_context *sci_rnc)
{
enum scis_sds_remote_node_context_states state;
state = sci_rnc->sm.current_state_id;
switch (state) {
+ case SCI_RNC_INVALIDATING:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ return 1;
+ case SCI_RNC_POSTING:
case SCI_RNC_RESUMING:
case SCI_RNC_READY:
- case SCI_RNC_AWAIT_SUSPENSION:
- return SCI_SUCCESS;
case SCI_RNC_TX_SUSPENDED:
- case SCI_RNC_TX_RX_SUSPENDED:
- sci_remote_node_context_resume(sci_rnc, NULL, NULL);
- return SCI_SUCCESS;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ case SCI_RNC_INITIAL:
+ return 0;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %d\n", __func__, state);
- return SCI_FAILURE_INVALID_STATE;
+ return 0;
}
}
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index a241e0f4c865..a703b9ce0c2c 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -75,8 +75,13 @@
*/
#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
-#define SCU_HARDWARE_SUSPENSION (0)
-#define SCI_SOFTWARE_SUSPENSION (1)
+enum sci_remote_node_suspension_reasons {
+ SCI_HW_SUSPEND,
+ SCI_SW_SUSPEND_NORMAL,
+ SCI_SW_SUSPEND_LINKHANG_DETECT
+};
+#define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX
+#define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX
struct isci_request;
struct isci_remote_device;
@@ -137,9 +142,13 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
* node context.
*/
enum sci_remote_node_context_destination_state {
- SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
- SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
- SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
+ RNC_DEST_UNSPECIFIED,
+ RNC_DEST_READY,
+ RNC_DEST_FINAL,
+ RNC_DEST_SUSPENDED, /* Set when suspend during post/invalidate */
+ RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting
+ * or invalidating and already suspending.
+ */
};
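
A hedged summary of how these destination states are consumed, pieced together from the remote_node_context.c hunks above:

	/* RNC_DEST_SUSPENDED:        set by suspend() while POSTING /
	 *                            INVALIDATING / RESUMING; the READY entry
	 *                            handler then suspends immediately and
	 *                            notifies the user.
	 * RNC_DEST_SUSPENDED_RESUME: same, but a resume arrived meanwhile; the
	 *                            READY entry handler suspends without
	 *                            notifying, and the TX/RX-suspended entry
	 *                            handler continues on to the resume.
	 * RNC_DEST_FINAL:            destroy in progress; continuations route
	 *                            to the destruct callback instead.
	 */
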
/**
@@ -156,10 +165,12 @@ struct sci_remote_node_context {
u16 remote_node_index;
/**
- * This field is the recored suspension code or the reason for the remote node
+	 * This field is the recorded suspension type of the remote node
* context suspension.
*/
- u32 suspension_code;
+ u32 suspend_type;
+ enum sci_remote_node_suspension_reasons suspend_reason;
+ u32 suspend_count;
/**
* This field is true if the remote node context is resuming from its current
@@ -193,6 +204,8 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
bool sci_remote_node_context_is_ready(
struct sci_remote_node_context *sci_rnc);
+bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc);
+
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
u32 event_code);
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
@@ -200,14 +213,24 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
void *callback_parameter);
enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
u32 suspend_type,
- scics_sds_remote_node_context_callback cb_fn,
- void *cb_p);
+ u32 suspension_code);
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p);
enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
- struct isci_request *ireq);
+ struct isci_request *ireq,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq);
+int sci_remote_node_context_is_safe_to_abort(
+ struct sci_remote_node_context *sci_rnc);
+static inline bool sci_remote_node_context_is_being_destroyed(
+ struct sci_remote_node_context *sci_rnc)
+{
+ return (sci_rnc->destination_state == RNC_DEST_FINAL)
+ || ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL)
+ && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED));
+}
#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 2def1e3960f6..7a0431c73493 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -92,11 +92,11 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
if (idx == 0) {
offset = (void *) &ireq->tc->sgl_pair_ab -
(void *) &ihost->task_context_table[0];
- return ihost->task_context_dma + offset;
+ return ihost->tc_dma + offset;
} else if (idx == 1) {
offset = (void *) &ireq->tc->sgl_pair_cd -
(void *) &ihost->task_context_table[0];
- return ihost->task_context_dma + offset;
+ return ihost->tc_dma + offset;
}
return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
@@ -730,7 +730,7 @@ static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *i
{
struct sas_task *task = isci_request_access_task(ireq);
- ireq->protocol = SCIC_SSP_PROTOCOL;
+ ireq->protocol = SAS_PROTOCOL_SSP;
scu_ssp_io_request_construct_task_context(ireq,
task->data_dir,
@@ -763,7 +763,7 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
bool copy = false;
struct sas_task *task = isci_request_access_task(ireq);
- ireq->protocol = SCIC_STP_PROTOCOL;
+ ireq->protocol = SAS_PROTOCOL_STP;
copy = (task->data_dir == DMA_NONE) ? false : true;
@@ -863,6 +863,8 @@ sci_io_request_terminate(struct isci_request *ireq)
switch (state) {
case SCI_REQ_CONSTRUCTED:
+ /* Set to make sure no HW terminate posting is done: */
+ set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
@@ -883,8 +885,7 @@ sci_io_request_terminate(struct isci_request *ireq)
case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
case SCI_REQ_ATAPI_WAIT_D2H:
case SCI_REQ_ATAPI_WAIT_TC_COMP:
- sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
- return SCI_SUCCESS;
+ /* Fall through and change state to ABORTING... */
case SCI_REQ_TASK_WAIT_TC_RESP:
/* The task frame was already confirmed to have been
* sent by the SCU HW. Since the state machine is
@@ -893,20 +894,21 @@ sci_io_request_terminate(struct isci_request *ireq)
* and don't wait for the task response.
*/
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
- sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
- return SCI_SUCCESS;
+ /* Fall through and handle like ABORTING... */
case SCI_REQ_ABORTING:
- /* If a request has a termination requested twice, return
- * a failure indication, since HW confirmation of the first
- * abort is still outstanding.
+ if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
+ set_bit(IREQ_PENDING_ABORT, &ireq->flags);
+ else
+ clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
+ /* If the request is only waiting on the remote device
+ * suspension, return SUCCESS so the caller will wait too.
*/
+ return SCI_SUCCESS;
case SCI_REQ_COMPLETED:
default:
dev_warn(&ireq->owning_controller->pdev->dev,
"%s: SCIC IO Request requested to abort while in wrong "
- "state %d\n",
- __func__,
- ireq->sm.current_state_id);
+ "state %d\n", __func__, ireq->sm.current_state_id);
break;
}
@@ -1070,7 +1072,7 @@ request_started_state_tc_event(struct isci_request *ireq,
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
- if (ireq->protocol == SCIC_STP_PROTOCOL) {
+ if (ireq->protocol == SAS_PROTOCOL_STP) {
ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT;
ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
@@ -2117,7 +2119,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
*/
if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
sci_remote_device_suspend(ireq->target_device,
- SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+ SCI_SW_SUSPEND_NORMAL);
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
@@ -2138,13 +2140,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
* - this comes only for B0
*/
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
- sci_remote_device_suspend(ireq->target_device,
- SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
- /* Fall through to the default case */
default:
/* All other completion status cause the IO to be complete. */
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
@@ -2262,15 +2257,151 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire
return status;
}
+static int sci_request_smp_completion_status_is_tx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+ return 1;
+ }
+ return 0;
+}
+
+static int sci_request_smp_completion_status_is_tx_rx_suspend(
+ unsigned int completion_status)
+{
+ return 0; /* There are no Tx/Rx SMP suspend conditions. */
+}
+
+static int sci_request_ssp_completion_status_is_tx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_DONE_TX_RAW_CMD_ERR:
+ case SCU_TASK_DONE_LF_ERR:
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+ return 1;
+ }
+ return 0;
+}
+
+static int sci_request_ssp_completion_status_is_tx_rx_suspend(
+ unsigned int completion_status)
+{
+ return 0; /* There are no Tx/Rx SSP suspend conditions. */
+}
+
+static int sci_request_stpsata_completion_status_is_tx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_DONE_TX_RAW_CMD_ERR:
+ case SCU_TASK_DONE_LL_R_ERR:
+ case SCU_TASK_DONE_LL_PERR:
+ case SCU_TASK_DONE_REG_ERR:
+ case SCU_TASK_DONE_SDB_ERR:
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+ return 1;
+ }
+ return 0;
+}
+
+static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_DONE_LF_ERR:
+ case SCU_TASK_DONE_LL_SY_TERM:
+ case SCU_TASK_DONE_LL_LF_TERM:
+ case SCU_TASK_DONE_BREAK_RCVD:
+ case SCU_TASK_DONE_INV_FIS_LEN:
+ case SCU_TASK_DONE_UNEXP_FIS:
+ case SCU_TASK_DONE_UNEXP_SDBFIS:
+ case SCU_TASK_DONE_MAX_PLD_ERR:
+ return 1;
+ }
+ return 0;
+}
+
+static void sci_request_handle_suspending_completions(
+ struct isci_request *ireq,
+ u32 completion_code)
+{
+ int is_tx = 0;
+ int is_tx_rx = 0;
+
+ switch (ireq->protocol) {
+ case SAS_PROTOCOL_SMP:
+ is_tx = sci_request_smp_completion_status_is_tx_suspend(
+ completion_code);
+ is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
+ completion_code);
+ break;
+ case SAS_PROTOCOL_SSP:
+ is_tx = sci_request_ssp_completion_status_is_tx_suspend(
+ completion_code);
+ is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
+ completion_code);
+ break;
+ case SAS_PROTOCOL_STP:
+ is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
+ completion_code);
+ is_tx_rx =
+ sci_request_stpsata_completion_status_is_tx_rx_suspend(
+ completion_code);
+ break;
+ default:
+ dev_warn(&ireq->isci_host->pdev->dev,
+ "%s: request %p has no valid protocol\n",
+ __func__, ireq);
+ break;
+ }
+ if (is_tx || is_tx_rx) {
+ BUG_ON(is_tx && is_tx_rx);
+
+ sci_remote_node_context_suspend(
+ &ireq->target_device->rnc,
+ SCI_HW_SUSPEND,
+ (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
+ : SCU_EVENT_TL_RNC_SUSPEND_TX);
+ }
+}
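
A worked example of the decode, traced from the helpers above (no new behavior): an STP/SATA request whose task context completes with SCU_TASK_DONE_REG_ERR yields is_tx = 1 and is_tx_rx = 0, so the handler issues:

	sci_remote_node_context_suspend(&ireq->target_device->rnc,
					SCI_HW_SUSPEND,
					SCU_EVENT_TL_RNC_SUSPEND_TX);
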
+
enum sci_status
sci_io_request_tc_completion(struct isci_request *ireq,
- u32 completion_code)
+ u32 completion_code)
{
enum sci_base_request_states state;
struct isci_host *ihost = ireq->owning_controller;
state = ireq->sm.current_state_id;
+ /* Decode those completions that signal upcoming suspension events. */
+ sci_request_handle_suspending_completions(
+ ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
+
switch (state) {
case SCI_REQ_STARTED:
return request_started_state_tc_event(ireq, completion_code);
@@ -2362,9 +2493,6 @@ static void isci_request_process_response_iu(
* @request: This parameter is the completed isci_request object.
* @response_ptr: This parameter specifies the service response for the I/O.
* @status_ptr: This parameter specifies the exec status for the I/O.
- * @complete_to_host_ptr: This parameter specifies the action to be taken by
- * the LLDD with respect to completing this request or forcing an abort
- * condition on the I/O.
* @open_rej_reason: This parameter specifies the encoded reason for the
* abandon-class reject.
*
@@ -2375,14 +2503,12 @@ static void isci_request_set_open_reject_status(
struct sas_task *task,
enum service_response *response_ptr,
enum exec_status *status_ptr,
- enum isci_completion_selection *complete_to_host_ptr,
enum sas_open_rej_reason open_rej_reason)
{
/* Task in the target is done. */
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
*response_ptr = SAS_TASK_UNDELIVERED;
*status_ptr = SAS_OPEN_REJECT;
- *complete_to_host_ptr = isci_perform_normal_io_completion;
task->task_status.open_rej_reason = open_rej_reason;
}
@@ -2392,9 +2518,6 @@ static void isci_request_set_open_reject_status(
* @request: This parameter is the completed isci_request object.
* @response_ptr: This parameter specifies the service response for the I/O.
* @status_ptr: This parameter specifies the exec status for the I/O.
- * @complete_to_host_ptr: This parameter specifies the action to be taken by
- * the LLDD with respect to completing this request or forcing an abort
- * condition on the I/O.
*
* none.
*/
@@ -2403,8 +2526,7 @@ static void isci_request_handle_controller_specific_errors(
struct isci_request *request,
struct sas_task *task,
enum service_response *response_ptr,
- enum exec_status *status_ptr,
- enum isci_completion_selection *complete_to_host_ptr)
+ enum exec_status *status_ptr)
{
unsigned int cstatus;
@@ -2445,9 +2567,6 @@ static void isci_request_handle_controller_specific_errors(
*status_ptr = SAS_ABORTED_TASK;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
- *complete_to_host_ptr =
- isci_perform_normal_io_completion;
} else {
/* Task in the target is not done. */
*response_ptr = SAS_TASK_UNDELIVERED;
@@ -2458,9 +2577,6 @@ static void isci_request_handle_controller_specific_errors(
*status_ptr = SAM_STAT_TASK_ABORTED;
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
- *complete_to_host_ptr =
- isci_perform_error_io_completion;
}
break;
@@ -2489,8 +2605,6 @@ static void isci_request_handle_controller_specific_errors(
*status_ptr = SAS_ABORTED_TASK;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
- *complete_to_host_ptr = isci_perform_normal_io_completion;
break;
@@ -2501,7 +2615,7 @@ static void isci_request_handle_controller_specific_errors(
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
+ SAS_OREJ_WRONG_DEST);
break;
case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
@@ -2511,56 +2625,56 @@ static void isci_request_handle_controller_specific_errors(
*/
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_RESV_AB0);
+ SAS_OREJ_RESV_AB0);
break;
case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_RESV_AB1);
+ SAS_OREJ_RESV_AB1);
break;
case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_RESV_AB2);
+ SAS_OREJ_RESV_AB2);
break;
case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_RESV_AB3);
+ SAS_OREJ_RESV_AB3);
break;
case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_BAD_DEST);
+ SAS_OREJ_BAD_DEST);
break;
case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_STP_NORES);
+ SAS_OREJ_STP_NORES);
break;
case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_EPROTO);
+ SAS_OREJ_EPROTO);
break;
case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_CONN_RATE);
+ SAS_OREJ_CONN_RATE);
break;
case SCU_TASK_DONE_LL_R_ERR:
@@ -2592,95 +2706,12 @@ static void isci_request_handle_controller_specific_errors(
*response_ptr = SAS_TASK_UNDELIVERED;
*status_ptr = SAM_STAT_TASK_ABORTED;
- if (task->task_proto == SAS_PROTOCOL_SMP) {
+ if (task->task_proto == SAS_PROTOCOL_SMP)
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
- *complete_to_host_ptr = isci_perform_normal_io_completion;
- } else {
+ else
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
- *complete_to_host_ptr = isci_perform_error_io_completion;
- }
- break;
- }
-}
-
-/**
- * isci_task_save_for_upper_layer_completion() - This function saves the
- * request for later completion to the upper layer driver.
- * @host: This parameter is a pointer to the host on which the the request
- * should be queued (either as an error or success).
- * @request: This parameter is the completed request.
- * @response: This parameter is the response code for the completed task.
- * @status: This parameter is the status code for the completed task.
- *
- * none.
- */
-static void isci_task_save_for_upper_layer_completion(
- struct isci_host *host,
- struct isci_request *request,
- enum service_response response,
- enum exec_status status,
- enum isci_completion_selection task_notification_selection)
-{
- struct sas_task *task = isci_request_access_task(request);
-
- task_notification_selection
- = isci_task_set_completion_status(task, response, status,
- task_notification_selection);
-
- /* Tasks aborted specifically by a call to the lldd_abort_task
- * function should not be completed to the host in the regular path.
- */
- switch (task_notification_selection) {
-
- case isci_perform_normal_io_completion:
- /* Normal notification (task_done) */
-
- /* Add to the completed list. */
- list_add(&request->completed_node,
- &host->requests_to_complete);
-
- /* Take the request off the device's pending request list. */
- list_del_init(&request->dev_node);
- break;
-
- case isci_perform_aborted_io_completion:
- /* No notification to libsas because this request is
- * already in the abort path.
- */
- /* Wake up whatever process was waiting for this
- * request to complete.
- */
- WARN_ON(request->io_request_completion == NULL);
-
- if (request->io_request_completion != NULL) {
-
- /* Signal whoever is waiting that this
- * request is complete.
- */
- complete(request->io_request_completion);
- }
- break;
-
- case isci_perform_error_io_completion:
- /* Use sas_task_abort */
- /* Add to the aborted list. */
- list_add(&request->completed_node,
- &host->requests_to_errorback);
- break;
-
- default:
- /* Add to the error-to-libsas list. */
- list_add(&request->completed_node,
- &host->requests_to_errorback);
break;
}
- dev_dbg(&host->pdev->dev,
- "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
- __func__, task_notification_selection, task,
- (task) ? task->task_status.resp : 0, response,
- (task) ? task->task_status.stat : 0, status);
}
static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
@@ -2715,295 +2746,164 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
struct isci_remote_device *idev = request->target_device;
enum service_response response = SAS_TASK_UNDELIVERED;
enum exec_status status = SAS_ABORTED_TASK;
- enum isci_request_status request_status;
- enum isci_completion_selection complete_to_host
- = isci_perform_normal_io_completion;
dev_dbg(&ihost->pdev->dev,
- "%s: request = %p, task = %p,\n"
+ "%s: request = %p, task = %p, "
"task->data_dir = %d completion_status = 0x%x\n",
- __func__,
- request,
- task,
- task->data_dir,
- completion_status);
+ __func__, request, task, task->data_dir, completion_status);
- spin_lock(&request->state_lock);
- request_status = request->status;
+ /* The request is done from an SCU HW perspective. */
- /* Decode the request status. Note that if the request has been
- * aborted by a task management function, we don't care
- * what the status is.
- */
- switch (request_status) {
-
- case aborted:
- /* "aborted" indicates that the request was aborted by a task
- * management function, since once a task management request is
- * performed by the device, the request only completes because
- * of the subsequent driver terminate.
- *
- * Aborted also means an external thread is explicitly managing
- * this request, so that we do not complete it up the stack.
- *
- * The target is still there (since the TMF was successful).
- */
- set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- response = SAS_TASK_COMPLETE;
+ /* This is an active request being completed from the core. */
+ switch (completion_status) {
- /* See if the device has been/is being stopped. Note
- * that we ignore the quiesce state, since we are
- * concerned about the actual device state.
- */
- if (!idev)
- status = SAS_DEVICE_UNKNOWN;
- else
- status = SAS_ABORTED_TASK;
+ case SCI_IO_FAILURE_RESPONSE_VALID:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
+ __func__, request, task);
+
+ if (sas_protocol_ata(task->task_proto)) {
+ isci_process_stp_response(task, &request->stp.rsp);
+ } else if (SAS_PROTOCOL_SSP == task->task_proto) {
+
+ /* crack the iu response buffer. */
+ resp_iu = &request->ssp.rsp;
+ isci_request_process_response_iu(task, resp_iu,
+ &ihost->pdev->dev);
+
+ } else if (SAS_PROTOCOL_SMP == task->task_proto) {
+
+ dev_err(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
+ "SAS_PROTOCOL_SMP protocol\n",
+ __func__);
- complete_to_host = isci_perform_aborted_io_completion;
- /* This was an aborted request. */
+ } else
+ dev_err(&ihost->pdev->dev,
+ "%s: unknown protocol\n", __func__);
- spin_unlock(&request->state_lock);
+ /* use the task status set in the task struct by the
+ * isci_request_process_response_iu call.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = task->task_status.resp;
+ status = task->task_status.stat;
break;
- case aborting:
- /* aborting means that the task management function tried and
- * failed to abort the request. We need to note the request
- * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
- * target as down.
- *
- * Aborting also means an external thread is explicitly managing
- * this request, so that we do not complete it up the stack.
- */
+ case SCI_IO_SUCCESS:
+ case SCI_IO_SUCCESS_IO_DONE_EARLY:
+
+ response = SAS_TASK_COMPLETE;
+ status = SAM_STAT_GOOD;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- response = SAS_TASK_UNDELIVERED;
- if (!idev)
- /* The device has been /is being stopped. Note that
- * we ignore the quiesce state, since we are
- * concerned about the actual device state.
- */
- status = SAS_DEVICE_UNKNOWN;
- else
- status = SAS_PHY_DOWN;
+ if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
- complete_to_host = isci_perform_aborted_io_completion;
+ /* This was an SSP / STP / SATA transfer.
+ * There is a possibility that less data than
+ * the maximum was transferred.
+ */
+ u32 transferred_length = sci_req_tx_bytes(request);
- /* This was an aborted request. */
+ task->task_status.residual
+ = task->total_xfer_len - transferred_length;
+
+ /* If there were residual bytes, call this an
+ * underrun.
+ */
+ if (task->task_status.residual != 0)
+ status = SAS_DATA_UNDERRUN;
- spin_unlock(&request->state_lock);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
+ __func__, status);
+
+ } else
+ dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
+ __func__);
break;
- case terminating:
+ case SCI_IO_FAILURE_TERMINATED:
- /* This was a terminated request. This happens when
- * the I/O is being terminated because of an action on
- * the device (reset, tear down, etc.), and the I/O needs
- * to be completed up the stack.
- */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
+ __func__, request, task);
+
+ /* The request was terminated explicitly. */
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
response = SAS_TASK_UNDELIVERED;
/* See if the device has been/is being stopped. Note
- * that we ignore the quiesce state, since we are
- * concerned about the actual device state.
- */
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
-
- complete_to_host = isci_perform_aborted_io_completion;
-
- /* This was a terminated request. */
-
- spin_unlock(&request->state_lock);
break;
- case dead:
- /* This was a terminated request that timed-out during the
- * termination process. There is no task to complete to
- * libsas.
- */
- complete_to_host = isci_perform_normal_io_completion;
- spin_unlock(&request->state_lock);
- break;
+ case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
- default:
-
- /* The request is done from an SCU HW perspective. */
- request->status = completed;
-
- spin_unlock(&request->state_lock);
-
- /* This is an active request being completed from the core. */
- switch (completion_status) {
-
- case SCI_IO_FAILURE_RESPONSE_VALID:
- dev_dbg(&ihost->pdev->dev,
- "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
- __func__,
- request,
- task);
-
- if (sas_protocol_ata(task->task_proto)) {
- isci_process_stp_response(task, &request->stp.rsp);
- } else if (SAS_PROTOCOL_SSP == task->task_proto) {
-
- /* crack the iu response buffer. */
- resp_iu = &request->ssp.rsp;
- isci_request_process_response_iu(task, resp_iu,
- &ihost->pdev->dev);
-
- } else if (SAS_PROTOCOL_SMP == task->task_proto) {
-
- dev_err(&ihost->pdev->dev,
- "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
- "SAS_PROTOCOL_SMP protocol\n",
- __func__);
-
- } else
- dev_err(&ihost->pdev->dev,
- "%s: unknown protocol\n", __func__);
-
- /* use the task status set in the task struct by the
- * isci_request_process_response_iu call.
- */
- set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- response = task->task_status.resp;
- status = task->task_status.stat;
- break;
+ isci_request_handle_controller_specific_errors(idev, request,
+ task, &response,
+ &status);
+ break;
- case SCI_IO_SUCCESS:
- case SCI_IO_SUCCESS_IO_DONE_EARLY:
+ case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
+ /* This is a special case, in that the I/O completion
+ * is telling us that the device needs a reset.
+ * In order for the device reset condition to be
+ * noticed, the I/O has to be handled in the error
+ * handler. Set the reset flag and cause the
+ * SCSI error thread to be scheduled.
+ */
+ spin_lock_irqsave(&task->task_state_lock, task_flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, task_flags);
- response = SAS_TASK_COMPLETE;
- status = SAM_STAT_GOOD;
- set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ /* Fail the I/O. */
+ response = SAS_TASK_UNDELIVERED;
+ status = SAM_STAT_TASK_ABORTED;
- if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
- /* This was an SSP / STP / SATA transfer.
- * There is a possibility that less data than
- * the maximum was transferred.
- */
- u32 transferred_length = sci_req_tx_bytes(request);
+ case SCI_FAILURE_RETRY_REQUIRED:
- task->task_status.residual
- = task->total_xfer_len - transferred_length;
+ /* Fail the I/O so it can be retried. */
+ response = SAS_TASK_UNDELIVERED;
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
- /* If there were residual bytes, call this an
- * underrun.
- */
- if (task->task_status.residual != 0)
- status = SAS_DATA_UNDERRUN;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
- dev_dbg(&ihost->pdev->dev,
- "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
- __func__,
- status);
- } else
- dev_dbg(&ihost->pdev->dev,
- "%s: SCI_IO_SUCCESS\n",
- __func__);
+ default:
+ /* Catch any otherwise unhandled error codes here. */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: invalid completion code: 0x%x - "
+ "isci_request = %p\n",
+ __func__, completion_status, request);
- break;
+ response = SAS_TASK_UNDELIVERED;
- case SCI_IO_FAILURE_TERMINATED:
- dev_dbg(&ihost->pdev->dev,
- "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
- __func__,
- request,
- task);
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
- /* The request was terminated explicitly. No handling
- * is needed in the SCSI error handler path.
- */
+ if (SAS_PROTOCOL_SMP == task->task_proto)
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- response = SAS_TASK_UNDELIVERED;
-
- /* See if the device has been/is being stopped. Note
- * that we ignore the quiesce state, since we are
- * concerned about the actual device state.
- */
- if (!idev)
- status = SAS_DEVICE_UNKNOWN;
- else
- status = SAS_ABORTED_TASK;
-
- complete_to_host = isci_perform_normal_io_completion;
- break;
-
- case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
-
- isci_request_handle_controller_specific_errors(
- idev, request, task, &response, &status,
- &complete_to_host);
-
- break;
-
- case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
- /* This is a special case, in that the I/O completion
- * is telling us that the device needs a reset.
- * In order for the device reset condition to be
- * noticed, the I/O has to be handled in the error
- * handler. Set the reset flag and cause the
- * SCSI error thread to be scheduled.
- */
- spin_lock_irqsave(&task->task_state_lock, task_flags);
- task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
- spin_unlock_irqrestore(&task->task_state_lock, task_flags);
-
- /* Fail the I/O. */
- response = SAS_TASK_UNDELIVERED;
- status = SAM_STAT_TASK_ABORTED;
-
- complete_to_host = isci_perform_error_io_completion;
+ else
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- break;
-
- case SCI_FAILURE_RETRY_REQUIRED:
-
- /* Fail the I/O so it can be retried. */
- response = SAS_TASK_UNDELIVERED;
- if (!idev)
- status = SAS_DEVICE_UNKNOWN;
- else
- status = SAS_ABORTED_TASK;
-
- complete_to_host = isci_perform_normal_io_completion;
- set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- break;
-
-
- default:
- /* Catch any otherwise unhandled error codes here. */
- dev_dbg(&ihost->pdev->dev,
- "%s: invalid completion code: 0x%x - "
- "isci_request = %p\n",
- __func__, completion_status, request);
-
- response = SAS_TASK_UNDELIVERED;
-
- /* See if the device has been/is being stopped. Note
- * that we ignore the quiesce state, since we are
- * concerned about the actual device state.
- */
- if (!idev)
- status = SAS_DEVICE_UNKNOWN;
- else
- status = SAS_ABORTED_TASK;
-
- if (SAS_PROTOCOL_SMP == task->task_proto) {
- set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- complete_to_host = isci_perform_normal_io_completion;
- } else {
- clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- complete_to_host = isci_perform_error_io_completion;
- }
- break;
- }
break;
}
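Of the completion codes handled above, only SCI_IO_SUCCESS_IO_DONE_EARLY involves any arithmetic: the residual decides between SAM_STAT_GOOD and SAS_DATA_UNDERRUN. A minimal standalone sketch of that test (the sketch_* name is hypothetical, and 'transferred' stands in for sci_req_tx_bytes(request)):

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/libsas.h>

/* Sketch: classify a possibly-short transfer the way the
 * SCI_IO_SUCCESS_IO_DONE_EARLY branch above does.
 */
static enum exec_status sketch_early_done_status(struct sas_task *task,
						 u32 transferred)
{
	/* Residual is whatever the target did not move. */
	task->task_status.residual = task->total_xfer_len - transferred;

	return task->task_status.residual ? SAS_DATA_UNDERRUN : SAM_STAT_GOOD;
}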
@@ -3038,10 +2938,18 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
break;
}
- /* Put the completed request on the correct list */
- isci_task_save_for_upper_layer_completion(ihost, request, response,
- status, complete_to_host
- );
+ spin_lock_irqsave(&task->task_state_lock, task_flags);
+
+ task->task_status.resp = response;
+ task->task_status.stat = status;
+
+ if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
+ /* Normal notification (task_done) */
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, task_flags);
/* complete the io request to the core. */
sci_controller_complete_io(ihost, request->target_device, request);
@@ -3051,6 +2959,8 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
* task to recognize the already completed case.
*/
set_bit(IREQ_TERMINATED, &request->flags);
+
+ ireq_done(ihost, request, task);
}
static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
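With isci_task_save_for_upper_layer_completion() and the per-host completion lists gone, the tail of isci_request_io_request_complete() is the entire completion contract: publish resp/stat into the sas_task under its lock, mark the task done when IREQ_COMPLETE_IN_TARGET is set, hand the request back to the core, then let ireq_done() (a helper this series introduces elsewhere) notify libsas. A sketch of the locking step in isolation (hypothetical sketch_* name):

#include <linux/spinlock.h>
#include <scsi/libsas.h>

static void sketch_publish_task_status(struct sas_task *task,
				       enum service_response resp,
				       enum exec_status stat,
				       bool complete_in_target)
{
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_status.resp = resp;
	task->task_status.stat = stat;
	if (complete_in_target) {
		/* Normal notification path: the task is done. */
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
}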
@@ -3169,7 +3079,7 @@ sci_general_request_construct(struct isci_host *ihost,
sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
ireq->target_device = idev;
- ireq->protocol = SCIC_NO_PROTOCOL;
+ ireq->protocol = SAS_PROTOCOL_NONE;
ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
ireq->sci_status = SCI_SUCCESS;
@@ -3193,7 +3103,7 @@ sci_io_request_construct(struct isci_host *ihost,
if (dev->dev_type == SAS_END_DEV)
/* pass */;
- else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+ else if (dev_is_sata(dev))
memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
else if (dev_is_expander(dev))
/* pass */;
@@ -3215,10 +3125,15 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
/* Build the common part of the request */
sci_general_request_construct(ihost, idev, ireq);
- if (dev->dev_type == SAS_END_DEV ||
- dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
set_bit(IREQ_TMF, &ireq->flags);
memset(ireq->tc, 0, sizeof(struct scu_task_context));
+
+ /* Set the protocol indicator. */
+ if (dev_is_sata(dev))
+ ireq->protocol = SAS_PROTOCOL_STP;
+ else
+ ireq->protocol = SAS_PROTOCOL_SSP;
} else
status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
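Stamping a protocol indicator on TMF requests is the functional addition in this hunk; the selection reduces to the libsas dev_is_sata() predicate. A one-line sketch (hypothetical sketch_* name):

#include <scsi/libsas.h>
#include <scsi/sas_ata.h>	/* dev_is_sata() */

/* Sketch: STP for anything SATA-ish, SSP otherwise, exactly as
 * sci_task_request_construct() now records it.
 */
static enum sas_protocol sketch_tmf_protocol(struct domain_device *dev)
{
	return dev_is_sata(dev) ? SAS_PROTOCOL_STP : SAS_PROTOCOL_SSP;
}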
@@ -3311,7 +3226,7 @@ sci_io_request_construct_smp(struct device *dev,
if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
return SCI_FAILURE;
- ireq->protocol = SCIC_SMP_PROTOCOL;
+ ireq->protocol = SAS_PROTOCOL_SMP;
/* byte swap the smp request. */
@@ -3496,9 +3411,6 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t
ireq->io_request_completion = NULL;
ireq->flags = 0;
ireq->num_sg_entries = 0;
- INIT_LIST_HEAD(&ireq->completed_node);
- INIT_LIST_HEAD(&ireq->dev_node);
- isci_request_change_state(ireq, allocated);
return ireq;
}
@@ -3582,26 +3494,15 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
spin_unlock_irqrestore(&ihost->scic_lock, flags);
return status;
}
-
/* Either I/O started OK, or the core has signaled that
* the device needs a target reset.
- *
- * In either case, hold onto the I/O for later.
- *
- * Update its status and add it to the list in the
- * remote device object.
*/
- list_add(&ireq->dev_node, &idev->reqs_in_process);
-
- if (status == SCI_SUCCESS) {
- isci_request_change_state(ireq, started);
- } else {
+ if (status != SCI_SUCCESS) {
/* The request did not really start in the
* hardware, so clear the request handle
* here so no terminations will be done.
*/
set_bit(IREQ_TERMINATED, &ireq->flags);
- isci_request_change_state(ireq, completed);
}
spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 057f2378452d..aff95317fcf4 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -61,30 +61,6 @@
#include "scu_task_context.h"
/**
- * struct isci_request_status - This enum defines the possible states of an I/O
- * request.
- *
- *
- */
-enum isci_request_status {
- unallocated = 0x00,
- allocated = 0x01,
- started = 0x02,
- completed = 0x03,
- aborting = 0x04,
- aborted = 0x05,
- terminating = 0x06,
- dead = 0x07
-};
-
-enum sci_request_protocol {
- SCIC_NO_PROTOCOL,
- SCIC_SMP_PROTOCOL,
- SCIC_SSP_PROTOCOL,
- SCIC_STP_PROTOCOL
-}; /* XXX remove me, use sas_task.{dev|task_proto} instead */;
-
-/**
* isci_stp_request - extra request infrastructure to handle pio/atapi protocol
* @pio_len - number of bytes requested at PIO setup
* @status - pio setup ending status value to tell us if we need
@@ -104,11 +80,14 @@ struct isci_stp_request {
};
struct isci_request {
- enum isci_request_status status;
#define IREQ_COMPLETE_IN_TARGET 0
#define IREQ_TERMINATED 1
#define IREQ_TMF 2
#define IREQ_ACTIVE 3
+ #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */
+ #define IREQ_TC_ABORT_POSTED 5
+ #define IREQ_ABORT_PATH_ACTIVE 6
+ #define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */
unsigned long flags;
/* XXX kill ttype and ttype_ptr, allocate full sas_task */
union ttype_ptr_union {
@@ -116,11 +95,6 @@ struct isci_request {
struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
} ttype_ptr;
struct isci_host *isci_host;
- /* For use in the requests_to_{complete|abort} lists: */
- struct list_head completed_node;
- /* For use in the reqs_in_process list: */
- struct list_head dev_node;
- spinlock_t state_lock;
dma_addr_t request_daddr;
dma_addr_t zero_scatter_daddr;
unsigned int num_sg_entries;
@@ -140,7 +114,7 @@ struct isci_request {
struct isci_host *owning_controller;
struct isci_remote_device *target_device;
u16 io_tag;
- enum sci_request_protocol protocol;
+ enum sas_protocol protocol;
u32 scu_status; /* hardware result */
u32 sci_status; /* upper layer disposition */
u32 post_context;
@@ -309,92 +283,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
return ireq->request_daddr + (requested_addr - base_addr);
}
-/**
- * isci_request_change_state() - This function sets the status of the request
- * object.
- * @request: This parameter points to the isci_request object
- * @status: This parameter is the new status of the object
- *
- */
-static inline enum isci_request_status
-isci_request_change_state(struct isci_request *isci_request,
- enum isci_request_status status)
-{
- enum isci_request_status old_state;
- unsigned long flags;
-
- dev_dbg(&isci_request->isci_host->pdev->dev,
- "%s: isci_request = %p, state = 0x%x\n",
- __func__,
- isci_request,
- status);
-
- BUG_ON(isci_request == NULL);
-
- spin_lock_irqsave(&isci_request->state_lock, flags);
- old_state = isci_request->status;
- isci_request->status = status;
- spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
- return old_state;
-}
-
-/**
- * isci_request_change_started_to_newstate() - This function sets the status of
- * the request object.
- * @request: This parameter points to the isci_request object
- * @status: This parameter is the new status of the object
- *
- * Returns the state previous to any change.
- */
-static inline enum isci_request_status
-isci_request_change_started_to_newstate(struct isci_request *isci_request,
- struct completion *completion_ptr,
- enum isci_request_status newstate)
-{
- enum isci_request_status old_state;
- unsigned long flags;
-
- spin_lock_irqsave(&isci_request->state_lock, flags);
-
- old_state = isci_request->status;
-
- if (old_state == started || old_state == aborting) {
- BUG_ON(isci_request->io_request_completion != NULL);
-
- isci_request->io_request_completion = completion_ptr;
- isci_request->status = newstate;
- }
-
- spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
- dev_dbg(&isci_request->isci_host->pdev->dev,
- "%s: isci_request = %p, old_state = 0x%x\n",
- __func__,
- isci_request,
- old_state);
-
- return old_state;
-}
-
-/**
- * isci_request_change_started_to_aborted() - This function sets the status of
- * the request object.
- * @request: This parameter points to the isci_request object
- * @completion_ptr: This parameter is saved as the kernel completion structure
- * signalled when the old request completes.
- *
- * Returns the state previous to any change.
- */
-static inline enum isci_request_status
-isci_request_change_started_to_aborted(struct isci_request *isci_request,
- struct completion *completion_ptr)
-{
- return isci_request_change_started_to_newstate(isci_request,
- completion_ptr,
- aborted);
-}
-
#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
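The spinlock-guarded state helpers removed above have no one-for-one replacement; request state now lives entirely in the atomic IREQ_* bits declared earlier in this header. A hedged sketch of the resulting idiom (hypothetical helper, not part of the patch; assumes this header's struct isci_request):

#include <linux/bitops.h>

/* Sketch: an enum transition taken under state_lock becomes a single
 * atomic bit operation on ireq->flags.
 */
static inline bool sketch_mark_terminated(struct isci_request *ireq)
{
	/* True only for the caller that wins the transition. */
	return !test_and_set_bit(IREQ_TERMINATED, &ireq->flags);
}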
@@ -404,8 +292,6 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
u16 tag);
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
struct sas_task *task, u16 tag);
-void isci_terminate_pending_requests(struct isci_host *ihost,
- struct isci_remote_device *idev);
enum sci_status
sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
@@ -421,5 +307,4 @@ static inline int isci_task_is_ncq_recovery(struct sas_task *task)
task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
}
-
#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
index c8b329c695f9..071cb74a211c 100644
--- a/drivers/scsi/isci/scu_completion_codes.h
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -224,6 +224,7 @@
* 32-bit value like we want, each immediate value must be cast to a u32.
*/
#define SCU_TASK_DONE_GOOD ((u32)0x00)
+#define SCU_TASK_DONE_TX_RAW_CMD_ERR ((u32)0x08)
#define SCU_TASK_DONE_CRC_ERR ((u32)0x14)
#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14)
#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15)
@@ -237,6 +238,7 @@
#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A)
#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A)
#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B)
+#define SCU_TASK_DONE_BREAK_RCVD ((u32)0x1B)
#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B)
#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C)
#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C)
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 374254ede9d4..6bc74eb012c9 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -78,54 +78,25 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
enum exec_status status)
{
- enum isci_completion_selection disposition;
+ unsigned long flags;
- disposition = isci_perform_normal_io_completion;
- disposition = isci_task_set_completion_status(task, response, status,
- disposition);
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n",
+ __func__, task, response, status);
- /* Tasks aborted specifically by a call to the lldd_abort_task
- * function should not be completed to the host in the regular path.
- */
- switch (disposition) {
- case isci_perform_normal_io_completion:
- /* Normal notification (task_done) */
- dev_dbg(&ihost->pdev->dev,
- "%s: Normal - task = %p, response=%d, "
- "status=%d\n",
- __func__, task, response, status);
-
- task->lldd_task = NULL;
- task->task_done(task);
- break;
-
- case isci_perform_aborted_io_completion:
- /*
- * No notification because this request is already in the
- * abort path.
- */
- dev_dbg(&ihost->pdev->dev,
- "%s: Aborted - task = %p, response=%d, "
- "status=%d\n",
- __func__, task, response, status);
- break;
+ spin_lock_irqsave(&task->task_state_lock, flags);
- case isci_perform_error_io_completion:
- /* Use sas_task_abort */
- dev_dbg(&ihost->pdev->dev,
- "%s: Error - task = %p, response=%d, "
- "status=%d\n",
- __func__, task, response, status);
- sas_task_abort(task);
- break;
+ task->task_status.resp = response;
+ task->task_status.stat = status;
- default:
- dev_dbg(&ihost->pdev->dev,
- "%s: isci task notification default case!",
- __func__);
- sas_task_abort(task);
- break;
- }
+ /* Normal notification (task_done) */
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ task->lldd_task = NULL;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ task->task_done(task);
}
#define for_each_sas_task(num, task) \
@@ -289,60 +260,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
return ireq;
}
-/**
-* isci_request_mark_zombie() - This function must be called with scic_lock held.
-*/
-static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
-{
- struct completion *tmf_completion = NULL;
- struct completion *req_completion;
-
- /* Set the request state to "dead". */
- ireq->status = dead;
-
- req_completion = ireq->io_request_completion;
- ireq->io_request_completion = NULL;
-
- if (test_bit(IREQ_TMF, &ireq->flags)) {
- /* Break links with the TMF request. */
- struct isci_tmf *tmf = isci_request_access_tmf(ireq);
-
- /* In the case where a task request is dying,
- * the thread waiting on the complete will sit and
- * timeout unless we wake it now. Since the TMF
- * has a default error status, complete it here
- * to wake the waiting thread.
- */
- if (tmf) {
- tmf_completion = tmf->complete;
- tmf->complete = NULL;
- }
- ireq->ttype_ptr.tmf_task_ptr = NULL;
- dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
- __func__, tmf->tmf_code, tmf->io_tag);
- } else {
- /* Break links with the sas_task - the callback is done
- * elsewhere.
- */
- struct sas_task *task = isci_request_access_task(ireq);
-
- if (task)
- task->lldd_task = NULL;
-
- ireq->ttype_ptr.io_task_ptr = NULL;
- }
-
- dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
- ireq->io_tag);
-
- /* Don't force waiting threads to timeout. */
- if (req_completion)
- complete(req_completion);
-
- if (tmf_completion != NULL)
- complete(tmf_completion);
-}
-
static int isci_task_execute_tmf(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_tmf *tmf, unsigned long timeout_ms)
@@ -400,17 +317,11 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
spin_unlock_irqrestore(&ihost->scic_lock, flags);
goto err_tci;
}
-
- if (tmf->cb_state_func != NULL)
- tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
-
- isci_request_change_state(ireq, started);
-
- /* add the request to the remote device request list. */
- list_add(&ireq->dev_node, &idev->reqs_in_process);
-
spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ /* The RNC must be unsuspended before the TMF can get a response. */
+ isci_remote_device_resume_from_abort(ihost, idev);
+
/* Wait for the TMF to complete, or a timeout. */
timeleft = wait_for_completion_timeout(&completion,
msecs_to_jiffies(timeout_ms));
@@ -419,32 +330,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
/* The TMF did not complete - this could be because
* of an unplug. Terminate the TMF request now.
*/
- spin_lock_irqsave(&ihost->scic_lock, flags);
-
- if (tmf->cb_state_func != NULL)
- tmf->cb_state_func(isci_tmf_timed_out, tmf,
- tmf->cb_data);
-
- sci_controller_terminate_request(ihost, idev, ireq);
-
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- timeleft = wait_for_completion_timeout(
- &completion,
- msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
-
- if (!timeleft) {
- /* Strange condition - the termination of the TMF
- * request timed-out.
- */
- spin_lock_irqsave(&ihost->scic_lock, flags);
-
- /* If the TMF status has not changed, kill it. */
- if (tmf->status == SCI_FAILURE_TIMEOUT)
- isci_request_mark_zombie(ihost, ireq);
-
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
- }
+ isci_remote_device_suspend_terminate(ihost, idev, ireq);
}
isci_print_tmf(ihost, tmf);
@@ -476,315 +362,21 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
}
static void isci_task_build_tmf(struct isci_tmf *tmf,
- enum isci_tmf_function_codes code,
- void (*tmf_sent_cb)(enum isci_tmf_cb_state,
- struct isci_tmf *,
- void *),
- void *cb_data)
+ enum isci_tmf_function_codes code)
{
memset(tmf, 0, sizeof(*tmf));
-
- tmf->tmf_code = code;
- tmf->cb_state_func = tmf_sent_cb;
- tmf->cb_data = cb_data;
+ tmf->tmf_code = code;
}
static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
enum isci_tmf_function_codes code,
- void (*tmf_sent_cb)(enum isci_tmf_cb_state,
- struct isci_tmf *,
- void *),
struct isci_request *old_request)
{
- isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
+ isci_task_build_tmf(tmf, code);
tmf->io_tag = old_request->io_tag;
}
/**
- * isci_task_validate_request_to_abort() - This function checks the given I/O
- * against the "started" state. If the request is still "started", it's
- * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
- * BEFORE CALLING THIS FUNCTION.
- * @isci_request: This parameter specifies the request object to control.
- * @isci_host: This parameter specifies the ISCI host object
- * @isci_device: This is the device to which the request is pending.
- * @aborted_io_completion: This is a completion structure that will be added to
- * the request in case it is changed to aborting; this completion is
- * triggered when the request is fully completed.
- *
- * Either "started" on successful change of the task status to "aborted", or
- * "unallocated" if the task cannot be controlled.
- */
-static enum isci_request_status isci_task_validate_request_to_abort(
- struct isci_request *isci_request,
- struct isci_host *isci_host,
- struct isci_remote_device *isci_device,
- struct completion *aborted_io_completion)
-{
- enum isci_request_status old_state = unallocated;
-
- /* Only abort the task if it's in the
- * device's request_in_process list
- */
- if (isci_request && !list_empty(&isci_request->dev_node)) {
- old_state = isci_request_change_started_to_aborted(
- isci_request, aborted_io_completion);
-
- }
-
- return old_state;
-}
-
-static int isci_request_is_dealloc_managed(enum isci_request_status stat)
-{
- switch (stat) {
- case aborted:
- case aborting:
- case terminating:
- case completed:
- case dead:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * isci_terminate_request_core() - This function will terminate the given
- * request, and wait for it to complete. This function must only be called
- * from a thread that can wait. Note that the request is terminated and
- * completed (back to the host, if started there).
- * @ihost: This SCU.
- * @idev: The target.
- * @isci_request: The I/O request to be terminated.
- *
- */
-static void isci_terminate_request_core(struct isci_host *ihost,
- struct isci_remote_device *idev,
- struct isci_request *isci_request)
-{
- enum sci_status status = SCI_SUCCESS;
- bool was_terminated = false;
- bool needs_cleanup_handling = false;
- unsigned long flags;
- unsigned long termination_completed = 1;
- struct completion *io_request_completion;
-
- dev_dbg(&ihost->pdev->dev,
- "%s: device = %p; request = %p\n",
- __func__, idev, isci_request);
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
-
- io_request_completion = isci_request->io_request_completion;
-
- /* Note that we are not going to control
- * the target to abort the request.
- */
- set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
-
- /* Make sure the request wasn't just sitting around signalling
- * device condition (if the request handle is NULL, then the
- * request completed but needed additional handling here).
- */
- if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
- was_terminated = true;
- needs_cleanup_handling = true;
- status = sci_controller_terminate_request(ihost,
- idev,
- isci_request);
- }
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- /*
- * The only time the request to terminate will
- * fail is when the io request is completed and
- * being aborted.
- */
- if (status != SCI_SUCCESS) {
- dev_dbg(&ihost->pdev->dev,
- "%s: sci_controller_terminate_request"
- " returned = 0x%x\n",
- __func__, status);
-
- isci_request->io_request_completion = NULL;
-
- } else {
- if (was_terminated) {
- dev_dbg(&ihost->pdev->dev,
- "%s: before completion wait (%p/%p)\n",
- __func__, isci_request, io_request_completion);
-
- /* Wait here for the request to complete. */
- termination_completed
- = wait_for_completion_timeout(
- io_request_completion,
- msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
-
- if (!termination_completed) {
-
- /* The request to terminate has timed out. */
- spin_lock_irqsave(&ihost->scic_lock, flags);
-
- /* Check for state changes. */
- if (!test_bit(IREQ_TERMINATED,
- &isci_request->flags)) {
-
- /* The best we can do is to have the
- * request die a silent death if it
- * ever really completes.
- */
- isci_request_mark_zombie(ihost,
- isci_request);
- needs_cleanup_handling = true;
- } else
- termination_completed = 1;
-
- spin_unlock_irqrestore(&ihost->scic_lock,
- flags);
-
- if (!termination_completed) {
-
- dev_dbg(&ihost->pdev->dev,
- "%s: *** Timeout waiting for "
- "termination(%p/%p)\n",
- __func__, io_request_completion,
- isci_request);
-
- /* The request can no longer be referenced
- * safely since it may go away if the
- * termination ever really does complete.
- */
- isci_request = NULL;
- }
- }
- if (termination_completed)
- dev_dbg(&ihost->pdev->dev,
- "%s: after completion wait (%p/%p)\n",
- __func__, isci_request, io_request_completion);
- }
-
- if (termination_completed) {
-
- isci_request->io_request_completion = NULL;
-
- /* Peek at the status of the request. This will tell
- * us if there was special handling on the request such that it
- * needs to be detached and freed here.
- */
- spin_lock_irqsave(&isci_request->state_lock, flags);
-
- needs_cleanup_handling
- = isci_request_is_dealloc_managed(
- isci_request->status);
-
- spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
- }
- if (needs_cleanup_handling) {
-
- dev_dbg(&ihost->pdev->dev,
- "%s: cleanup isci_device=%p, request=%p\n",
- __func__, idev, isci_request);
-
- if (isci_request != NULL) {
- spin_lock_irqsave(&ihost->scic_lock, flags);
- isci_free_tag(ihost, isci_request->io_tag);
- isci_request_change_state(isci_request, unallocated);
- list_del_init(&isci_request->dev_node);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
- }
- }
- }
-}
-
-/**
- * isci_terminate_pending_requests() - This function will change all of the
- * requests on the given device's state to "aborting", will terminate the
- * requests, and wait for them to complete. This function must only be
- * called from a thread that can wait. Note that the requests are all
- * terminated and completed (back to the host, if started there).
- * @isci_host: This parameter specifies SCU.
- * @idev: This parameter specifies the target.
- *
- */
-void isci_terminate_pending_requests(struct isci_host *ihost,
- struct isci_remote_device *idev)
-{
- struct completion request_completion;
- enum isci_request_status old_state;
- unsigned long flags;
- LIST_HEAD(list);
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
- list_splice_init(&idev->reqs_in_process, &list);
-
- /* assumes that isci_terminate_request_core deletes from the list */
- while (!list_empty(&list)) {
- struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
-
- /* Change state to "terminating" if it is currently
- * "started".
- */
- old_state = isci_request_change_started_to_newstate(ireq,
- &request_completion,
- terminating);
- switch (old_state) {
- case started:
- case completed:
- case aborting:
- break;
- default:
- /* termination in progress, or otherwise dispositioned.
- * We know the request was on 'list' so should be safe
- * to move it back to reqs_in_process
- */
- list_move(&ireq->dev_node, &idev->reqs_in_process);
- ireq = NULL;
- break;
- }
-
- if (!ireq)
- continue;
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- init_completion(&request_completion);
-
- dev_dbg(&ihost->pdev->dev,
- "%s: idev=%p request=%p; task=%p old_state=%d\n",
- __func__, idev, ireq,
- (!test_bit(IREQ_TMF, &ireq->flags)
- ? isci_request_access_task(ireq)
- : NULL),
- old_state);
-
- /* If the old_state is started:
- * This request was not already being aborted. If it had been,
- * then the aborting I/O (ie. the TMF request) would not be in
- * the aborting state, and thus would be terminated here. Note
- * that since the TMF completion's call to the kernel function
- * "complete()" does not happen until the pending I/O request
- * terminate fully completes, we do not have to implement a
- * special wait here for already aborting requests - the
- * termination of the TMF request will force the request
- * to finish its already-started terminate.
- *
- * If old_state == completed:
- * This request completed from the SCU hardware perspective
- * and now just needs cleaning up in terms of freeing the
- * request and potentially calling up to libsas.
- *
- * If old_state == aborting:
- * This request has already gone through a TMF timeout, but may
- * not have been terminated; needs cleaning up at least.
- */
- isci_terminate_request_core(ihost, idev, ireq);
- spin_lock_irqsave(&ihost->scic_lock, flags);
- }
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-}
-
-/**
* isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain
* Template functions.
* @lun: This parameter specifies the lun to be reset.
@@ -807,7 +399,7 @@ static int isci_task_send_lu_reset_sas(
* value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
* was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED")).
*/
- isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
+ isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset);
#define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
@@ -826,42 +418,44 @@ static int isci_task_send_lu_reset_sas(
int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
{
- struct isci_host *isci_host = dev_to_ihost(dev);
- struct isci_remote_device *isci_device;
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
unsigned long flags;
- int ret;
+ int ret = TMF_RESP_FUNC_COMPLETE;
- spin_lock_irqsave(&isci_host->scic_lock, flags);
- isci_device = isci_lookup_device(dev);
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_get_device(dev->lldd_dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
- dev_dbg(&isci_host->pdev->dev,
+ dev_dbg(&ihost->pdev->dev,
"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
- __func__, dev, isci_host, isci_device);
+ __func__, dev, ihost, idev);
- if (!isci_device) {
- /* If the device is gone, stop the escalations. */
- dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);
+ if (!idev) {
+ /* If the device is gone, escalate to I_T_Nexus_Reset. */
+ dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);
- ret = TMF_RESP_FUNC_COMPLETE;
+ ret = TMF_RESP_FUNC_FAILED;
goto out;
}
- /* Send the task management part of the reset. */
- if (dev_is_sata(dev)) {
- sas_ata_schedule_reset(dev);
- ret = TMF_RESP_FUNC_COMPLETE;
- } else
- ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
-
- /* If the LUN reset worked, all the I/O can now be terminated. */
- if (ret == TMF_RESP_FUNC_COMPLETE)
- /* Terminate all I/O now. */
- isci_terminate_pending_requests(isci_host,
- isci_device);
-
+ /* Suspend the RNC, kill all TCs */
+ if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+ != SCI_SUCCESS) {
+ /* The suspend/terminate only fails if isci_get_device fails */
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
+ /* All pending I/Os have been terminated and cleaned up. */
+ if (!test_bit(IDEV_GONE, &idev->flags)) {
+ if (dev_is_sata(dev))
+ sas_ata_schedule_reset(dev);
+ else
+ /* Send the task management part of the reset. */
+ ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
+ }
out:
- isci_put_device(isci_device);
+ isci_put_device(idev);
return ret;
}
@@ -882,63 +476,6 @@ int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
/* Task Management Functions. Must be called from process context. */
/**
- * isci_abort_task_process_cb() - This is a helper function for the abort task
- * TMF command. It manages the request state with respect to the successful
- * transmission / completion of the abort task request.
- * @cb_state: This parameter specifies when this function was called - after
- * the TMF request has been started and after it has timed-out.
- * @tmf: This parameter specifies the TMF in progress.
- *
- *
- */
-static void isci_abort_task_process_cb(
- enum isci_tmf_cb_state cb_state,
- struct isci_tmf *tmf,
- void *cb_data)
-{
- struct isci_request *old_request;
-
- old_request = (struct isci_request *)cb_data;
-
- dev_dbg(&old_request->isci_host->pdev->dev,
- "%s: tmf=%p, old_request=%p\n",
- __func__, tmf, old_request);
-
- switch (cb_state) {
-
- case isci_tmf_started:
- /* The TMF has been started. Nothing to do here, since the
- * request state was already set to "aborted" by the abort
- * task function.
- */
- if ((old_request->status != aborted)
- && (old_request->status != completed))
- dev_dbg(&old_request->isci_host->pdev->dev,
- "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
- __func__, old_request->status, tmf, old_request);
- break;
-
- case isci_tmf_timed_out:
-
- /* Set the task's state to "aborting", since the abort task
- * function thread set it to "aborted" (above) in anticipation
- * of the task management request working correctly. Since the
- * timeout has now fired, the TMF request failed. We set the
- * state such that the request completion will indicate the
- * device is no longer present.
- */
- isci_request_change_state(old_request, aborting);
- break;
-
- default:
- dev_dbg(&old_request->isci_host->pdev->dev,
- "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
- __func__, cb_state, tmf, old_request);
- break;
- }
-}
-
-/**
* isci_task_abort_task() - This function is one of the SAS Domain Template
* functions. This function is called by libsas to abort a specified task.
* @task: This parameter specifies the SAS task to abort.
@@ -947,22 +484,20 @@ static void isci_abort_task_process_cb(
*/
int isci_task_abort_task(struct sas_task *task)
{
- struct isci_host *isci_host = dev_to_ihost(task->dev);
+ struct isci_host *ihost = dev_to_ihost(task->dev);
DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
struct isci_request *old_request = NULL;
- enum isci_request_status old_state;
- struct isci_remote_device *isci_device = NULL;
+ struct isci_remote_device *idev = NULL;
struct isci_tmf tmf;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
- int perform_termination = 0;
/* Get the isci_request reference from the task. Note that
* this check does not depend on the pending request list
* in the device, because tasks driving resets may land here
* after completion in the core.
*/
- spin_lock_irqsave(&isci_host->scic_lock, flags);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
spin_lock(&task->task_state_lock);
old_request = task->lldd_task;
@@ -971,20 +506,29 @@ int isci_task_abort_task(struct sas_task *task)
if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
(task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
old_request)
- isci_device = isci_lookup_device(task->dev);
+ idev = isci_get_device(task->dev->lldd_dev);
spin_unlock(&task->task_state_lock);
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
- dev_dbg(&isci_host->pdev->dev,
- "%s: dev = %p, task = %p, old_request == %p\n",
- __func__, isci_device, task, old_request);
+ dev_warn(&ihost->pdev->dev,
+ "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
+ __func__, idev,
+ (dev_is_sata(task->dev) ? "STP/SATA"
+ : ((dev_is_expander(task->dev))
+ ? "SMP"
+ : "SSP")),
+ ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
+ ? " IDEV_GONE"
+ : "")
+ : " <NULL>"),
+ task, old_request);
/* Device reset conditions signalled in task_state_flags are the
* responsibility of libsas to observe at the start of the error
* handler thread.
*/
- if (!isci_device || !old_request) {
+ if (!idev || !old_request) {
/* The request has already completed and there
* is nothing to do here other than to set the task
* done bit, and indicate that the task abort function
@@ -998,108 +542,72 @@ int isci_task_abort_task(struct sas_task *task)
ret = TMF_RESP_FUNC_COMPLETE;
- dev_dbg(&isci_host->pdev->dev,
- "%s: abort task not needed for %p\n",
- __func__, task);
+ dev_warn(&ihost->pdev->dev,
+ "%s: abort task not needed for %p\n",
+ __func__, task);
goto out;
}
-
- spin_lock_irqsave(&isci_host->scic_lock, flags);
-
- /* Check the request status and change to "aborted" if currently
- * "starting"; if true then set the I/O kernel completion
- * struct that will be triggered when the request completes.
- */
- old_state = isci_task_validate_request_to_abort(
- old_request, isci_host, isci_device,
- &aborted_io_completion);
- if ((old_state != started) &&
- (old_state != completed) &&
- (old_state != aborting)) {
-
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
-
- /* The request was already being handled by someone else (because
- * they got to set the state away from started).
- */
- dev_dbg(&isci_host->pdev->dev,
- "%s: device = %p; old_request %p already being aborted\n",
- __func__,
- isci_device, old_request);
- ret = TMF_RESP_FUNC_COMPLETE;
+ /* Suspend the RNC, kill the TC */
+ if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
+ != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: isci_remote_device_reset_terminate(dev=%p, "
+ "req=%p, task=%p) failed\n",
+ __func__, idev, old_request, task);
+ ret = TMF_RESP_FUNC_FAILED;
goto out;
}
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
if (task->task_proto == SAS_PROTOCOL_SMP ||
sas_protocol_ata(task->task_proto) ||
- test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+ test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
+ test_bit(IDEV_GONE, &idev->flags)) {
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
- dev_dbg(&isci_host->pdev->dev,
- "%s: %s request"
- " or complete_in_target (%d), thus no TMF\n",
- __func__,
- ((task->task_proto == SAS_PROTOCOL_SMP)
- ? "SMP"
- : (sas_protocol_ata(task->task_proto)
- ? "SATA/STP"
- : "<other>")
- ),
- test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
-
- if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- ret = TMF_RESP_FUNC_COMPLETE;
- } else {
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- }
+ /* No task to send, so explicitly resume the device here */
+ isci_remote_device_resume_from_abort(ihost, idev);
- /* STP and SMP devices are not sent a TMF, but the
- * outstanding I/O request is terminated below. This is
- * because SATA/STP and SMP discovery path timeouts directly
- * call the abort task interface for cleanup.
- */
- perform_termination = 1;
+ dev_warn(&ihost->pdev->dev,
+ "%s: %s request"
+ " or complete_in_target (%d), "
+ "or IDEV_GONE (%d), thus no TMF\n",
+ __func__,
+ ((task->task_proto == SAS_PROTOCOL_SMP)
+ ? "SMP"
+ : (sas_protocol_ata(task->task_proto)
+ ? "SATA/STP"
+ : "<other>")
+ ),
+ test_bit(IREQ_COMPLETE_IN_TARGET,
+ &old_request->flags),
+ test_bit(IDEV_GONE, &idev->flags));
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ ret = TMF_RESP_FUNC_COMPLETE;
} else {
/* Fill in the tmf structure */
isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
- isci_abort_task_process_cb,
old_request);
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ /* Send the task management request. */
#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
- ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+ ret = isci_task_execute_tmf(ihost, idev, &tmf,
ISCI_ABORT_TASK_TIMEOUT_MS);
-
- if (ret == TMF_RESP_FUNC_COMPLETE)
- perform_termination = 1;
- else
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_task_send_tmf failed\n", __func__);
}
- if (perform_termination) {
- set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
-
- /* Clean up the request on our side, and wait for the aborted
- * I/O to complete.
- */
- isci_terminate_request_core(isci_host, isci_device,
- old_request);
- }
-
- /* Make sure we do not leave a reference to aborted_io_completion */
- old_request->io_request_completion = NULL;
- out:
- isci_put_device(isci_device);
+out:
+ dev_warn(&ihost->pdev->dev,
+ "%s: Done; dev = %p, task = %p , old_request == %p\n",
+ __func__, idev, task, old_request);
+ isci_put_device(idev);
return ret;
}
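Reassembled from the hunks above, the new isci_task_abort_task() decision flow is short. A condensed sketch with locking, logging and reference counting trimmed (every call below appears in this diff; the driver's internal headers are assumed):

static int sketch_abort_task_flow(struct isci_host *ihost,
				  struct isci_remote_device *idev,
				  struct isci_request *old_request,
				  struct sas_task *task)
{
	struct isci_tmf tmf;

	/* Quiesce the device and terminate the TC backing this task. */
	if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
	    != SCI_SUCCESS)
		return TMF_RESP_FUNC_FAILED;

	if (task->task_proto == SAS_PROTOCOL_SMP ||
	    sas_protocol_ata(task->task_proto) ||
	    test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
	    test_bit(IDEV_GONE, &idev->flags)) {
		/* No TMF will be sent, so resume the device explicitly. */
		isci_remote_device_resume_from_abort(ihost, idev);
		return TMF_RESP_FUNC_COMPLETE;
	}

	/* SSP with the command still outstanding: send abort-task. */
	isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
				       old_request);
	return isci_task_execute_tmf(ihost, idev, &tmf,
				     500 /* ISCI_ABORT_TASK_TIMEOUT_MS */);
}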
@@ -1195,14 +703,11 @@ isci_task_request_complete(struct isci_host *ihost,
{
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
struct completion *tmf_complete = NULL;
- struct completion *request_complete = ireq->io_request_completion;
dev_dbg(&ihost->pdev->dev,
"%s: request = %p, status=%d\n",
__func__, ireq, completion_status);
- isci_request_change_state(ireq, completed);
-
set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
if (tmf) {
@@ -1226,20 +731,11 @@ isci_task_request_complete(struct isci_host *ihost,
*/
set_bit(IREQ_TERMINATED, &ireq->flags);
- /* As soon as something is in the terminate path, deallocation is
- * managed there. Note that the final non-managed state of a task
- * request is "completed".
- */
- if ((ireq->status == completed) ||
- !isci_request_is_dealloc_managed(ireq->status)) {
- isci_request_change_state(ireq, unallocated);
- isci_free_tag(ihost, ireq->io_tag);
- list_del_init(&ireq->dev_node);
- }
+ if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+ wake_up_all(&ihost->eventq);
- /* "request_complete" is set if the task was being terminated. */
- if (request_complete)
- complete(request_complete);
+ if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+ isci_free_tag(ihost, ireq->io_tag);
/* The task management part completes last. */
if (tmf_complete)
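The test_and_clear_bit()/wake_up_all() pair above is half of a new synchronization: the abort path sets IREQ_ABORT_PATH_ACTIVE and sleeps on ihost->eventq until completion clears it here. A hypothetical sketch of the waiting side (the real waiter lives in abort-path code outside this diff):

#include <linux/wait.h>
#include <linux/bitops.h>

static void sketch_wait_for_abort_path_done(struct isci_host *ihost,
					    struct isci_request *ireq)
{
	/* Pairs with the wake_up_all() in isci_task_request_complete(). */
	wait_event(ihost->eventq,
		   !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags));
}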
@@ -1250,48 +746,38 @@ static int isci_reset_device(struct isci_host *ihost,
struct domain_device *dev,
struct isci_remote_device *idev)
{
- int rc;
- unsigned long flags;
- enum sci_status status;
+ int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1;
struct sas_phy *phy = sas_get_local_phy(dev);
struct isci_port *iport = dev->port->lldd_port;
dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
- spin_lock_irqsave(&ihost->scic_lock, flags);
- status = sci_remote_device_reset(idev);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- if (status != SCI_SUCCESS) {
- dev_dbg(&ihost->pdev->dev,
- "%s: sci_remote_device_reset(%p) returned %d!\n",
- __func__, idev, status);
+ /* Suspend the RNC, terminate all outstanding TCs. */
+ if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+ != SCI_SUCCESS) {
rc = TMF_RESP_FUNC_FAILED;
goto out;
}
-
- if (scsi_is_sas_phy_local(phy)) {
- struct isci_phy *iphy = &ihost->phys[phy->number];
-
- rc = isci_port_perform_hard_reset(ihost, iport, iphy);
- } else
- rc = sas_phy_reset(phy, !dev_is_sata(dev));
-
- /* Terminate in-progress I/O now. */
- isci_remote_device_nuke_requests(ihost, idev);
-
- /* Since all pending TCs have been cleaned, resume the RNC. */
- spin_lock_irqsave(&ihost->scic_lock, flags);
- status = sci_remote_device_reset_complete(idev);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- if (status != SCI_SUCCESS) {
- dev_dbg(&ihost->pdev->dev,
- "%s: sci_remote_device_reset_complete(%p) "
- "returned %d!\n", __func__, idev, status);
+ /* Note that since the termination for outstanding requests succeeded,
+ * this function will return success. This is because the resets will
+ * only fail if the device has been removed (i.e. hotplug), and the
+ * primary duty of this function is to clean up tasks, so that is the
+ * relevant status.
+ */
+ if (!test_bit(IDEV_GONE, &idev->flags)) {
+ if (scsi_is_sas_phy_local(phy)) {
+ struct isci_phy *iphy = &ihost->phys[phy->number];
+
+ reset_stat = isci_port_perform_hard_reset(ihost, iport,
+ iphy);
+ } else
+ reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
}
+ /* Explicitly resume the RNC here, since there was no task sent. */
+ isci_remote_device_resume_from_abort(ihost, idev);
- dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
+ dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n",
+ __func__, idev, reset_stat);
out:
sas_put_local_phy(phy);
return rc;
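isci_reset_device() now reads as three steps: quiesce, reset, resume. A condensed sketch of that ordering, with the local-phy branch and logging trimmed (names as used in the hunk above):

static int sketch_reset_flow(struct isci_host *ihost,
			     struct isci_remote_device *idev,
			     struct domain_device *dev)
{
	/* 1) Suspend the RNC and terminate every outstanding TC. */
	if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
	    != SCI_SUCCESS)
		return TMF_RESP_FUNC_FAILED;

	/* 2) Reset only if the device is still present. */
	if (!test_bit(IDEV_GONE, &idev->flags)) {
		struct sas_phy *phy = sas_get_local_phy(dev);

		sas_phy_reset(phy, !dev_is_sata(dev));
		sas_put_local_phy(phy);
	}

	/* 3) No task was sent on this path; resume the RNC by hand. */
	isci_remote_device_resume_from_abort(ihost, idev);
	return TMF_RESP_FUNC_COMPLETE;
}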
@@ -1305,7 +791,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
int ret;
spin_lock_irqsave(&ihost->scic_lock, flags);
- idev = isci_lookup_device(dev);
+ idev = isci_get_device(dev->lldd_dev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (!idev) {
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 7b6d0e32fd9b..9c06cbad1d26 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -63,19 +63,6 @@
struct isci_request;
/**
- * enum isci_tmf_cb_state - This enum defines the possible states in which the
- * TMF callback function is invoked during the TMF execution process.
- *
- *
- */
-enum isci_tmf_cb_state {
-
- isci_tmf_init_state = 0,
- isci_tmf_started,
- isci_tmf_timed_out
-};
-
-/**
* enum isci_tmf_function_codes - This enum defines the possible preparations
* of task management requests.
*
@@ -87,6 +74,7 @@ enum isci_tmf_function_codes {
isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
isci_tmf_ssp_lun_reset = TMF_LU_RESET,
};
+
/**
* struct isci_tmf - This class represents the task management object which
* acts as an interface to libsas for processing task management requests
@@ -106,15 +94,6 @@ struct isci_tmf {
u16 io_tag;
enum isci_tmf_function_codes tmf_code;
int status;
-
- /* The optional callback function allows the user process to
- * track the TMF transmit / timeout conditions.
- */
- void (*cb_state_func)(
- enum isci_tmf_cb_state,
- struct isci_tmf *, void *);
- void *cb_data;
-
};
static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
@@ -208,113 +187,4 @@ int isci_queuecommand(
struct scsi_cmnd *scsi_cmd,
void (*donefunc)(struct scsi_cmnd *));
-/**
- * enum isci_completion_selection - This enum defines the possible actions to
- * take with respect to a given request's notification back to libsas.
- *
- *
- */
-enum isci_completion_selection {
-
- isci_perform_normal_io_completion, /* Normal notify (task_done) */
- isci_perform_aborted_io_completion, /* No notification. */
- isci_perform_error_io_completion /* Use sas_task_abort */
-};
-
-/**
- * isci_task_set_completion_status() - This function sets the completion status
- * for the request.
- * @task: This parameter is the completed request.
- * @response: This parameter is the response code for the completed task.
- * @status: This parameter is the status code for the completed task.
- *
-* @return The new notification mode for the request.
-*/
-static inline enum isci_completion_selection
-isci_task_set_completion_status(
- struct sas_task *task,
- enum service_response response,
- enum exec_status status,
- enum isci_completion_selection task_notification_selection)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
-
- /* If a device reset is being indicated, make sure the I/O
- * is in the error path.
- */
- if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
- /* Fail the I/O to make sure it goes into the error path. */
- response = SAS_TASK_UNDELIVERED;
- status = SAM_STAT_TASK_ABORTED;
-
- task_notification_selection = isci_perform_error_io_completion;
- }
- task->task_status.resp = response;
- task->task_status.stat = status;
-
- switch (task->task_proto) {
-
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-
- if (task_notification_selection
- == isci_perform_error_io_completion) {
- /* SATA/STP I/O has its own means of scheduling device
- * error handling on the normal path.
- */
- task_notification_selection
- = isci_perform_normal_io_completion;
- }
- break;
- default:
- break;
- }
-
- switch (task_notification_selection) {
-
- case isci_perform_error_io_completion:
-
- if (task->task_proto == SAS_PROTOCOL_SMP) {
- /* There is no error escalation in the SMP case.
- * Convert to a normal completion to avoid the
- * timeout in the discovery path and to let the
- * next action take place quickly.
- */
- task_notification_selection
- = isci_perform_normal_io_completion;
-
- /* Fall through to the normal case... */
- } else {
- /* Use sas_task_abort */
- /* Leave SAS_TASK_STATE_DONE clear
- * Leave SAS_TASK_AT_INITIATOR set.
- */
- break;
- }
-
- case isci_perform_aborted_io_completion:
- /* This path can occur with task-managed requests as well as
- * requests terminated because of LUN or device resets.
- */
- /* Fall through to the normal case... */
- case isci_perform_normal_io_completion:
- /* Normal notification (task_done) */
- task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
- break;
- default:
- WARN_ONCE(1, "unknown task_notification_selection: %d\n",
- task_notification_selection);
- break;
- }
-
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- return task_notification_selection;
-
-}
#endif /* !defined(_SCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
index 16f88ab939c8..04a6d0d59a22 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.c
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -57,31 +57,19 @@
#include "unsolicited_frame_control.h"
#include "registers.h"
-int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
+void sci_unsolicited_frame_control_construct(struct isci_host *ihost)
{
struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
struct sci_unsolicited_frame *uf;
- u32 buf_len, header_len, i;
- dma_addr_t dma;
- size_t size;
- void *virt;
-
- /*
- * Prepare all of the memory sizes for the UF headers, UF address
- * table, and UF buffers themselves.
- */
- buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
- header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
- size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
+ dma_addr_t dma = ihost->ufi_dma;
+ void *virt = ihost->ufi_buf;
+ int i;
/*
* The Unsolicited Frame buffers are set at the start of the UF
* memory descriptor entry. The headers and address table will be
* placed after the buffers.
*/
- virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
- if (!virt)
- return -ENOMEM;
/*
* Program the location of the UF header table into the SCU.
@@ -93,8 +81,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
* headers, since we program the UF address table pointers to
* NULL.
*/
- uf_control->headers.physical_address = dma + buf_len;
- uf_control->headers.array = virt + buf_len;
+ uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE;
+ uf_control->headers.array = virt + SCI_UFI_BUF_SIZE;
/*
* Program the location of the UF address table into the SCU.
@@ -103,8 +91,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
* byte boundary already due to above programming headers being on a
 * 64-bit boundary and headers being 64 bytes in size.
*/
- uf_control->address_table.physical_address = dma + buf_len + header_len;
- uf_control->address_table.array = virt + buf_len + header_len;
+ uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
+ uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
uf_control->get = 0;
/*
@@ -135,8 +123,6 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
}
-
- return 0;
}
enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
index 75d896686f5a..1bc551ec611f 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.h
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -257,9 +257,13 @@ struct sci_unsolicited_frame_control {
};
+#define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE)
+#define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header))
+#define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64))
+
struct isci_host;
-int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
+void sci_unsolicited_frame_control_construct(struct isci_host *ihost);
enum sci_status sci_unsolicited_frame_control_get_header(
struct sci_unsolicited_frame_control *uf_control,
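
The SCI_UFI_* macros above describe one contiguous DMA region carved into
three sub-regions: frame buffers first, then frame headers, then the
address table, which is why the construct routine now only offsets into
ihost->ufi_buf instead of allocating. Below is a minimal standalone sketch
of that layout arithmetic; the frame count and header size are illustrative
assumptions, not the driver's real values.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins; the driver takes these from its headers. */
#define SCU_MAX_UNSOLICITED_FRAMES		128
#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE	1024
#define UF_HEADER_SIZE				64	/* assumed header size */

#define UFI_BUF_SIZE	(SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE)
#define UFI_HDR_SIZE	(SCU_MAX_UNSOLICITED_FRAMES * UF_HEADER_SIZE)
#define UFI_TOTAL_SIZE	(UFI_BUF_SIZE + UFI_HDR_SIZE + \
			 SCU_MAX_UNSOLICITED_FRAMES * sizeof(uint64_t))

int main(void)
{
	/* One region: buffers at offset 0, headers next, table last. */
	size_t hdr_off = UFI_BUF_SIZE;
	size_t tbl_off = UFI_BUF_SIZE + UFI_HDR_SIZE;

	printf("buffers @ 0, headers @ %zu, table @ %zu, total %zu bytes\n",
	       hdr_off, tbl_off, (size_t)UFI_TOTAL_SIZE);
	return 0;
}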
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index cc83b66d45b7..c1402fb499ab 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -648,6 +648,7 @@ int fc_lport_destroy(struct fc_lport *lport)
lport->tt.fcp_abort_io(lport);
lport->tt.disc_stop_final(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
+ cancel_delayed_work_sync(&lport->retry_work);
fc_fc4_del_lport(lport);
return 0;
}
@@ -1564,7 +1565,6 @@ static void fc_lport_timeout(struct work_struct *work)
switch (lport->state) {
case LPORT_ST_DISABLED:
- WARN_ON(1);
break;
case LPORT_ST_READY:
break;
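
The one-line fc_lport_destroy fix above is the standard delayed-work
teardown pattern: cancel the work synchronously before the owning object
goes away, so an already-queued retry can never run against freed state. A
hedged kernel-style sketch of the pattern follows; the struct and names are
hypothetical, not libfc's.

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_port {				/* hypothetical owner */
	struct delayed_work retry_work;
	/* ... state the work handler dereferences ... */
};

static void my_port_destroy(struct my_port *port)
{
	/*
	 * Deactivate the timer and wait for any running handler to
	 * finish, so the handler can never observe a freed *port.
	 */
	cancel_delayed_work_sync(&port->retry_work);
	kfree(port);
}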
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 3a1ffdd6d831..e5da6da20f8a 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -93,6 +93,9 @@ struct lpfc_sli2_slim;
/* lpfc wait event data ready flag */
#define LPFC_DATA_READY (1<<0)
+/* queue dump line buffer size */
+#define LPFC_LBUF_SZ 128
+
enum lpfc_polling_flags {
ENABLE_FCP_RING_POLLING = 0x1,
DISABLE_FCP_RING_INT = 0x2
@@ -620,6 +623,7 @@ struct lpfc_hba {
#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
+#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 141e4b40bb55..253d9a857346 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -599,6 +599,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
cmdiocbq->context1 = dd_data;
+ cmdiocbq->context_un.ndlp = ndlp;
cmdiocbq->context2 = rspiocbq;
dd_data->type = TYPE_IOCB;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -3978,6 +3979,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
switch (opcode) {
case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
+ case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index edfe61fc52b1..67f7d0a160d1 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2010 Emulex. All rights reserved. *
+ * Copyright (C) 2010-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -249,6 +249,7 @@ struct lpfc_sli_config_emb1_subsys {
#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
#define COMN_OPCODE_DELETE_OBJECT 0xAE
#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79
+#define COMN_OPCODE_GET_CNTL_ATTRIBUTES 0x20
uint32_t timeout;
uint32_t request_length;
uint32_t word9;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 330dd7192a7f..9b2a16f3bc79 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -254,6 +254,7 @@ int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
+struct lpfc_iocbq *__lpfc_sli_get_iocbq(struct lpfc_hba *);
struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
@@ -460,6 +461,7 @@ int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
int lpfc_issue_reg_vfi(struct lpfc_vport *);
int lpfc_issue_unreg_vfi(struct lpfc_vport *);
int lpfc_selective_reset(struct lpfc_hba *);
-int lpfc_sli4_read_config(struct lpfc_hba *phba);
-int lpfc_scsi_buf_update(struct lpfc_hba *phba);
-void lpfc_sli4_node_prep(struct lpfc_hba *phba);
+int lpfc_sli4_read_config(struct lpfc_hba *);
+void lpfc_sli4_node_prep(struct lpfc_hba *);
+int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
+void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index af04b0d6688d..3217d63ed282 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -4466,3 +4466,49 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
#endif
return;
}
+
+/*
+ * Driver debug utility routines outside of debugfs. The debug utility
+ * routines implemented here are intended to be used in the instrumented
+ * debug driver for debugging host or port issues.
+ */
+
+/**
+ * lpfc_debug_dump_all_queues - dump all the queues associated with an HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps entries of all the queues associated with the @phba.
+ **/
+void
+lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
+{
+ int fcp_wqidx;
+
+ /*
+ * Dump Work Queues (WQs)
+ */
+ lpfc_debug_dump_mbx_wq(phba);
+ lpfc_debug_dump_els_wq(phba);
+
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+ lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
+
+ lpfc_debug_dump_hdr_rq(phba);
+ lpfc_debug_dump_dat_rq(phba);
+ /*
+ * Dump Complete Queues (CQs)
+ */
+ lpfc_debug_dump_mbx_cq(phba);
+ lpfc_debug_dump_els_cq(phba);
+
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+ lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
+
+ /*
+ * Dump Event Queues (EQs)
+ */
+ lpfc_debug_dump_sp_eq(phba);
+
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+ lpfc_debug_dump_fcp_eq(phba, fcp_wqidx);
+}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index f83bd944edd8..616c400dae14 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -267,3 +267,421 @@ struct lpfc_idiag {
#define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general
* discovery */
#endif /* H_LPFC_DEBUG_FS */
+
+
+/*
+ * Driver debug utility routines outside of debugfs. The debug utility
+ * routines implemented here are intended to be used in the instrumented
+ * debug driver for debugging host or port issues.
+ */
+
+/**
+ * lpfc_debug_dump_qe - dump a specific entry from a queue
+ * @q: Pointer to the queue descriptor.
+ * @idx: Index to the entry on the queue.
+ *
+ * This function dumps an entry indexed by @idx from a queue specified by the
+ * queue descriptor @q.
+ **/
+static inline void
+lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
+{
+ char line_buf[LPFC_LBUF_SZ];
+ int i, esize, qe_word_cnt, len;
+ uint32_t *pword;
+
+ /* sanity checks */
+ if (!q)
+ return;
+ if (idx >= q->entry_count)
+ return;
+
+ esize = q->entry_size;
+ qe_word_cnt = esize / sizeof(uint32_t);
+ pword = q->qe[idx].address;
+
+ len = 0;
+ len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
+ if (qe_word_cnt > 8)
+ printk(KERN_ERR "%s\n", line_buf);
+
+ for (i = 0; i < qe_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ if (qe_word_cnt > 8) {
+ len = 0;
+ memset(line_buf, 0, LPFC_LBUF_SZ);
+ len += snprintf(line_buf+len, LPFC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ }
+ len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
+ ((uint32_t)*pword) & 0xffffffff);
+ pword++;
+ }
+ if (qe_word_cnt <= 8 || (i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+}
+
+/**
+ * lpfc_debug_dump_q - dump all entries from a specific queue
+ * @q: Pointer to the queue descriptor.
+ *
+ * This function dumps all entries from a queue specified by the queue
+ * descriptor @q.
+ **/
+static inline void
+lpfc_debug_dump_q(struct lpfc_queue *q)
+{
+ int idx, entry_count;
+
+ /* sanity check */
+ if (!q)
+ return;
+
+ dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev,
+ "%d: [qid:%d, type:%d, subtype:%d, "
+ "qe_size:%d, qe_count:%d, "
+ "host_index:%d, port_index:%d]\n",
+ (q->phba)->brd_no,
+ q->queue_id, q->type, q->subtype,
+ q->entry_size, q->entry_count,
+ q->host_index, q->hba_index);
+ entry_count = q->entry_count;
+ for (idx = 0; idx < entry_count; idx++)
+ lpfc_debug_dump_qe(q, idx);
+ printk(KERN_ERR "\n");
+}
+
+/**
+ * lpfc_debug_dump_fcp_wq - dump all entries from an FCP work queue
+ * @phba: Pointer to HBA context object.
+ * @fcp_wqidx: Index to an FCP work queue.
+ *
+ * This function dumps all entries from the FCP work queue specified by
+ * @fcp_wqidx.
+ **/
+static inline void
+lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
+{
+ /* sanity check */
+ if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+ return;
+
+ printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
+ fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[fcp_wqidx]);
+}
+
+/**
+ * lpfc_debug_dump_fcp_cq - dump all entries from an FCP work queue's cmpl queue
+ * @phba: Pointer to HBA context object.
+ * @fcp_wqidx: Index to an FCP work queue.
+ *
+ * This function dumps all entries from the FCP complete queue associated
+ * with the FCP work queue specified by @fcp_wqidx.
+ **/
+static inline void
+lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
+{
+ int fcp_cqidx, fcp_cqid;
+
+ /* sanity check */
+ if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+ return;
+
+ fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+ if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
+ break;
+ if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+ return;
+
+	printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]:\n",
+ fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+ fcp_cqidx, fcp_cqid);
+ lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[fcp_cqidx]);
+}
+
+/**
+ * lpfc_debug_dump_fcp_eq - dump all entries from an FCP work queue's evt queue
+ * @phba: Pointer to HBA context object.
+ * @fcp_wqidx: Index to an FCP work queue.
+ *
+ * This function dumps all entries from the FCP event queue associated
+ * with the FCP work queue specified by @fcp_wqidx.
+ **/
+static inline void
+lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
+{
+ struct lpfc_queue *qdesc;
+ int fcp_eqidx, fcp_eqid;
+ int fcp_cqidx, fcp_cqid;
+
+ /* sanity check */
+ if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+ return;
+ fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+ if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
+ break;
+ if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+ return;
+
+ if (phba->cfg_fcp_eq_count == 0) {
+ fcp_eqidx = -1;
+ fcp_eqid = phba->sli4_hba.sp_eq->queue_id;
+ qdesc = phba->sli4_hba.sp_eq;
+ } else {
+ fcp_eqidx = fcp_cqidx;
+ fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id;
+ qdesc = phba->sli4_hba.fp_eq[fcp_eqidx];
+ }
+
+ printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
+ "EQ[Idx:%d|Qid:%d]\n",
+ fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+ fcp_cqidx, fcp_cqid, fcp_eqidx, fcp_eqid);
+ lpfc_debug_dump_q(qdesc);
+}
+
+/**
+ * lpfc_debug_dump_els_wq - dump all entries from the els work queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the ELS work queue.
+ **/
+static inline void
+lpfc_debug_dump_els_wq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "ELS WQ: WQ[Qid:%d]:\n",
+ phba->sli4_hba.els_wq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.els_wq);
+}
+
+/**
+ * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the MBOX work queue.
+ **/
+static inline void
+lpfc_debug_dump_mbx_wq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "MBX WQ: WQ[Qid:%d]\n",
+ phba->sli4_hba.mbx_wq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
+}
+
+/**
+ * lpfc_debug_dump_dat_rq - dump all entries from the receive data queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the receive data queue.
+ **/
+static inline void
+lpfc_debug_dump_dat_rq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "DAT RQ: RQ[Qid:%d]\n",
+ phba->sli4_hba.dat_rq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.dat_rq);
+}
+
+/**
+ * lpfc_debug_dump_hdr_rq - dump all entries from the receive header queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the receive header queue.
+ **/
+static inline void
+lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "HDR RQ: RQ[Qid:%d]\n",
+ phba->sli4_hba.hdr_rq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
+}
+
+/**
+ * lpfc_debug_dump_els_cq - dump all entries from the els complete queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the els complete queue.
+ **/
+static inline void
+lpfc_debug_dump_els_cq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "ELS CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
+ phba->sli4_hba.els_wq->queue_id,
+ phba->sli4_hba.els_cq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.els_cq);
+}
+
+/**
+ * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the mbox complete queue.
+ **/
+static inline void
+lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "MBX CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
+ phba->sli4_hba.mbx_wq->queue_id,
+ phba->sli4_hba.mbx_cq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
+}
+
+/**
+ * lpfc_debug_dump_sp_eq - dump all entries from slow-path event queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the slow-path event queue.
+ **/
+static inline void
+lpfc_debug_dump_sp_eq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->"
+ "EQ[Qid:%d]:\n",
+ phba->sli4_hba.mbx_wq->queue_id,
+ phba->sli4_hba.els_wq->queue_id,
+ phba->sli4_hba.mbx_cq->queue_id,
+ phba->sli4_hba.els_cq->queue_id,
+ phba->sli4_hba.sp_eq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
+}
+
+/**
+ * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Work queue identifier.
+ *
+ * This function dumps all entries from a work queue identified by the queue
+ * identifier.
+ **/
+static inline void
+lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
+{
+ int wq_idx;
+
+ for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++)
+ if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
+ break;
+ if (wq_idx < phba->cfg_fcp_wq_count) {
+ printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
+ return;
+ }
+
+ if (phba->sli4_hba.els_wq->queue_id == qid) {
+ printk(KERN_ERR "ELS WQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.els_wq);
+ }
+}
+
+/**
+ * lpfc_debug_dump_mq_by_id - dump all entries from a mbox queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Mbox work queue identifier.
+ *
+ * This function dumps all entries from a mbox work queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid)
+{
+ if (phba->sli4_hba.mbx_wq->queue_id == qid) {
+ printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
+ }
+}
+
+/**
+ * lpfc_debug_dump_rq_by_id - dump all entries from a receive queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Receive queue identifier.
+ *
+ * This function dumps all entries from a receive queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid)
+{
+ if (phba->sli4_hba.hdr_rq->queue_id == qid) {
+ printk(KERN_ERR "HDR RQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
+ return;
+ }
+ if (phba->sli4_hba.dat_rq->queue_id == qid) {
+ printk(KERN_ERR "DAT RQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.dat_rq);
+ }
+}
+
+/**
+ * lpfc_debug_dump_cq_by_id - dump all entries from a cmpl queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Complete queue identifier.
+ *
+ * This function dumps all entries from a complete queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
+{
+ int cq_idx = 0;
+
+ do {
+ if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
+ break;
+ } while (++cq_idx < phba->cfg_fcp_eq_count);
+
+ if (cq_idx < phba->cfg_fcp_eq_count) {
+ printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
+ return;
+ }
+
+ if (phba->sli4_hba.els_cq->queue_id == qid) {
+ printk(KERN_ERR "ELS CQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.els_cq);
+ return;
+ }
+
+ if (phba->sli4_hba.mbx_cq->queue_id == qid) {
+ printk(KERN_ERR "MBX CQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
+ }
+}
+
+/**
+ * lpfc_debug_dump_eq_by_id - dump all entries from an event queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Event queue identifier.
+ *
+ * This function dumps all entries from an event queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
+{
+ int eq_idx;
+
+ for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) {
+ if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid)
+ break;
+ }
+
+ if (eq_idx < phba->cfg_fcp_eq_count) {
+ printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]);
+ return;
+ }
+
+ if (phba->sli4_hba.sp_eq->queue_id == qid) {
+		printk(KERN_ERR "SP EQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
+ }
+}
+
+void lpfc_debug_dump_all_queues(struct lpfc_hba *);
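
lpfc_debug_dump_qe above batches eight 32-bit words per printk by
accumulating them into a bounded line buffer. Here is the same formatting
loop in standalone userspace form, in case the flow is easier to follow
outside the driver; the buffer size and sample data are illustrative.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define LBUF_SZ 128			/* mirrors LPFC_LBUF_SZ */

static void dump_words(const uint32_t *pword, int qe_word_cnt)
{
	char line_buf[LBUF_SZ];
	int i, len = 0;

	for (i = 0; i < qe_word_cnt; i++) {
		if (!(i % 8)) {		/* start a fresh line every 8 words */
			if (i != 0)
				printf("%s\n", line_buf);
			memset(line_buf, 0, LBUF_SZ);
			len = snprintf(line_buf, LBUF_SZ, "%03d: ", i);
		}
		len += snprintf(line_buf + len, LBUF_SZ - len, "%08x ",
				pword[i]);
	}
	if (qe_word_cnt)
		printf("%s\n", line_buf);	/* flush the final line */
}

int main(void)
{
	uint32_t qe[16];
	int i;

	for (i = 0; i < 16; i++)		/* sample queue entry words */
		qe[i] = 0xdead0000u + i;
	dump_words(qe, 16);
	return 0;
}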
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 3407b39e0a3f..d54ae1999797 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -230,27 +230,43 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
INIT_LIST_HEAD(&pbuflist->list);
- icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.elsreq64.remoteID = did; /* DID */
if (expectRsp) {
+ icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+ icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+ icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+
+ icmd->un.elsreq64.remoteID = did; /* DID */
icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
icmd->ulpTimeout = phba->fc_ratov * 2;
} else {
- icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+ icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+ icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
}
icmd->ulpBdeCount = 1;
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
- icmd->un.elsreq64.myID = vport->fc_myDID;
+ /*
+ * If we have NPIV enabled, we want to send ELS traffic by VPI.
+ * For SLI4, since the driver controls VPIs we also want to include
+ * all ELS pt2pt protocol traffic as well.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
+ ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (vport->fc_flag & FC_PT2PT))) {
+
+ if (expectRsp) {
+ icmd->un.elsreq64.myID = vport->fc_myDID;
+
+ /* For ELS_REQUEST64_CR, use the VPI by default */
+ icmd->ulpContext = phba->vpi_ids[vport->vpi];
+ }
- /* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = phba->vpi_ids[vport->vpi];
icmd->ulpCt_h = 0;
/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
if (elscmd == ELS_CMD_ECHO)
@@ -438,9 +454,10 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
int rc = 0;
sp = &phba->fc_fabparam;
- /* move forward in case of SLI4 FC port loopback test */
+ /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
if ((phba->sli_rev == LPFC_SLI_REV4) &&
- !(phba->link_flag & LS_LOOPBACK_MODE)) {
+ !(phba->link_flag & LS_LOOPBACK_MODE) &&
+ !(vport->fc_flag & FC_PT2PT)) {
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
rc = -ENODEV;
@@ -707,14 +724,17 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- /*
- * If VPI is unreged, driver need to do INIT_VPI
- * before re-registering
- */
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
+
+ /*
+ * For SLI3 and SLI4, the VPI needs to be reregistered in
+ * response to this fabric parameter change event.
+ */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/*
@@ -817,6 +837,17 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
mempool_free(mbox, phba->mbox_mem_pool);
goto fail;
}
+
+ /*
+ * For SLI4, the VFI/VPI are registered AFTER the
+ * Nport with the higher WWPN sends the PLOGI with
+ * an assigned NPortId.
+ */
+
+ /* not equal */
+ if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
+ lpfc_issue_reg_vfi(vport);
+
/* Decrement ndlp reference count indicating that ndlp can be
* safely released when other references to it are done.
*/
@@ -2972,7 +3003,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 * ABTS we cannot generate an RRQ.
*/
lpfc_set_rrq_active(phba, ndlp,
- cmdiocb->sli4_xritag, 0, 0);
+ cmdiocb->sli4_lxritag, 0, 0);
}
break;
case IOSTAT_LOCAL_REJECT:
@@ -3803,10 +3834,11 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
/* Xmit ELS ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
- "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
+ "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
+ "fc_flag x%x\n",
elsiocb->iotag, elsiocb->iocb.ulpContext,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi);
+ ndlp->nlp_rpi, vport->fc_flag);
if (ndlp->nlp_flag & NLP_LOGO_ACC) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
@@ -4936,8 +4968,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
}
- did = Fabric_DID;
-
if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
/* For a FLOGI we accept, then if our portname is greater
* then the remote portname we initiate Nport login.
@@ -4976,26 +5006,82 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_PT2PT_PLOGI;
spin_unlock_irq(shost->host_lock);
+
+ /* If we have the high WWPN we can assign our own
+ * myDID; otherwise, we have to WAIT for a PLOGI
+ * from the remote NPort to find out what it
+ * will be.
+ */
+ vport->fc_myDID = PT2PT_LocalID;
}
+
+ /*
+ * The vport state should go to LPFC_FLOGI only
+ * AFTER we issue a FLOGI, not receive one.
+ */
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_PT2PT;
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
spin_unlock_irq(shost->host_lock);
+
+ /*
+ * We temporarily set fc_myDID to make it look like we are
+ * a Fabric. This is done just so we end up with the right
+ * did / sid on the FLOGI ACC rsp.
+ */
+ did = vport->fc_myDID;
+ vport->fc_myDID = Fabric_DID;
+
} else {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsvd0 = 0;
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
stat.un.b.vendorUnique = 0;
+
+ /*
+ * We temporarily set fc_myDID to make it look like we are
+ * a Fabric. This is done just so we end up with the right
+ * did / sid on the FLOGI LS_RJT rsp.
+ */
+ did = vport->fc_myDID;
+ vport->fc_myDID = Fabric_DID;
+
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
NULL);
+
+	/* Now let's put fc_myDID back to what it's supposed to be */
+ vport->fc_myDID = did;
+
return 1;
}
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+	/* Now let's put fc_myDID back to what it's supposed to be */
+ vport->fc_myDID = did;
+
+ if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
+ }
+ }
+
return 0;
+fail:
+ return 1;
}
/**
@@ -5176,7 +5262,6 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
- mempool_free(pmb, phba->mbox_mem_pool);
elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
lpfc_max_els_tries, ndlp,
ndlp->nlp_DID, ELS_CMD_ACC);
@@ -5184,8 +5269,10 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Decrement the ndlp reference count from previous mbox command */
lpfc_nlp_put(ndlp);
- if (!elsiocb)
+ if (!elsiocb) {
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
+ }
icmd = &elsiocb->iocb;
icmd->ulpContext = rxid;
@@ -5202,7 +5289,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
-
+ mempool_free(pmb, phba->mbox_mem_pool);
/* Xmit ELS RLS ACC response tag <ulpIoTag> */
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
"2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
@@ -5586,7 +5673,7 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
pcmd += sizeof(uint32_t);
els_rrq = (struct RRQ *) pcmd;
- bf_set(rrq_oxid, els_rrq, rrq->xritag);
+ bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
bf_set(rrq_rxid, els_rrq, rrq->rxid);
bf_set(rrq_did, els_rrq, vport->fc_myDID);
els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
@@ -7873,7 +7960,9 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
sglq_entry->state = SGL_FREED;
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+ lpfc_set_rrq_active(phba, ndlp,
+ sglq_entry->sli4_lxritag,
+ rxid, 1);
/* Check if TXQ queue needs to be serviced */
if (pring->txq_cnt)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index b507536dc5b5..5bb269e224f6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -713,6 +713,7 @@ lpfc_do_work(void *p)
int rc;
set_user_nice(current, -20);
+ current->flags |= PF_NOFREEZE;
phba->data_flags = 0;
while (!kthread_should_stop()) {
@@ -1094,7 +1095,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- if (vport->port_state != LPFC_FLOGI)
+ if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
lpfc_initial_flogi(vport);
return;
@@ -2881,9 +2882,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
- /* For private loop just start discovery and we are done. */
- if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
- !(vport->fc_flag & FC_PUBLIC_LOOP)) {
+ /*
+ * For private loop or for NPort pt2pt,
+ * just start discovery and we are done.
+ */
+ if ((vport->fc_flag & FC_PT2PT) ||
+ ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+ !(vport->fc_flag & FC_PUBLIC_LOOP))) {
+
/* Use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
/* Start discovery */
@@ -5490,9 +5496,9 @@ lpfc_nlp_release(struct kref *kref)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0279 lpfc_nlp_release: ndlp:x%p "
+ "0279 lpfc_nlp_release: ndlp:x%p did %x "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
atomic_read(&ndlp->kref.refcount));
/* remove ndlp from action. */
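
The lpfc_do_work change marks the worker kthread PF_NOFREEZE so the
freezer cannot park it across suspend/resume while discovery events are
still pending. A sketch of the idiom in a hypothetical driver worker; the
loop body is a placeholder.

#include <linux/kthread.h>
#include <linux/sched.h>

static int my_do_work(void *p)
{
	/* Keep servicing events even while the system is freezing. */
	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		/* ... wait for and handle driver events here ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}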
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5f280b5ae3db..41bb1d2fb625 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -3374,6 +3374,9 @@ typedef struct {
WORD5 w5; /* Header control/status word */
} XMT_SEQ_FIELDS64;
+/* This word is remote ports D_ID for XMIT_ELS_RSP64 */
+#define xmit_els_remoteID xrsqRo
+
/* IOCB Command template for 64 bit RCV_SEQUENCE64 */
typedef struct {
struct ulp_bde64 rcvBde;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 91f09761bd32..f1946dfda5b4 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -228,19 +228,15 @@ struct lpfc_sli4_flags {
#define lpfc_idx_rsrc_rdy_MASK 0x00000001
#define lpfc_idx_rsrc_rdy_WORD word0
#define LPFC_IDX_RSRC_RDY 1
-#define lpfc_xri_rsrc_rdy_SHIFT 1
-#define lpfc_xri_rsrc_rdy_MASK 0x00000001
-#define lpfc_xri_rsrc_rdy_WORD word0
-#define LPFC_XRI_RSRC_RDY 1
-#define lpfc_rpi_rsrc_rdy_SHIFT 2
+#define lpfc_rpi_rsrc_rdy_SHIFT 1
#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
#define lpfc_rpi_rsrc_rdy_WORD word0
#define LPFC_RPI_RSRC_RDY 1
-#define lpfc_vpi_rsrc_rdy_SHIFT 3
+#define lpfc_vpi_rsrc_rdy_SHIFT 2
#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
#define lpfc_vpi_rsrc_rdy_WORD word0
#define LPFC_VPI_RSRC_RDY 1
-#define lpfc_vfi_rsrc_rdy_SHIFT 4
+#define lpfc_vfi_rsrc_rdy_SHIFT 3
#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
#define lpfc_vfi_rsrc_rdy_WORD word0
#define LPFC_VFI_RSRC_RDY 1
@@ -3299,7 +3295,13 @@ struct els_request64_wqe {
struct xmit_els_rsp64_wqe {
struct ulp_bde64 bde;
uint32_t response_payload_len;
- uint32_t rsvd4;
+ uint32_t word4;
+#define els_rsp64_sid_SHIFT 0
+#define els_rsp64_sid_MASK 0x00FFFFFF
+#define els_rsp64_sid_WORD word4
+#define els_rsp64_sp_SHIFT 24
+#define els_rsp64_sp_MASK 0x00000001
+#define els_rsp64_sp_WORD word4
struct wqe_did wqe_dest;
struct wqe_common wqe_com; /* words 6-11 */
uint32_t word12;
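
The new els_rsp64_sid/els_rsp64_sp definitions follow the driver's
SHIFT/MASK/WORD triplet convention, consumed by token-pasting accessors
such as bf_set/bf_get. The standalone sketch below shows how such
accessors resolve; the macro bodies are a plausible reimplementation of
the idiom, not copied from lpfc.

#include <stdio.h>
#include <stdint.h>

struct xmit_els_rsp64_wqe {
	uint32_t word4;
};

#define els_rsp64_sid_SHIFT	0
#define els_rsp64_sid_MASK	0x00FFFFFF
#define els_rsp64_sid_WORD	word4
#define els_rsp64_sp_SHIFT	24
#define els_rsp64_sp_MASK	0x00000001
#define els_rsp64_sp_WORD	word4

/* Token-pasting accessors in the style of the driver's bf_set/bf_get. */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((ptr)->name##_WORD & \
		~(name##_MASK << name##_SHIFT)) | \
		(((value) & name##_MASK) << name##_SHIFT)))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

int main(void)
{
	struct xmit_els_rsp64_wqe wqe = { 0 };

	bf_set(els_rsp64_sid, &wqe, 0x0000EF);	/* 24-bit S_ID field */
	bf_set(els_rsp64_sp, &wqe, 1);		/* single bit at bit 24 */
	printf("word4=0x%08x sid=0x%06x sp=%u\n",
	       wqe.word4, bf_get(els_rsp64_sid, &wqe),
	       bf_get(els_rsp64_sp, &wqe));
	return 0;
}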
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 9598fdcb08ab..411ed48d79da 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -64,8 +64,8 @@ static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
-static void lpfc_free_sgl_list(struct lpfc_hba *);
-static int lpfc_init_sgl_list(struct lpfc_hba *);
+static void lpfc_free_els_sgl_list(struct lpfc_hba *);
+static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
@@ -2767,47 +2767,14 @@ lpfc_offline(struct lpfc_hba *phba)
}
/**
- * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine goes through all the scsi buffers in the system and updates the
- * Physical XRIs assigned to the SCSI buffer because these may change after any
- * firmware reset
- *
- * Return codes
- * 0 - successful (for now, it always returns 0)
- **/
-int
-lpfc_scsi_buf_update(struct lpfc_hba *phba)
-{
- struct lpfc_scsi_buf *sb, *sb_next;
-
- spin_lock_irq(&phba->hbalock);
- spin_lock(&phba->scsi_buf_list_lock);
- list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
- sb->cur_iocbq.sli4_xritag =
- phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
- set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
- phba->sli4_hba.max_cfg_param.xri_used++;
- phba->sli4_hba.xri_count++;
- }
- spin_unlock(&phba->scsi_buf_list_lock);
- spin_unlock_irq(&phba->hbalock);
- return 0;
-}
-
-/**
* lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
* @phba: pointer to lpfc hba data structure.
*
* This routine is to free all the SCSI buffers and IOCBs from the driver
* list back to kernel. It is called from lpfc_pci_remove_one to free
* the internal resources before the device is removed from the system.
- *
- * Return codes
- * 0 - successful (for now, it always returns 0)
**/
-static int
+static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
struct lpfc_scsi_buf *sb, *sb_next;
@@ -2833,7 +2800,178 @@ lpfc_scsi_free(struct lpfc_hba *phba)
}
spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * scsi sgl lists, and then goes through all sgls to update the physical
+ * XRIs assigned due to port function reset. During port initialization, the
+ * current els and allocated scsi sgl list counts are both zero.
+ *
+ * Return codes
+ *   0 - successful
+ *   -ENOMEM - failed to allocate memory for an sgl or xri
+ **/
+int
+lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
+ struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
+ uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
+ LIST_HEAD(els_sgl_list);
+ LIST_HEAD(scsi_sgl_list);
+ int rc;
+
+ /*
+ * update on pci function's els xri-sgl list
+ */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+ if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
+ /* els xri-sgl expanded */
+ xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3157 ELS xri-sgl count increased from "
+ "%d to %d\n", phba->sli4_hba.els_xri_cnt,
+ els_xri_cnt);
+ /* allocate the additional els sgls */
+ for (i = 0; i < xri_cnt; i++) {
+ sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
+ GFP_KERNEL);
+ if (sglq_entry == NULL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2562 Failure to allocate an "
+ "ELS sgl entry:%d\n", i);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->buff_type = GEN_BUFF_TYPE;
+ sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
+ &sglq_entry->phys);
+ if (sglq_entry->virt == NULL) {
+ kfree(sglq_entry);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2563 Failure to allocate an "
+ "ELS mbuf:%d\n", i);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->sgl = sglq_entry->virt;
+ memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
+ sglq_entry->state = SGL_FREED;
+ list_add_tail(&sglq_entry->list, &els_sgl_list);
+ }
+ spin_lock(&phba->hbalock);
+ list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&phba->hbalock);
+ } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
+		/* els xri-sgl shrunk */
+ xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3158 ELS xri-sgl count decreased from "
+ "%d to %d\n", phba->sli4_hba.els_xri_cnt,
+ els_xri_cnt);
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
+ spin_unlock_irq(&phba->hbalock);
+ /* release extra els sgls from list */
+ for (i = 0; i < xri_cnt; i++) {
+ list_remove_head(&els_sgl_list,
+ sglq_entry, struct lpfc_sglq, list);
+ if (sglq_entry) {
+ lpfc_mbuf_free(phba, sglq_entry->virt,
+ sglq_entry->phys);
+ kfree(sglq_entry);
+ }
+ }
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock_irq(&phba->hbalock);
+ } else
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3163 ELS xri-sgl count unchanged: %d\n",
+ els_xri_cnt);
+ phba->sli4_hba.els_xri_cnt = els_xri_cnt;
+
+ /* update xris to els sgls on the list */
+ sglq_entry = NULL;
+ sglq_entry_next = NULL;
+ list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+ &phba->sli4_hba.lpfc_sgl_list, list) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2400 Failed to allocate xri for "
+ "ELS sgl\n");
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+
+ /*
+ * update on pci function's allocated scsi xri-sgl list
+ */
+ phba->total_scsi_bufs = 0;
+
+ /* maximum number of xris available for scsi buffers */
+ phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
+ els_xri_cnt;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2401 Current allocated SCSI xri-sgl count:%d, "
+ "maximum SCSI xri count:%d\n",
+ phba->sli4_hba.scsi_xri_cnt,
+ phba->sli4_hba.scsi_xri_max);
+
+ spin_lock_irq(&phba->scsi_buf_list_lock);
+ list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
+ spin_unlock_irq(&phba->scsi_buf_list_lock);
+
+ if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
+		/* max scsi xri shrunk below the allocated scsi buffers */
+ scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
+ phba->sli4_hba.scsi_xri_max;
+ /* release the extra allocated scsi buffers */
+ for (i = 0; i < scsi_xri_cnt; i++) {
+ list_remove_head(&scsi_sgl_list, psb,
+ struct lpfc_scsi_buf, list);
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
+ psb->dma_handle);
+ kfree(psb);
+ }
+ spin_lock_irq(&phba->scsi_buf_list_lock);
+ phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
+ spin_unlock_irq(&phba->scsi_buf_list_lock);
+ }
+
+ /* update xris associated to remaining allocated scsi buffers */
+ psb = NULL;
+ psb_next = NULL;
+ list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2560 Failed to allocate xri for "
+ "scsi buffer\n");
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ psb->cur_iocbq.sli4_lxritag = lxri;
+ psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+ spin_lock(&phba->scsi_buf_list_lock);
+ list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
+ spin_unlock(&phba->scsi_buf_list_lock);
+
return 0;
+
+out_free_mem:
+ lpfc_free_els_sgl_list(phba);
+ lpfc_scsi_free(phba);
+ return rc;
}
/**
@@ -4636,18 +4774,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (rc)
goto out_free_bsmbx;
- /* Initialize and populate the iocb list per host */
- rc = lpfc_init_sgl_list(phba);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1400 Failed to initialize sgl list.\n");
- goto out_destroy_cq_event_pool;
- }
+ /* Initialize sgl lists per host */
+ lpfc_init_sgl_list(phba);
+
+ /* Allocate and initialize active sgl array */
rc = lpfc_init_active_sgl_array(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1430 Failed to initialize sgl list.\n");
- goto out_free_sgl_list;
+ goto out_destroy_cq_event_pool;
}
rc = lpfc_sli4_init_rpi_hdrs(phba);
if (rc) {
@@ -4722,8 +4857,6 @@ out_remove_rpi_hdrs:
lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
lpfc_free_active_sgl(phba);
-out_free_sgl_list:
- lpfc_free_sgl_list(phba);
out_destroy_cq_event_pool:
lpfc_sli4_cq_event_pool_destroy(phba);
out_free_bsmbx:
@@ -4760,10 +4893,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
/* Free the ELS sgl list */
lpfc_free_active_sgl(phba);
- lpfc_free_sgl_list(phba);
-
- /* Free the SCSI sgl management array */
- kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+ lpfc_free_els_sgl_list(phba);
/* Free the completion queue EQ event pool */
lpfc_sli4_cq_event_release_all(phba);
@@ -4990,29 +5120,42 @@ out_free_iocbq:
}
/**
- * lpfc_free_sgl_list - Free sgl list.
+ * lpfc_free_sgl_list - Free a given sgl list.
* @phba: pointer to lpfc hba data structure.
+ * @sglq_list: pointer to the head of sgl list.
*
- * This routine is invoked to free the driver's sgl list and memory.
+ * This routine is invoked to free a given sgl list and its memory.
**/
-static void
-lpfc_free_sgl_list(struct lpfc_hba *phba)
+void
+lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+
+ list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
+ list_del(&sglq_entry->list);
+ lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
+ kfree(sglq_entry);
+ }
+}
+
+/**
+ * lpfc_free_els_sgl_list - Free els sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's els sgl list and memory.
+ **/
+static void
+lpfc_free_els_sgl_list(struct lpfc_hba *phba)
+{
LIST_HEAD(sglq_list);
+ /* Retrieve all els sgls from driver list */
spin_lock_irq(&phba->hbalock);
list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
spin_unlock_irq(&phba->hbalock);
- list_for_each_entry_safe(sglq_entry, sglq_next,
- &sglq_list, list) {
- list_del(&sglq_entry->list);
- lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
- kfree(sglq_entry);
- phba->sli4_hba.total_sglq_bufs--;
- }
- kfree(phba->sli4_hba.lpfc_els_sgl_array);
+ /* Now free the sgl list */
+ lpfc_free_sgl_list(phba, &sglq_list);
}
/**
@@ -5057,99 +5200,19 @@ lpfc_free_active_sgl(struct lpfc_hba *phba)
 * This routine is invoked to allocate and initialize the driver's sgl
* list and set up the sgl xritag tag array accordingly.
*
- * Return codes
- * 0 - successful
- * other values - error
**/
-static int
+static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
- struct lpfc_sglq *sglq_entry = NULL;
- int i;
- int els_xri_cnt;
-
- els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2400 ELS XRI count %d.\n",
- els_xri_cnt);
/* Initialize and populate the sglq list per host/VF. */
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
- /* Sanity check on XRI management */
- if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2562 No room left for SCSI XRI allocation: "
- "max_xri=%d, els_xri=%d\n",
- phba->sli4_hba.max_cfg_param.max_xri,
- els_xri_cnt);
- return -ENOMEM;
- }
-
- /* Allocate memory for the ELS XRI management array */
- phba->sli4_hba.lpfc_els_sgl_array =
- kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
- GFP_KERNEL);
-
- if (!phba->sli4_hba.lpfc_els_sgl_array) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2401 Failed to allocate memory for ELS "
- "XRI management array of size %d.\n",
- els_xri_cnt);
- return -ENOMEM;
- }
+	/* els xri-sgl bookkeeping */
+ phba->sli4_hba.els_xri_cnt = 0;
- /* Keep the SCSI XRI into the XRI management array */
- phba->sli4_hba.scsi_xri_max =
- phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+	/* scsi xri-buffer bookkeeping */
phba->sli4_hba.scsi_xri_cnt = 0;
- phba->sli4_hba.lpfc_scsi_psb_array =
- kzalloc((sizeof(struct lpfc_scsi_buf *) *
- phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
-
- if (!phba->sli4_hba.lpfc_scsi_psb_array) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2563 Failed to allocate memory for SCSI "
- "XRI management array of size %d.\n",
- phba->sli4_hba.scsi_xri_max);
- kfree(phba->sli4_hba.lpfc_els_sgl_array);
- return -ENOMEM;
- }
-
- for (i = 0; i < els_xri_cnt; i++) {
- sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
- if (sglq_entry == NULL) {
- printk(KERN_ERR "%s: only allocated %d sgls of "
- "expected %d count. Unloading driver.\n",
- __func__, i, els_xri_cnt);
- goto out_free_mem;
- }
-
- sglq_entry->buff_type = GEN_BUFF_TYPE;
- sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
- if (sglq_entry->virt == NULL) {
- kfree(sglq_entry);
- printk(KERN_ERR "%s: failed to allocate mbuf.\n"
- "Unloading driver.\n", __func__);
- goto out_free_mem;
- }
- sglq_entry->sgl = sglq_entry->virt;
- memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
-
- /* The list order is used by later block SGL registraton */
- spin_lock_irq(&phba->hbalock);
- sglq_entry->state = SGL_FREED;
- list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
- phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
- phba->sli4_hba.total_sglq_bufs++;
- spin_unlock_irq(&phba->hbalock);
- }
- return 0;
-
-out_free_mem:
- kfree(phba->sli4_hba.lpfc_scsi_psb_array);
- lpfc_free_sgl_list(phba);
- return -ENOMEM;
}
/**
@@ -7320,9 +7383,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
phba->sli4_hba.u.if_type2.ERR2regaddr);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2890 Port error detected during port "
- "reset(%d): port status reg 0x%x, "
+ "reset(%d): wait_tmo:%d ms, "
+ "port status reg 0x%x, "
"error 1=0x%x, error 2=0x%x\n",
- num_resets, reg_data.word0,
+ num_resets, rdy_chk*10,
+ reg_data.word0,
phba->work_status[0],
phba->work_status[1]);
rc = -ENODEV;
@@ -8694,8 +8759,11 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
/* Release all the vports against this physical port */
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+ continue;
fc_vport_terminate(vports[i]->fc_vport);
+ }
lpfc_destroy_vport_work_array(phba, vports);
/* Remove FC host and then SCSI host with the physical port */
@@ -9115,8 +9183,12 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
return 50;
else if (max_xri <= 1024)
return 100;
- else
+ else if (max_xri <= 1536)
return 150;
+ else if (max_xri <= 2048)
+ return 200;
+ else
+ return 250;
} else
return 0;
}
@@ -9455,8 +9527,11 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
/* Release all the vports against this physical port */
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+ continue;
fc_vport_terminate(vports[i]->fc_vport);
+ }
lpfc_destroy_vport_work_array(phba, vports);
/* Remove FC host and then SCSI host with the physical port */
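
lpfc_sli4_xri_sgl_update above boils down to a grow-or-shrink pass over a
preallocated pool, followed by retagging each survivor. Stripped of the
lpfc specifics, the shape of that pass is roughly the sketch below; the
list handling and names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical pool entry standing in for an lpfc sgl. */
struct entry {
	struct entry *next;
};

static struct entry *pool;
static int pool_cnt;

/* Grow or shrink the pool to a new target count, as the xri-sgl
 * update does after a port function reset changes resource limits. */
static int resize_pool(int target)
{
	while (pool_cnt < target) {	/* expanded: allocate the extras */
		struct entry *e = calloc(1, sizeof(*e));

		if (!e)
			return -1;
		e->next = pool;
		pool = e;
		pool_cnt++;
	}
	while (pool_cnt > target) {	/* shrunk: release the surplus */
		struct entry *e = pool;

		pool = e->next;
		free(e);
		pool_cnt--;
	}
	return 0;
}

int main(void)
{
	resize_pool(8);
	printf("after grow: %d entries\n", pool_cnt);
	resize_pool(3);
	printf("after shrink: %d entries\n", pool_cnt);
	return 0;
}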
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 15ca2a9a0cdd..9133a97f045f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -367,8 +367,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
}
+ /* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+
/* rcv'ed PLOGI decides what our NPortId will be */
vport->fc_myDID = icmd->un.rcvels.parmRo;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -382,6 +384,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
mempool_free(mbox, phba->mbox_mem_pool);
goto out;
}
+ /*
+ * For SLI4, the VFI/VPI are registered AFTER the
+ * Nport with the higher WWPN sends us a PLOGI with
+ * our assigned NPortId.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_issue_reg_vfi(vport);
lpfc_can_disctmo(vport);
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 88f3a83dbd2e..66e09069f281 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -399,6 +399,14 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
num_rsrc_err = atomic_read(&phba->num_rsrc_err);
num_cmd_success = atomic_read(&phba->num_cmd_success);
+ /*
+ * The error and success command counters are global per
+ * driver instance. If another handler has already
+ * operated on this error event, just exit.
+ */
+ if (num_rsrc_err == 0)
+ return;
+
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@@ -688,7 +696,8 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (ndlp) {
- lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+ lpfc_set_rrq_active(phba, ndlp,
+ psb->cur_iocbq.sli4_lxritag, rxid, 1);
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
}
lpfc_release_scsi_buf_s4(phba, psb);
@@ -718,72 +727,162 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_repost_scsi_sgl_list - Repsot the Scsi buffers sgl pages as block
+ * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
* @phba: pointer to lpfc hba data structure.
+ * @post_sblist: pointer to the scsi buffer list.
*
- * This routine walks the list of scsi buffers that have been allocated and
- * repost them to the HBA by using SGL block post. This is needed after a
- * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
- * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
- * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
+ * This routine walks the list of scsi buffers that is passed in. It attempts
+ * to construct blocks of scsi buffer sgls that contain contiguous xris and
+ * uses the non-embedded SGL block post mailbox commands to post them to the
+ * port. For a single SCSI buffer sgl with a non-contiguous xri, if any, it
+ * uses the embedded SGL post mailbox command for posting. The @post_sblist
+ * passed in must be a local list, so no lock is needed while manipulating it.
*
- * Returns: 0 = success, non-zero failure.
+ * Returns: 0 = failure, non-zero number of successfully posted buffers.
**/
int
-lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
+lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
+ struct list_head *post_sblist, int sb_count)
{
- struct lpfc_scsi_buf *psb;
- int index, status, bcnt = 0, rcnt = 0, rc = 0;
- LIST_HEAD(sblist);
-
- for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
- psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
- if (psb) {
- /* Remove from SCSI buffer list */
- list_del(&psb->list);
- /* Add it to a local SCSI buffer list */
- list_add_tail(&psb->list, &sblist);
- if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
- bcnt = rcnt;
- rcnt = 0;
+ struct lpfc_scsi_buf *psb, *psb_next;
+ int status;
+ int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
+ dma_addr_t pdma_phys_bpl1;
+ int last_xritag = NO_XRI;
+ LIST_HEAD(prep_sblist);
+ LIST_HEAD(blck_sblist);
+ LIST_HEAD(scsi_sblist);
+
+ /* sanity check */
+ if (sb_count <= 0)
+ return -EINVAL;
+
+ list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
+ list_del_init(&psb->list);
+ block_cnt++;
+ if ((last_xritag != NO_XRI) &&
+ (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
+ /* a hole in xri block, form a sgl posting block */
+ list_splice_init(&prep_sblist, &blck_sblist);
+ post_cnt = block_cnt - 1;
+ /* prepare list for next posting block */
+ list_add_tail(&psb->list, &prep_sblist);
+ block_cnt = 1;
+ } else {
+ /* prepare list for next posting block */
+ list_add_tail(&psb->list, &prep_sblist);
+ /* enough sgls for non-embed sgl mbox command */
+ if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+ list_splice_init(&prep_sblist, &blck_sblist);
+ post_cnt = block_cnt;
+ block_cnt = 0;
}
- } else
- /* A hole present in the XRI array, need to skip */
- bcnt = rcnt;
+ }
+ num_posting++;
+ last_xritag = psb->cur_iocbq.sli4_xritag;
- if (index == phba->sli4_hba.scsi_xri_cnt - 1)
- /* End of XRI array for SCSI buffer, complete */
- bcnt = rcnt;
+ /* end of repost sgl list condition for SCSI buffers */
+ if (num_posting == sb_count) {
+ if (post_cnt == 0) {
+ /* last sgl posting block */
+ list_splice_init(&prep_sblist, &blck_sblist);
+ post_cnt = block_cnt;
+ } else if (block_cnt == 1) {
+ /* last single sgl with non-contiguous xri */
+ if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = psb->dma_phys_bpl +
+ SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ status = lpfc_sli4_post_sgl(phba,
+ psb->dma_phys_bpl,
+ pdma_phys_bpl1,
+ psb->cur_iocbq.sli4_xritag);
+ if (status) {
+ /* failure, put on abort scsi list */
+ psb->exch_busy = 1;
+ } else {
+ /* success, put on SCSI buffer list */
+ psb->exch_busy = 0;
+ psb->status = IOSTAT_SUCCESS;
+ num_posted++;
+ }
+ /* success or failure, put on SCSI buffer sgl list */
+ list_add_tail(&psb->list, &scsi_sblist);
+ }
+ }
- /* Continue until collect up to a nembed page worth of sgls */
- if (bcnt == 0)
+ /* continue until a nembed page worth of sgls */
+ if (post_cnt == 0)
continue;
- /* Now, post the SCSI buffer list sgls as a block */
- if (!phba->sli4_hba.extents_in_use)
- status = lpfc_sli4_post_scsi_sgl_block(phba,
- &sblist,
- bcnt);
- else
- status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
- &sblist,
- bcnt);
- /* Reset SCSI buffer count for next round of posting */
- bcnt = 0;
- while (!list_empty(&sblist)) {
- list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
- list);
+
+ /* post block of SCSI buffer list sgls */
+ status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
+ post_cnt);
+
+ /* don't reset xritag due to hole in xri block */
+ if (block_cnt == 0)
+ last_xritag = NO_XRI;
+
+ /* reset SCSI buffer post count for next round of posting */
+ post_cnt = 0;
+
+ /* put block-posted SCSI buffers onto the SCSI buffer sgl list */
+ while (!list_empty(&blck_sblist)) {
+ list_remove_head(&blck_sblist, psb,
+ struct lpfc_scsi_buf, list);
if (status) {
- /* Put this back on the abort scsi list */
+ /* failure, put on abort scsi list */
psb->exch_busy = 1;
- rc++;
} else {
+ /* success, put on SCSI buffer list */
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
+ num_posted++;
}
- /* Put it back into the SCSI buffer list */
- lpfc_release_scsi_buf_s4(phba, psb);
+ list_add_tail(&psb->list, &scsi_sblist);
}
}
+ /* Push SCSI buffers with sgl posted to the available list */
+ while (!list_empty(&scsi_sblist)) {
+ list_remove_head(&scsi_sblist, psb,
+ struct lpfc_scsi_buf, list);
+ lpfc_release_scsi_buf_s4(phba, psb);
+ }
+ return num_posted;
+}
+
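
The run-batching loop above is easier to follow stripped of the list plumbing. Below is a minimal, self-contained sketch of the same idea: accumulate tags while they stay contiguous, flush a block when a hole appears or when the block reaches the non-embedded mailbox limit, and flush the trailing partial block at the end. All names are hypothetical; MAX_BLOCK stands in for LPFC_NEMBED_MBOX_SGL_CNT and its value here is an assumption, while post_block() stands in for the SGL block post mailbox command.

#include <stdio.h>

#define MAX_BLOCK 254	/* stand-in for LPFC_NEMBED_MBOX_SGL_CNT (assumed) */
#define NO_TAG    (-1)

/* Hypothetical block post: one mailbox command covering tags[0..n-1]. */
static void post_block(const int *tags, int n)
{
	printf("post block of %d sgls starting at tag %d\n", n, tags[0]);
}

static void post_all(const int *tags, int count)
{
	int block[MAX_BLOCK];
	int block_cnt = 0, last_tag = NO_TAG;

	for (int i = 0; i < count; i++) {
		/* a hole in the tag sequence closes the current block */
		if (last_tag != NO_TAG && tags[i] != last_tag + 1 &&
		    block_cnt) {
			post_block(block, block_cnt);
			block_cnt = 0;
		}
		block[block_cnt++] = tags[i];
		last_tag = tags[i];
		/* a full block is posted immediately */
		if (block_cnt == MAX_BLOCK) {
			post_block(block, block_cnt);
			block_cnt = 0;
		}
	}
	if (block_cnt)		/* trailing partial block */
		post_block(block, block_cnt);
}

int main(void)
{
	int tags[] = { 10, 11, 12, 20, 21 };	/* hole between 12 and 20 */

	post_all(tags, 5);	/* posts {10,11,12} then {20,21} */
	return 0;
}
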
+/**
+ * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of scsi buffers that have been allocated and
+ * reposts them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
+ * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+int
+lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
+{
+ LIST_HEAD(post_sblist);
+ int num_posted, rc = 0;
+
+ /* get all SCSI buffers that need to be reposted onto a local list */
+ spin_lock(&phba->scsi_buf_list_lock);
+ list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
+ spin_unlock(&phba->scsi_buf_list_lock);
+
+ /* post the list of scsi buffer sgls to port if available */
+ if (!list_empty(&post_sblist)) {
+ num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
+ phba->sli4_hba.scsi_xri_cnt);
+ /* failed to post any scsi buffer, return error */
+ if (num_posted == 0)
+ rc = -EIO;
+ }
return rc;
}
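
The wrapper above relies on a standard kernel idiom: splice the shared buffer list onto a stack-local list head while holding the lock, then do the slow posting work with no lock held. A generic kernel-style sketch of that pattern follows; struct item, shared_list, shared_lock and process() are hypothetical names, not driver structures.

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
	/* ... payload ... */
};

static LIST_HEAD(shared_list);
static DEFINE_SPINLOCK(shared_lock);

static void process(struct item *it)
{
	/* slow work; safe because no lock is held here */
}

static void drain_and_process(void)
{
	struct item *it, *next;
	LIST_HEAD(local);

	/* O(1) transfer of the whole list while the lock is held */
	spin_lock(&shared_lock);
	list_splice_init(&shared_list, &local);
	spin_unlock(&shared_lock);

	/* the local list is private to this thread: no locking needed */
	list_for_each_entry_safe(it, next, &local, list) {
		list_del_init(&it->list);
		process(it);
	}
}
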
@@ -792,12 +891,13 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
* @vport: The virtual port for which this call being executed.
* @num_to_alloc: The requested number of buffers to allocate.
*
- * This routine allocates a scsi buffer for device with SLI-4 interface spec,
+ * This routine allocates scsi buffers for a device with SLI-4 interface spec,
* the scsi buffer contains all the necessary information needed to initiate
- * a SCSI I/O.
+ * a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
+ * them on a list, it posts them to the port by using SGL block post.
*
* Return codes:
- * int - number of scsi buffers that were allocated.
+ * int - number of scsi buffers that were allocated and posted.
* 0 = failure, less than num_to_alloc is a partial failure.
**/
static int
@@ -810,22 +910,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
- uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
- int status = 0, index;
- int bcnt;
- int non_sequential_xri = 0;
- LIST_HEAD(sblist);
+ uint16_t iotag, lxri = 0;
+ int bcnt, num_posted;
+ LIST_HEAD(prep_sblist);
+ LIST_HEAD(post_sblist);
+ LIST_HEAD(scsi_sblist);
for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
if (!psb)
break;
-
/*
- * Get memory from the pci pool to map the virt space to pci bus
- * space for an I/O. The DMA buffer includes space for the
- * struct fcp_cmnd, struct fcp_rsp and the number of bde's
- * necessary to support the sg_tablesize.
+ * Get memory from the pci pool to map the virt space to
+ * pci bus space for an I/O. The DMA buffer includes space
+ * for the struct fcp_cmnd, struct fcp_rsp and the number
+ * of bde's necessary to support the sg_tablesize.
*/
psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
@@ -833,8 +932,6 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
kfree(psb);
break;
}
-
- /* Initialize virtual ptrs to dma_buf region. */
memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
/* Allocate iotag for psb->cur_iocbq. */
@@ -855,16 +952,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
}
psb->cur_iocbq.sli4_lxritag = lxri;
psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
- if (last_xritag != NO_XRI
- && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
- non_sequential_xri = 1;
- } else
- list_add_tail(&psb->list, &sblist);
- last_xritag = psb->cur_iocbq.sli4_xritag;
-
- index = phba->sli4_hba.scsi_xri_cnt++;
psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
-
psb->fcp_bpl = psb->data;
psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
@@ -880,9 +968,9 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
/*
- * The first two bdes are the FCP_CMD and FCP_RSP. The balance
- * are sg list bdes. Initialize the first two and leave the
- * rest for queuecommand.
+ * The first two bdes are the FCP_CMD and FCP_RSP.
+ * The balance are sg list bdes. Initialize the
+ * first two and leave the rest for queuecommand.
*/
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
@@ -917,62 +1005,31 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
iocb->ulpBdeCount = 1;
iocb->ulpLe = 1;
iocb->ulpClass = CLASS3;
- psb->cur_iocbq.context1 = psb;
+ psb->cur_iocbq.context1 = psb;
if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
else
pdma_phys_bpl1 = 0;
psb->dma_phys_bpl = pdma_phys_bpl;
- phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
- if (non_sequential_xri) {
- status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
- pdma_phys_bpl1,
- psb->cur_iocbq.sli4_xritag);
- if (status) {
- /* Put this back on the abort scsi list */
- psb->exch_busy = 1;
- } else {
- psb->exch_busy = 0;
- psb->status = IOSTAT_SUCCESS;
- }
- /* Put it back into the SCSI buffer list */
- lpfc_release_scsi_buf_s4(phba, psb);
- break;
- }
- }
- if (bcnt) {
- if (!phba->sli4_hba.extents_in_use)
- status = lpfc_sli4_post_scsi_sgl_block(phba,
- &sblist,
- bcnt);
- else
- status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
- &sblist,
- bcnt);
-
- if (status) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "3021 SCSI SGL post error %d\n",
- status);
- bcnt = 0;
- }
- /* Reset SCSI buffer count for next round of posting */
- while (!list_empty(&sblist)) {
- list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
- list);
- if (status) {
- /* Put this back on the abort scsi list */
- psb->exch_busy = 1;
- } else {
- psb->exch_busy = 0;
- psb->status = IOSTAT_SUCCESS;
- }
- /* Put it back into the SCSI buffer list */
- lpfc_release_scsi_buf_s4(phba, psb);
- }
+
+ /* add the scsi buffer to a post list */
+ list_add_tail(&psb->list, &post_sblist);
+ spin_lock_irq(&phba->scsi_buf_list_lock);
+ phba->sli4_hba.scsi_xri_cnt++;
+ spin_unlock_irq(&phba->scsi_buf_list_lock);
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_BG,
+ "3021 Allocate %d out of %d requested new SCSI "
+ "buffers\n", bcnt, num_to_alloc);
+
+ /* post the list of scsi buffer sgls to port if available */
+ if (!list_empty(&post_sblist))
+ num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
+ &post_sblist, bcnt);
+ else
+ num_posted = 0;
- return bcnt + non_sequential_xri;
+ return num_posted;
}
/**
@@ -1043,7 +1100,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
list) {
if (lpfc_test_rrq_active(phba, ndlp,
- lpfc_cmd->cur_iocbq.sli4_xritag))
+ lpfc_cmd->cur_iocbq.sli4_lxritag))
continue;
list_del(&lpfc_cmd->list);
found = 1;
@@ -1897,7 +1954,9 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
dma_addr_t physaddr;
int i = 0, num_bde = 0, status;
int datadir = sc->sc_data_direction;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc;
+#endif
uint32_t checking = 1;
uint32_t reftag;
unsigned blksize;
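
This hunk, and the three identical ones in the other lpfc_bg_setup_* routines below, wraps the rc declaration in CONFIG_SCSI_LPFC_DEBUG_FS, presumably because the only code that assigns and reads rc is itself compiled under that option, so non-debug builds would otherwise warn about an unused variable. A generic, standalone sketch of the pattern (the helper names are hypothetical, and the config macro is defined here only so the sketch compiles):

#include <stdint.h>

#define CONFIG_SCSI_LPFC_DEBUG_FS 1	/* assumed on for this sketch */

static uint32_t debug_only_check(void) { return 0; }	/* hypothetical */
static void handle_injected_error(uint32_t rc) { (void)rc; }

void example(void)
{
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;			/* declared only when its users exist */
#endif

	/* ... unconditional work ... */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = debug_only_check();	/* debug-build-only bookkeeping */
	if (rc)
		handle_injected_error(rc);
#endif
}
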
@@ -2034,7 +2093,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
int datadir = sc->sc_data_direction;
unsigned char pgdone = 0, alldone = 0;
unsigned blksize;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc;
+#endif
uint32_t checking = 1;
uint32_t reftag;
uint8_t txop, rxop;
@@ -2253,7 +2314,9 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t reftag;
unsigned blksize;
uint8_t txop, rxop;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc;
+#endif
uint32_t checking = 1;
uint32_t dma_len;
uint32_t dma_offset = 0;
@@ -2383,7 +2446,9 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t reftag;
uint8_t txop, rxop;
uint32_t dma_len;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc;
+#endif
uint32_t checking = 1;
uint32_t dma_offset = 0;
int num_sge = 0;
@@ -3604,11 +3669,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
logit = LOG_FCP | LOG_FCP_UNDER;
lpfc_printf_vlog(vport, KERN_WARNING, logit,
"9030 FCP cmd x%x failed <%d/%d> "
- "status: x%x result: x%x Data: x%x x%x\n",
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x\n",
cmd->cmnd[0],
cmd->device ? cmd->device->id : 0xffff,
cmd->device ? cmd->device->lun : 0xffff,
lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID, pnode->nlp_DID,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
pIocbOut->iocb.ulpContext,
lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
@@ -3689,8 +3759,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
* ABTS we cannot generate an RRQ.
*/
lpfc_set_rrq_active(phba, pnode,
- lpfc_cmd->cur_iocbq.sli4_xritag,
- 0, 0);
+ lpfc_cmd->cur_iocbq.sli4_lxritag,
+ 0, 0);
}
/* else: fall through */
default:
@@ -4348,8 +4418,20 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
ret = fc_block_scsi_eh(cmnd);
if (ret)
return ret;
+
+ spin_lock_irq(&phba->hbalock);
+ /* driver queued commands are in process of being flushed */
+ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "3168 SCSI Layer abort requested I/O has been "
+ "flushed by LLD.\n");
+ return FAILED;
+ }
+
lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
if (!lpfc_cmd) {
+ spin_unlock_irq(&phba->hbalock);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
"x%x ID %d LUN %d\n",
@@ -4357,23 +4439,34 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
return SUCCESS;
}
+ iocb = &lpfc_cmd->cur_iocbq;
+ /* the command is in process of being cancelled */
+ if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "3169 SCSI Layer abort requested I/O has been "
+ "cancelled by LLD.\n");
+ return FAILED;
+ }
/*
* If pCmd field of the corresponding lpfc_scsi_buf structure
* points to a different SCSI command, then the driver has
* already completed this command, but the midlayer did not
- * see the completion before the eh fired. Just return
- * SUCCESS.
+ * see the completion before the eh fired. Just return SUCCESS.
*/
- iocb = &lpfc_cmd->cur_iocbq;
- if (lpfc_cmd->pCmd != cmnd)
- goto out;
+ if (lpfc_cmd->pCmd != cmnd) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "3170 SCSI Layer abort requested I/O has been "
+ "completed by LLD.\n");
+ goto out_unlock;
+ }
BUG_ON(iocb->context1 != lpfc_cmd);
- abtsiocb = lpfc_sli_get_iocbq(phba);
+ abtsiocb = __lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
ret = FAILED;
- goto out;
+ goto out_unlock;
}
/*
@@ -4405,6 +4498,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
abtsiocb->vport = vport;
+ /* no longer need the lock after this point */
+ spin_unlock_irq(&phba->hbalock);
+
if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocb);
@@ -4421,10 +4517,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
wait_event_timeout(waitq,
(lpfc_cmd->pCmd != cmnd),
(2*vport->cfg_devloss_tmo*HZ));
-
- spin_lock_irq(shost->host_lock);
lpfc_cmd->waitq = NULL;
- spin_unlock_irq(shost->host_lock);
if (lpfc_cmd->pCmd == cmnd) {
ret = FAILED;
@@ -4434,8 +4527,11 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
"LUN %d\n",
ret, cmnd->device->id, cmnd->device->lun);
}
+ goto out;
- out:
+out_unlock:
+ spin_unlock_irq(&phba->hbalock);
+out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0749 SCSI Layer I/O Abort Request Status x%x ID %d "
"LUN %d\n", ret, cmnd->device->id,
@@ -4863,6 +4959,43 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
}
/**
+ * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a host reset of the adapter port. It brings the HBA
+ * offline, performs a board restart, and then brings the board back online.
+ * The lpfc_offline calls lpfc_sli_hba_down, which will abort and locally
+ * reject all outstanding SCSI commands to the host, with the errors returned
+ * back to the SCSI mid-level. As this is the SCSI mid-level's last resort
+ * of error handling, it returns an error only if resetting the adapter
+ * fails; in all other cases it returns success.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int rc, ret = SUCCESS;
+
+ lpfc_offline_prep(phba);
+ lpfc_offline(phba);
+ rc = lpfc_sli_brdrestart(phba);
+ if (rc)
+ ret = FAILED;
+ lpfc_online(phba);
+ lpfc_unblock_mgmt_io(phba);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "3172 SCSI layer issued Host Reset Data: x%x\n", ret);
+ return ret;
+}
+
+/**
* lpfc_slave_alloc - scsi_host_template slave_alloc entry point
* @sdev: Pointer to scsi_device.
*
@@ -4994,6 +5127,7 @@ struct scsi_host_template lpfc_template = {
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = lpfc_bus_reset_handler,
+ .eh_host_reset_handler = lpfc_host_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index dbaf5b963bff..b4720a109817 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -67,6 +67,8 @@ static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_cqe *);
+static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
+ int);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -500,7 +502,7 @@ lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* allocation is successful, it returns pointer to the newly
* allocated iocb object else it returns NULL.
**/
-static struct lpfc_iocbq *
+struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
@@ -875,6 +877,9 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
ndlp = piocbq->context_un.ndlp;
+ else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
+ (piocbq->iocb_flag & LPFC_IO_LIBDFC))
+ ndlp = piocbq->context_un.ndlp;
else
ndlp = piocbq->context1;
@@ -883,7 +888,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
while (!found) {
if (!sglq)
return NULL;
- if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+ if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
*/
@@ -1257,7 +1262,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
list_add_tail(&piocb->list, &pring->txcmplq);
- piocb->iocb_flag |= LPFC_IO_ON_Q;
+ piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt++;
if (pring->txcmplq_cnt > pring->txcmplq_max)
pring->txcmplq_max = pring->txcmplq_cnt;
@@ -2556,9 +2561,9 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
- if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+ if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
pring->txcmplq_cnt--;
- cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+ cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
}
return cmd_iocb;
}
@@ -2591,14 +2596,14 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- list_del_init(&cmd_iocb->list);
- if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
- cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+ if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ /* remove from txcmpl queue list */
+ list_del_init(&cmd_iocb->list);
+ cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
+ return cmd_iocb;
}
- return cmd_iocb;
}
-
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0372 iotag x%x is out off range: max iotag (x%x)\n",
iotag, phba->sli.last_iotag);
@@ -3466,6 +3471,9 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txcmplq_cnt = 0;
+
+ /* Indicate the I/O queues are flushed */
+ phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
/* Flush the txq */
@@ -3877,6 +3885,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint16_t cfg_value;
+ int rc;
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3905,12 +3914,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
/* Perform FCoE PCI function reset */
lpfc_sli4_queue_destroy(phba);
- lpfc_pci_function_reset(phba);
+ rc = lpfc_pci_function_reset(phba);
/* Restore PCI cmd register */
pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
- return 0;
+ return rc;
}
/**
@@ -4002,6 +4011,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint32_t hba_aer_enabled;
+ int rc;
/* Restart HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -4011,7 +4021,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
/* Take PCIe device Advanced Error Reporting (AER) state */
hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
- lpfc_sli4_brdreset(phba);
+ rc = lpfc_sli4_brdreset(phba);
spin_lock_irq(&phba->hbalock);
phba->pport->stopped = 0;
@@ -4028,7 +4038,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
lpfc_hba_down_post(phba);
- return 0;
+ return rc;
}
/**
@@ -4967,7 +4977,12 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
&rsrc_info->u.rsp);
*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
&rsrc_info->u.rsp);
- err_exit:
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3162 Retrieved extents type-%d from port: count:%d, "
+ "size:%d\n", type, *extnt_count, *extnt_size);
+
+err_exit:
mempool_free(mbox, phba->mbox_mem_pool);
return rc;
}
@@ -5051,7 +5066,7 @@ lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
* 0: if successful
**/
static int
-lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
int rc = 0;
@@ -5060,7 +5075,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
uint32_t alloc_len, mbox_tmo;
/* Calculate the total requested length of the dma memory */
- req_len = *extnt_cnt * sizeof(uint16_t);
+ req_len = extnt_cnt * sizeof(uint16_t);
/*
* Calculate the size of an embedded mailbox. The uint32_t
@@ -5075,7 +5090,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
*/
*emb = LPFC_SLI4_MBX_EMBED;
if (req_len > emb_len) {
- req_len = *extnt_cnt * sizeof(uint16_t) +
+ req_len = extnt_cnt * sizeof(uint16_t) +
sizeof(union lpfc_sli4_cfg_shdr) +
sizeof(uint32_t);
*emb = LPFC_SLI4_MBX_NEMBED;
@@ -5091,7 +5106,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
"size (x%x)\n", alloc_len, req_len);
return -ENOMEM;
}
- rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
if (unlikely(rc))
return -EIO;
@@ -5149,17 +5164,15 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
return -ENOMEM;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
- "2903 Available Resource Extents "
- "for resource type 0x%x: Count: 0x%x, "
- "Size 0x%x\n", type, rsrc_cnt,
- rsrc_size);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
+ "2903 Post resource extents type-0x%x: "
+ "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
- rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
+ rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
if (unlikely(rc)) {
rc = -EIO;
goto err_exit;
@@ -5250,6 +5263,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
rc = -ENOMEM;
goto err_exit;
}
+ phba->sli4_hba.max_cfg_param.xri_used = 0;
phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
sizeof(uint16_t),
GFP_KERNEL);
@@ -5420,7 +5434,6 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
case LPFC_RSC_TYPE_FCOE_XRI:
kfree(phba->sli4_hba.xri_bmask);
kfree(phba->sli4_hba.xri_ids);
- bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
&phba->sli4_hba.lpfc_xri_blk_list, list) {
list_del_init(&rsrc_blk->list);
@@ -5612,7 +5625,6 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
goto free_vpi_ids;
}
phba->sli4_hba.max_cfg_param.xri_used = 0;
- phba->sli4_hba.xri_count = 0;
phba->sli4_hba.xri_ids = kzalloc(count *
sizeof(uint16_t),
GFP_KERNEL);
@@ -5694,7 +5706,6 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
kfree(phba->sli4_hba.xri_bmask);
kfree(phba->sli4_hba.xri_ids);
- bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
kfree(phba->sli4_hba.vfi_bmask);
kfree(phba->sli4_hba.vfi_ids);
bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
@@ -5853,6 +5864,149 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
}
/**
+ * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as a block
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of els buffers that have been allocated and
+ * reposts them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. It attempts to construct blocks
+ * of els buffer sgls which contain contiguous xris and uses the non-embedded
+ * SGL block post mailbox commands to post them to the port. For any single
+ * els buffer sgl with a non-contiguous xri, it uses the embedded SGL post
+ * mailbox command for posting.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+static int
+lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry = NULL;
+ struct lpfc_sglq *sglq_entry_next = NULL;
+ struct lpfc_sglq *sglq_entry_first = NULL;
+ int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
+ int last_xritag = NO_XRI;
+ LIST_HEAD(prep_sgl_list);
+ LIST_HEAD(blck_sgl_list);
+ LIST_HEAD(allc_sgl_list);
+ LIST_HEAD(post_sgl_list);
+ LIST_HEAD(free_sgl_list);
+
+ spin_lock(&phba->hbalock);
+ list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
+ spin_unlock(&phba->hbalock);
+
+ list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+ &allc_sgl_list, list) {
+ list_del_init(&sglq_entry->list);
+ block_cnt++;
+ if ((last_xritag != NO_XRI) &&
+ (sglq_entry->sli4_xritag != last_xritag + 1)) {
+ /* a hole in xri block, form a sgl posting block */
+ list_splice_init(&prep_sgl_list, &blck_sgl_list);
+ post_cnt = block_cnt - 1;
+ /* prepare list for next posting block */
+ list_add_tail(&sglq_entry->list, &prep_sgl_list);
+ block_cnt = 1;
+ } else {
+ /* prepare list for next posting block */
+ list_add_tail(&sglq_entry->list, &prep_sgl_list);
+ /* enough sgls for non-embed sgl mbox command */
+ if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+ list_splice_init(&prep_sgl_list,
+ &blck_sgl_list);
+ post_cnt = block_cnt;
+ block_cnt = 0;
+ }
+ }
+ num_posted++;
+
+ /* keep track of last sgl's xritag */
+ last_xritag = sglq_entry->sli4_xritag;
+
+ /* end of repost sgl list condition for els buffers */
+ if (num_posted == phba->sli4_hba.els_xri_cnt) {
+ if (post_cnt == 0) {
+ list_splice_init(&prep_sgl_list,
+ &blck_sgl_list);
+ post_cnt = block_cnt;
+ } else if (block_cnt == 1) {
+ status = lpfc_sli4_post_sgl(phba,
+ sglq_entry->phys, 0,
+ sglq_entry->sli4_xritag);
+ if (!status) {
+ /* success, put sgl on posted list */
+ list_add_tail(&sglq_entry->list,
+ &post_sgl_list);
+ } else {
+ /* failure, put sgl on free list */
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_SLI,
+ "3159 Failed to post els "
+ "sgl, xritag:x%x\n",
+ sglq_entry->sli4_xritag);
+ list_add_tail(&sglq_entry->list,
+ &free_sgl_list);
+ spin_lock_irq(&phba->hbalock);
+ phba->sli4_hba.els_xri_cnt--;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ }
+ }
+
+ /* continue until a nembed page worth of sgls */
+ if (post_cnt == 0)
+ continue;
+
+ /* post the els buffer list sgls as a block */
+ status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
+ post_cnt);
+
+ if (!status) {
+ /* success, put sgl list to posted sgl list */
+ list_splice_init(&blck_sgl_list, &post_sgl_list);
+ } else {
+ /* failure, put sgl list on free sgl list */
+ sglq_entry_first = list_first_entry(&blck_sgl_list,
+ struct lpfc_sglq,
+ list);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3160 Failed to post els sgl-list, "
+ "xritag:x%x-x%x\n",
+ sglq_entry_first->sli4_xritag,
+ (sglq_entry_first->sli4_xritag +
+ post_cnt - 1));
+ list_splice_init(&blck_sgl_list, &free_sgl_list);
+ spin_lock_irq(&phba->hbalock);
+ phba->sli4_hba.els_xri_cnt -= post_cnt;
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ /* don't reset xritag due to hole in xri block */
+ if (block_cnt == 0)
+ last_xritag = NO_XRI;
+
+ /* reset els sgl post count for next round of posting */
+ post_cnt = 0;
+ }
+
+ /* free the els sgls failed to post */
+ lpfc_free_sgl_list(phba, &free_sgl_list);
+
+ /* push the posted els sgls to the available list */
+ if (!list_empty(&post_sgl_list)) {
+ spin_lock(&phba->hbalock);
+ list_splice_init(&post_sgl_list,
+ &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&phba->hbalock);
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3161 Failure to post els sgl to port.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -5923,6 +6077,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
else
phba->hba_flag &= ~HBA_FIP_SUPPORT;
+ phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
+
if (phba->sli_rev != LPFC_SLI_REV4) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0376 READ_REV Error. SLI Level %d "
@@ -6063,8 +6219,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"rc = x%x\n", rc);
goto out_free_mbox;
}
- /* update physical xri mappings in the scsi buffers */
- lpfc_scsi_buf_update(phba);
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
@@ -6105,28 +6259,26 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
- /* Register SGL pool to the device using non-embedded mailbox command */
- if (!phba->sli4_hba.extents_in_use) {
- rc = lpfc_sli4_post_els_sgl_list(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0582 Error %d during els sgl post "
- "operation\n", rc);
- rc = -ENODEV;
- goto out_free_mbox;
- }
- } else {
- rc = lpfc_sli4_post_els_sgl_list_ext(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "2560 Error %d during els sgl post "
- "operation\n", rc);
- rc = -ENODEV;
- goto out_free_mbox;
- }
+ /* update host els and scsi xri-sgl sizes and mappings */
+ rc = lpfc_sli4_xri_sgl_update(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "1400 Failed to update xri-sgl size and "
+ "mapping: %d\n", rc);
+ goto out_free_mbox;
}
- /* Register SCSI SGL pool to the device */
+ /* register the els sgl pool to the port */
+ rc = lpfc_sli4_repost_els_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0582 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
+
+ /* register the allocated scsi sgl pool to the port */
rc = lpfc_sli4_repost_scsi_sgl_list(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7060,14 +7212,19 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
if (rc != MBX_SUCCESS)
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2541 Mailbox command x%x "
- "(x%x/x%x) cannot issue Data: "
- "x%x x%x\n",
+ "(x%x/x%x) failure: "
+ "mqe_sta: x%x mcqe_sta: x%x/x%x "
+ "Data: x%x x%x\n,",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
mboxq),
lpfc_sli_config_mbox_opcode_get(phba,
mboxq),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe),
+ bf_get(lpfc_mcqe_status, &mboxq->mcqe),
+ bf_get(lpfc_mcqe_ext_status,
+ &mboxq->mcqe),
psli->sli_flag, flag);
return rc;
} else if (flag == MBX_POLL) {
@@ -7086,18 +7243,22 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
/* Successfully blocked, now issue sync mbox cmd */
rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
if (rc != MBX_SUCCESS)
- lpfc_printf_log(phba, KERN_ERR,
+ lpfc_printf_log(phba, KERN_WARNING,
LOG_MBOX | LOG_SLI,
- "(%d):2597 Mailbox command "
- "x%x (x%x/x%x) cannot issue "
- "Data: x%x x%x\n",
- mboxq->vport ?
- mboxq->vport->vpi : 0,
+ "(%d):2597 Sync Mailbox command "
+ "x%x (x%x/x%x) failure: "
+ "mqe_sta: x%x mcqe_sta: x%x/x%x "
+ "Data: x%x x%x\n,",
+ mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
mboxq),
lpfc_sli_config_mbox_opcode_get(phba,
mboxq),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe),
+ bf_get(lpfc_mcqe_status, &mboxq->mcqe),
+ bf_get(lpfc_mcqe_ext_status,
+ &mboxq->mcqe),
psli->sli_flag, flag);
/* Unblock the async mailbox posting afterward */
lpfc_sli4_async_mbox_unblock(phba);
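
Both failure logs now pull hardware status fields out with lpfc's bf_get() accessor. The driver describes bitfields with per-field _SHIFT/_MASK/_WORD macros instead of C bitfields, which keeps the register layout explicit and endian-safe. The sketch below models that idiom; it is patterned after lpfc_hw4.h but should be read as an illustration rather than a verbatim copy, and the demo field is invented.

#include <stdint.h>
#include <stdio.h>

/* Each field F defines F_SHIFT, F_MASK and F_WORD, where F_WORD names
 * the 32-bit member holding the field. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Hypothetical 8-bit status field living in bits 8..15 of word0 */
struct demo { uint32_t word0; };
#define demo_status_SHIFT 8
#define demo_status_MASK  0xff
#define demo_status_WORD  word0

int main(void)
{
	struct demo d = { 0 };

	bf_set(demo_status, &d, 0x42);
	/* prints status=0x42, extracted the same way the logs above
	 * extract lpfc_mqe_status and lpfc_mcqe_status */
	printf("status=0x%x\n", (unsigned)bf_get(demo_status, &d));
	return 0;
}
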
@@ -7712,7 +7873,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
switch (iocbq->iocb.ulpCommand) {
case CMD_ELS_REQUEST64_CR:
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
+ if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
+ ndlp = iocbq->context_un.ndlp;
+ else
+ ndlp = (struct lpfc_nodelist *)iocbq->context1;
if (!iocbq->iocb.ulpLe) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2007 Only Limited Edition cmd Format"
@@ -7751,9 +7915,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(els_req64_sp, &wqe->els_req, 1);
bf_set(els_req64_sid, &wqe->els_req,
iocbq->vport->fc_myDID);
+ if ((*pcmd == ELS_CMD_FLOGI) &&
+ !(phba->fc_topology ==
+ LPFC_TOPOLOGY_LOOP))
+ bf_set(els_req64_sid, &wqe->els_req, 0);
bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- phba->vpi_ids[phba->pport->vpi]);
+ phba->vpi_ids[iocbq->vport->vpi]);
} else if (pcmd && iocbq->context1) {
bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
@@ -7908,11 +8076,25 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* words0-2 BDE memcpy */
/* word3 iocb=iotag32 wqe=response_payload_len */
wqe->xmit_els_rsp.response_payload_len = xmit_len;
- /* word4 iocb=did wge=rsvd. */
- wqe->xmit_els_rsp.rsvd4 = 0;
+ /* word4 */
+ wqe->xmit_els_rsp.word4 = 0;
/* word5 iocb=rsvd wqe=did */
bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
- iocbq->iocb.un.elsreq64.remoteID);
+ iocbq->iocb.un.xseq64.xmit_els_remoteID);
+
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (iocbq->vport->fc_flag & FC_PT2PT) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ iocbq->vport->fc_myDID);
+ if (iocbq->vport->fc_myDID == Fabric_DID) {
+ bf_set(wqe_els_did,
+ &wqe->xmit_els_rsp.wqe_dest, 0);
+ }
+ }
+ }
bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
@@ -7932,11 +8114,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
iocbq->context2)->virt);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- bf_set(els_req64_sp, &wqe->els_req, 1);
- bf_set(els_req64_sid, &wqe->els_req,
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
iocbq->vport->fc_myDID);
- bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
phba->vpi_ids[phba->pport->vpi]);
}
command_type = OTHER_COMMAND;
@@ -13080,9 +13262,7 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
} else {
set_bit(xri, phba->sli4_hba.xri_bmask);
phba->sli4_hba.max_cfg_param.xri_used++;
- phba->sli4_hba.xri_count++;
}
-
spin_unlock_irq(&phba->hbalock);
return xri;
}
@@ -13098,7 +13278,6 @@ void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
- phba->sli4_hba.xri_count--;
phba->sli4_hba.max_cfg_param.xri_used--;
}
}
@@ -13134,46 +13313,45 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
uint16_t xri_index;
xri_index = lpfc_sli4_alloc_xri(phba);
- if (xri_index != NO_XRI)
- return xri_index;
-
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2004 Failed to allocate XRI.last XRITAG is %d"
- " Max XRI is %d, Used XRI is %d\n",
- xri_index,
- phba->sli4_hba.max_cfg_param.max_xri,
- phba->sli4_hba.max_cfg_param.xri_used);
- return NO_XRI;
+ if (xri_index == NO_XRI)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2004 Failed to allocate XRI.last XRITAG is %d"
+ " Max XRI is %d, Used XRI is %d\n",
+ xri_index,
+ phba->sli4_hba.max_cfg_param.max_xri,
+ phba->sli4_hba.max_cfg_param.xri_used);
+ return xri_index;
}
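
As the hunks above show, lpfc_sli4_alloc_xri() hands out XRIs from a bitmap (xri_bmask) under hbalock, and __lpfc_sli4_free_xri() returns them with test_and_clear_bit(). A minimal kernel-style sketch of that allocator pattern follows; the pool size, names and locking granularity here are hypothetical.

#include <linux/bitmap.h>
#include <linux/spinlock.h>

#define POOL_SIZE 1024		/* hypothetical pool size */
#define NO_ID     0xffff

static DECLARE_BITMAP(id_bmask, POOL_SIZE);
static DEFINE_SPINLOCK(pool_lock);
static unsigned int ids_used;

static unsigned int alloc_id(void)
{
	unsigned int id;

	spin_lock_irq(&pool_lock);
	id = find_first_zero_bit(id_bmask, POOL_SIZE);
	if (id >= POOL_SIZE) {
		spin_unlock_irq(&pool_lock);
		return NO_ID;		/* pool exhausted */
	}
	set_bit(id, id_bmask);
	ids_used++;
	spin_unlock_irq(&pool_lock);
	return id;
}

/* Caller holds pool_lock, mirroring the __-prefixed driver helper;
 * test_and_clear_bit() tolerates double frees, as in
 * __lpfc_sli4_free_xri() above. */
static void __free_id(unsigned int id)
{
	if (test_and_clear_bit(id, id_bmask))
		ids_used--;
}
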
/**
* lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
* @phba: pointer to lpfc hba data structure.
+ * @post_sgl_list: pointer to els sgl entry list.
+ * @post_cnt: number of els sgl entries on the list.
*
* This routine is invoked to post a block of driver's sgl pages to the
* HBA using non-embedded mailbox command. No Lock is held. This routine
* is only called when the driver is loading and after all IO has been
* stopped.
**/
-int
-lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
+static int
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
+ struct list_head *post_sgl_list,
+ int post_cnt)
{
- struct lpfc_sglq *sglq_entry;
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
struct sgl_page_pairs *sgl_pg_pairs;
void *viraddr;
LPFC_MBOXQ_t *mbox;
uint32_t reqlen, alloclen, pg_pairs;
uint32_t mbox_tmo;
- uint16_t xritag_start = 0, lxri = 0;
- int els_xri_cnt, rc = 0;
+ uint16_t xritag_start = 0;
+ int rc = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
- /* The number of sgls to be posted */
- els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-
- reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+ reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -13203,25 +13381,8 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
sgl_pg_pairs = &sgl->sgl_pg_pairs;
- for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
- sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
-
- /*
- * Assign the sglq a physical xri only if the driver has not
- * initialized those resources. A port reset only needs
- * the sglq's posted.
- */
- if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
- LPFC_XRI_RSRC_RDY) {
- lxri = lpfc_sli4_next_xritag(phba);
- if (lxri == NO_XRI) {
- lpfc_sli4_mbox_cmd_free(phba, mbox);
- return -ENOMEM;
- }
- sglq_entry->sli4_lxritag = lxri;
- sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
- }
-
+ pg_pairs = 0;
+ list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
/* Set up the sge entry */
sgl_pg_pairs->sgl_pg0_addr_lo =
cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -13236,11 +13397,12 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
if (pg_pairs == 0)
xritag_start = sglq_entry->sli4_xritag;
sgl_pg_pairs++;
+ pg_pairs++;
}
/* Complete initialization and perform endian conversion. */
bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
- bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
sgl->word0 = cpu_to_le32(sgl->word0);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
@@ -13260,183 +13422,6 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
-
- if (rc == 0)
- bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
- LPFC_XRI_RSRC_RDY);
- return rc;
-}
-
-/**
- * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to post a block of driver's sgl pages to the
- * HBA using non-embedded mailbox command. No Lock is held. This routine
- * is only called when the driver is loading and after all IO has been
- * stopped.
- **/
-int
-lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
-{
- struct lpfc_sglq *sglq_entry;
- struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
- struct sgl_page_pairs *sgl_pg_pairs;
- void *viraddr;
- LPFC_MBOXQ_t *mbox;
- uint32_t reqlen, alloclen, index;
- uint32_t mbox_tmo;
- uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
- uint16_t xritag_start = 0, lxri = 0;
- struct lpfc_rsrc_blks *rsrc_blk;
- int cnt, ttl_cnt, rc = 0;
- int loop_cnt;
- uint32_t shdr_status, shdr_add_status;
- union lpfc_sli4_cfg_shdr *shdr;
-
- /* The number of sgls to be posted */
- els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-
- reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
- sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > SLI4_PAGE_SIZE) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2989 Block sgl registration required DMA "
- "size (%d) great than a page\n", reqlen);
- return -ENOMEM;
- }
-
- cnt = 0;
- ttl_cnt = 0;
- post_els_xri_cnt = els_xri_cnt;
- list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
- list) {
- rsrc_start = rsrc_blk->rsrc_start;
- rsrc_size = rsrc_blk->rsrc_size;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3014 Working ELS Extent start %d, cnt %d\n",
- rsrc_start, rsrc_size);
-
- loop_cnt = min(post_els_xri_cnt, rsrc_size);
- if (loop_cnt < post_els_xri_cnt) {
- post_els_xri_cnt -= loop_cnt;
- ttl_cnt += loop_cnt;
- } else
- ttl_cnt += post_els_xri_cnt;
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- return -ENOMEM;
- /*
- * Allocate DMA memory and set up the non-embedded mailbox
- * command.
- */
- alloclen = lpfc_sli4_config(phba, mbox,
- LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
- reqlen, LPFC_SLI4_MBX_NEMBED);
- if (alloclen < reqlen) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2987 Allocated DMA memory size (%d) "
- "is less than the requested DMA memory "
- "size (%d)\n", alloclen, reqlen);
- lpfc_sli4_mbox_cmd_free(phba, mbox);
- return -ENOMEM;
- }
-
- /* Set up the SGL pages in the non-embedded DMA pages */
- viraddr = mbox->sge_array->addr[0];
- sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
- sgl_pg_pairs = &sgl->sgl_pg_pairs;
-
- /*
- * The starting resource may not begin at zero. Control
- * the loop variants via the block resource parameters,
- * but handle the sge pointers with a zero-based index
- * that doesn't get reset per loop pass.
- */
- for (index = rsrc_start;
- index < rsrc_start + loop_cnt;
- index++) {
- sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
-
- /*
- * Assign the sglq a physical xri only if the driver
- * has not initialized those resources. A port reset
- * only needs the sglq's posted.
- */
- if (bf_get(lpfc_xri_rsrc_rdy,
- &phba->sli4_hba.sli4_flags) !=
- LPFC_XRI_RSRC_RDY) {
- lxri = lpfc_sli4_next_xritag(phba);
- if (lxri == NO_XRI) {
- lpfc_sli4_mbox_cmd_free(phba, mbox);
- rc = -ENOMEM;
- goto err_exit;
- }
- sglq_entry->sli4_lxritag = lxri;
- sglq_entry->sli4_xritag =
- phba->sli4_hba.xri_ids[lxri];
- }
-
- /* Set up the sge entry */
- sgl_pg_pairs->sgl_pg0_addr_lo =
- cpu_to_le32(putPaddrLow(sglq_entry->phys));
- sgl_pg_pairs->sgl_pg0_addr_hi =
- cpu_to_le32(putPaddrHigh(sglq_entry->phys));
- sgl_pg_pairs->sgl_pg1_addr_lo =
- cpu_to_le32(putPaddrLow(0));
- sgl_pg_pairs->sgl_pg1_addr_hi =
- cpu_to_le32(putPaddrHigh(0));
-
- /* Track the starting physical XRI for the mailbox. */
- if (index == rsrc_start)
- xritag_start = sglq_entry->sli4_xritag;
- sgl_pg_pairs++;
- cnt++;
- }
-
- /* Complete initialization and perform endian conversion. */
- rsrc_blk->rsrc_used += loop_cnt;
- bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
- bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
- sgl->word0 = cpu_to_le32(sgl->word0);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3015 Post ELS Extent SGL, start %d, "
- "cnt %d, used %d\n",
- xritag_start, loop_cnt, rsrc_blk->rsrc_used);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
- }
- shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
- shdr_status = bf_get(lpfc_mbox_hdr_status,
- &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
- &shdr->response);
- if (rc != MBX_TIMEOUT)
- lpfc_sli4_mbox_cmd_free(phba, mbox);
- if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2988 POST_SGL_BLOCK mailbox "
- "command failed status x%x "
- "add_status x%x mbx status x%x\n",
- shdr_status, shdr_add_status, rc);
- rc = -ENXIO;
- goto err_exit;
- }
- if (ttl_cnt >= els_xri_cnt)
- break;
- }
-
- err_exit:
- if (rc == 0)
- bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
- LPFC_XRI_RSRC_RDY);
return rc;
}
@@ -13452,8 +13437,9 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
*
**/
int
-lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
- int cnt)
+lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
+ struct list_head *sblist,
+ int count)
{
struct lpfc_scsi_buf *psb;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -13469,7 +13455,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
union lpfc_sli4_cfg_shdr *shdr;
/* Calculate the requested length of the dma memory */
- reqlen = cnt * sizeof(struct sgl_page_pairs) +
+ reqlen = count * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -13553,169 +13539,6 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
}
/**
- * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
- * @phba: pointer to lpfc hba data structure.
- * @sblist: pointer to scsi buffer list.
- * @count: number of scsi buffers on the list.
- *
- * This routine is invoked to post a block of @count scsi sgl pages from a
- * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
- * No Lock is held.
- *
- **/
-int
-lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
- int cnt)
-{
- struct lpfc_scsi_buf *psb = NULL;
- struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
- struct sgl_page_pairs *sgl_pg_pairs;
- void *viraddr;
- LPFC_MBOXQ_t *mbox;
- uint32_t reqlen, alloclen, pg_pairs;
- uint32_t mbox_tmo;
- uint16_t xri_start = 0, scsi_xri_start;
- uint16_t rsrc_range;
- int rc = 0, avail_cnt;
- uint32_t shdr_status, shdr_add_status;
- dma_addr_t pdma_phys_bpl1;
- union lpfc_sli4_cfg_shdr *shdr;
- struct lpfc_rsrc_blks *rsrc_blk;
- uint32_t xri_cnt = 0;
-
- /* Calculate the total requested length of the dma memory */
- reqlen = cnt * sizeof(struct sgl_page_pairs) +
- sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > SLI4_PAGE_SIZE) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2932 Block sgl registration required DMA "
- "size (%d) great than a page\n", reqlen);
- return -ENOMEM;
- }
-
- /*
- * The use of extents requires the driver to post the sgl headers
- * in multiple postings to meet the contiguous resource assignment.
- */
- psb = list_prepare_entry(psb, sblist, list);
- scsi_xri_start = phba->sli4_hba.scsi_xri_start;
- list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
- list) {
- rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
- if (rsrc_range < scsi_xri_start)
- continue;
- else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
- continue;
- else
- avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
-
- reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
- sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- /*
- * Allocate DMA memory and set up the non-embedded mailbox
- * command. The mbox is used to post an SGL page per loop
- * but the DMA memory has a use-once semantic so the mailbox
- * is used and freed per loop pass.
- */
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2933 Failed to allocate mbox cmd "
- "memory\n");
- return -ENOMEM;
- }
- alloclen = lpfc_sli4_config(phba, mbox,
- LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
- reqlen,
- LPFC_SLI4_MBX_NEMBED);
- if (alloclen < reqlen) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2934 Allocated DMA memory size (%d) "
- "is less than the requested DMA memory "
- "size (%d)\n", alloclen, reqlen);
- lpfc_sli4_mbox_cmd_free(phba, mbox);
- return -ENOMEM;
- }
-
- /* Get the first SGE entry from the non-embedded DMA memory */
- viraddr = mbox->sge_array->addr[0];
-
- /* Set up the SGL pages in the non-embedded DMA pages */
- sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
- sgl_pg_pairs = &sgl->sgl_pg_pairs;
-
- /* pg_pairs tracks posted SGEs per loop iteration. */
- pg_pairs = 0;
- list_for_each_entry_continue(psb, sblist, list) {
- /* Set up the sge entry */
- sgl_pg_pairs->sgl_pg0_addr_lo =
- cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
- sgl_pg_pairs->sgl_pg0_addr_hi =
- cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
- if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
- pdma_phys_bpl1 = psb->dma_phys_bpl +
- SGL_PAGE_SIZE;
- else
- pdma_phys_bpl1 = 0;
- sgl_pg_pairs->sgl_pg1_addr_lo =
- cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
- sgl_pg_pairs->sgl_pg1_addr_hi =
- cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
- /* Keep the first xri for this extent. */
- if (pg_pairs == 0)
- xri_start = psb->cur_iocbq.sli4_xritag;
- sgl_pg_pairs++;
- pg_pairs++;
- xri_cnt++;
-
- /*
- * Track two exit conditions - the loop has constructed
- * all of the caller's SGE pairs or all available
- * resource IDs in this extent are consumed.
- */
- if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
- break;
- }
- rsrc_blk->rsrc_used += pg_pairs;
- bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
- bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3016 Post SCSI Extent SGL, start %d, cnt %d "
- "blk use %d\n",
- xri_start, pg_pairs, rsrc_blk->rsrc_used);
- /* Perform endian conversion if necessary */
- sgl->word0 = cpu_to_le32(sgl->word0);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
- }
- shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
- &shdr->response);
- if (rc != MBX_TIMEOUT)
- lpfc_sli4_mbox_cmd_free(phba, mbox);
- if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2935 POST_SGL_BLOCK mailbox command "
- "failed status x%x add_status x%x "
- "mbx status x%x\n",
- shdr_status, shdr_add_status, rc);
- return -ENXIO;
- }
-
- /* Post only what is requested. */
- if (xri_cnt >= cnt)
- break;
- }
- return rc;
-}
-
-/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -13839,8 +13662,13 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
fc_hdr->fh_d_id[1] << 8 |
fc_hdr->fh_d_id[2]);
+
if (did == Fabric_DID)
return phba->pport;
+ if ((phba->pport->fc_flag & FC_PT2PT) &&
+ !(phba->link_state == LPFC_HBA_READY))
+ return phba->pport;
+
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
@@ -14133,7 +13961,6 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
return NO_XRI;
}
-
/**
* lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
* @phba: Pointer to HBA context object.
@@ -14148,7 +13975,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
{
struct lpfc_iocbq *ctiocb = NULL;
struct lpfc_nodelist *ndlp;
- uint16_t oxid, rxid;
+ uint16_t oxid, rxid, xri, lxri;
uint32_t sid, fctl;
IOCB_t *icmd;
int rc;
@@ -14167,8 +13994,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
"SID:x%x\n", oxid, sid);
return;
}
- if (lpfc_sli4_xri_inrange(phba, rxid))
- lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
/* Allocate buffer for rsp iocb */
ctiocb = lpfc_sli_get_iocbq(phba);
@@ -14199,13 +14024,24 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
+ if (fctl & FC_FC_EX_CTX)
+ /* Exchange responder sent the abort so we
+ * own the oxid.
+ */
+ xri = oxid;
+ else
+ xri = rxid;
+ lxri = lpfc_sli4_xri_inrange(phba, xri);
+ if (lxri != NO_XRI)
+ lpfc_set_rrq_active(phba, ndlp, lxri,
+ (xri == oxid) ? rxid : oxid, 0);
/* If the xri maps to the FCP XRI range or if it is out of range,
* send a BLS_RJT. The driver no longer has that exchange.
* Override the IOCB for a BA_RJT.
*/
- if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
+ if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
phba->sli4_hba.max_cfg_param.xri_base) ||
- oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
+ xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
phba->sli4_hba.max_cfg_param.xri_base)) {
icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
@@ -14377,7 +14213,15 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
/* Initialize the first IOCB. */
first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
- first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
+
+ /* Check FC Header to see what TYPE of frame we are rcv'ing */
+ if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
+ first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
+ first_iocbq->iocb.un.rcvels.parmRo =
+ sli4_did_from_fc_hdr(fc_hdr);
+ first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
+ } else
+ first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
first_iocbq->iocb.ulpContext = NO_XRI;
first_iocbq->iocb.unsli3.rcvsli3.ox_id =
be16_to_cpu(fc_hdr->fh_ox_id);
@@ -14507,6 +14351,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
struct fc_frame_header *fc_hdr;
struct lpfc_vport *vport;
uint32_t fcfi;
+ uint32_t did;
/* Process each received buffer */
fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
@@ -14522,12 +14367,32 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
else
fcfi = bf_get(lpfc_rcqe_fcf_id,
&dmabuf->cq_event.cqe.rcqe_cmpl);
+
vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
- if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+ if (!vport) {
/* throw out the frame */
lpfc_in_buf_free(phba, &dmabuf->dbuf);
return;
}
+
+ /* d_id this frame is directed to */
+ did = sli4_did_from_fc_hdr(fc_hdr);
+
+ /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
+ (did != Fabric_DID)) {
+ /*
+ * Throw out the frame if we are not pt2pt.
+ * The pt2pt protocol allows for discovery frames
+ * to be received without a registered VPI.
+ */
+ if (!(vport->fc_flag & FC_PT2PT) ||
+ (phba->link_state == LPFC_HBA_READY)) {
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+ }
+
/* Handle the basic abort sequence (BA_ABTS) event */
if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
lpfc_sli4_handle_unsol_abort(vport, dmabuf);
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 3290b8e7ab65..2626f58c0747 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -68,7 +68,7 @@ struct lpfc_iocbq {
#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
#define DSS_SECURITY_OP 0x100 /* security IO */
-#define LPFC_IO_ON_Q 0x200 /* The IO is still on the TXCMPLQ */
+#define LPFC_IO_ON_TXCMPLQ 0x200 /* The IO is still on the TXCMPLQ */
#define LPFC_IO_DIF 0x400 /* T10 DIF IO */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c19d139618b7..a4a77080091b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -75,11 +75,19 @@
(fc_hdr)->fh_s_id[1] << 8 | \
(fc_hdr)->fh_s_id[2])
+#define sli4_did_from_fc_hdr(fc_hdr) \
+ ((fc_hdr)->fh_d_id[0] << 16 | \
+ (fc_hdr)->fh_d_id[1] << 8 | \
+ (fc_hdr)->fh_d_id[2])
+
#define sli4_fctl_from_fc_hdr(fc_hdr) \
((fc_hdr)->fh_f_ctl[0] << 16 | \
(fc_hdr)->fh_f_ctl[1] << 8 | \
(fc_hdr)->fh_f_ctl[2])
+#define sli4_type_from_fc_hdr(fc_hdr) \
+ ((fc_hdr)->fh_type)
+
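
The new sli4_did_from_fc_hdr() mirrors the existing s_id and f_ctl helpers: FC frame headers carry 24-bit addresses as three big-endian bytes, and the macro simply reassembles them into a host integer. A small standalone demonstration (the struct below models only the bytes the macro touches):

#include <stdint.h>
#include <stdio.h>

struct fc_hdr_sketch {		/* just the d_id bytes of the header */
	uint8_t fh_d_id[3];
};

#define sli4_did_from_fc_hdr(fc_hdr) \
	((fc_hdr)->fh_d_id[0] << 16 | \
	 (fc_hdr)->fh_d_id[1] << 8 | \
	 (fc_hdr)->fh_d_id[2])

int main(void)
{
	/* 0xFF,0xFF,0xFE is the well-known fabric DID (Fabric_DID) */
	struct fc_hdr_sketch h = { { 0xff, 0xff, 0xfe } };

	/* prints did=0xfffffe */
	printf("did=0x%06x\n", (unsigned)sli4_did_from_fc_hdr(&h));
	return 0;
}
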
#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
enum lpfc_sli4_queue_type {
@@ -493,14 +501,12 @@ struct lpfc_sli4_hba {
uint16_t next_rpi;
uint16_t scsi_xri_max;
uint16_t scsi_xri_cnt;
+ uint16_t els_xri_cnt;
uint16_t scsi_xri_start;
struct list_head lpfc_free_sgl_list;
struct list_head lpfc_sgl_list;
- struct lpfc_sglq **lpfc_els_sgl_array;
struct list_head lpfc_abts_els_sgl_list;
- struct lpfc_scsi_buf **lpfc_scsi_psb_array;
struct list_head lpfc_abts_scsi_buf_list;
- uint32_t total_sglq_bufs;
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
@@ -509,7 +515,6 @@ struct lpfc_sli4_hba {
struct list_head lpfc_rpi_blk_list;
unsigned long *xri_bmask;
uint16_t *xri_ids;
- uint16_t xri_count;
struct list_head lpfc_xri_blk_list;
unsigned long *vfi_bmask;
uint16_t *vfi_ids;
@@ -614,11 +619,7 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
-int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
-int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
-int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
- int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 25cefc254b76..59c57a409981 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.30"
+#define LPFC_DRIVER_VERSION "8.3.31"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e5f416f8042d..e8f892647681 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.06.14-rc1"
-#define MEGASAS_RELDATE "Jan. 6, 2012"
-#define MEGASAS_EXT_VERSION "Fri. Jan. 6 17:00:00 PDT 2012"
+#define MEGASAS_VERSION "00.00.06.15-rc1"
+#define MEGASAS_RELDATE "Mar. 19, 2012"
+#define MEGASAS_EXT_VERSION "Mon. Mar. 19 17:00:00 PDT 2012"
/*
* Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 8b300be44284..dc27598785e5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.06.14-rc1
+ * Version : v00.00.06.15-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 294abb0defa6..e3d251a2e26a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -362,15 +362,20 @@ MR_BuildRaidContext(struct megasas_instance *instance,
/* assume this IO needs the full row - we'll adjust if not true */
regSize = stripSize;
- /* If IO spans more than 1 strip, fp is not possible
- FP is not possible for writes on non-0 raid levels
- FP is not possible if LD is not capable */
- if (num_strips > 1 || (!isRead && raid->level != 0) ||
- !raid->capability.fpCapable) {
+ /* Check if we can send this I/O via FastPath */
+ if (raid->capability.fpCapable) {
+ if (isRead)
+ io_info->fpOkForIo = (raid->capability.fpReadCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpReadAcrossStripe));
+ else
+ io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpWriteAcrossStripe));
+ } else
io_info->fpOkForIo = FALSE;
- } else {
- io_info->fpOkForIo = TRUE;
- }
if (numRows == 1) {
/* single-strip IOs can always lock only the data needed */
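
The rewritten block above replaces one combined test with a per-direction capability check: reads and writes each need their own FastPath capability bit, and multi-strip I/O additionally needs the matching across-stripe bit. A hedged flattening of that decision, with plain booleans standing in for the raid->capability bitfields:

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-ins for the raid->capability bits */
struct fp_caps {
	bool fp_capable;
	bool fp_read_capable, fp_read_across_stripe;
	bool fp_write_capable, fp_write_across_stripe;
};

static bool fp_ok_for_io(const struct fp_caps *c, bool is_read,
			 unsigned int num_strips)
{
	if (!c->fp_capable)
		return false;
	if (is_read)
		return c->fp_read_capable &&
		       (num_strips == 1 || c->fp_read_across_stripe);
	return c->fp_write_capable &&
	       (num_strips == 1 || c->fp_write_across_stripe);
}

int main(void)
{
	struct fp_caps c = { .fp_capable = true, .fp_read_capable = true };

	/* single-strip read qualifies; two strips need the across-stripe bit */
	printf("%d %d\n", fp_ok_for_io(&c, true, 1),
	       fp_ok_for_io(&c, true, 2));
	return 0;
}
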
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index bfd87fab39aa..a610cf1d4847 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -634,9 +634,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
fusion->reply_frames_desc_phys;
IOCInitMessage->SystemRequestFrameBaseAddress =
fusion->io_request_frames_phys;
- /* Set to 0 for none or 1 MSI-X vectors */
- IOCInitMessage->HostMSIxVectors = (instance->msix_vectors > 0 ?
- instance->msix_vectors : 0);
+ IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
init_frame = (struct megasas_init_frame *)cmd->frame;
memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index a01f0aa66f20..a80f3220c641 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.22
+ * mpi2.h Version: 02.00.23
*
* Version History
* ---------------
@@ -71,6 +71,7 @@
* 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
* 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
* 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -96,7 +97,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x16)
+#define MPI2_HEADER_VERSION_UNIT (0x17)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -480,7 +481,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
U64 Words;
} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
- Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
+Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 3a023dad77a1..737fa8cfb54a 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.21
+ * mpi2_cnfg.h Version: 02.00.22
*
* Version History
* ---------------
@@ -146,7 +146,9 @@
* Added SpinupFlags field containing a Disable Spin-up
* bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
* SAS IO Unit Page 4.
-
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ * Added UEFIVersion field to BIOS Page 1 and defined new
+ * BiosOptions bits.
* --------------------------------------------------------------------------
*/
@@ -1131,9 +1133,10 @@ typedef struct _MPI2_CONFIG_PAGE_IOC_6
} MPI2_CONFIG_PAGE_IOC_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_6,
Mpi2IOCPage6_t, MPI2_POINTER pMpi2IOCPage6_t;
-#define MPI2_IOCPAGE6_PAGEVERSION (0x04)
+#define MPI2_IOCPAGE6_PAGEVERSION (0x05)
/* defines for IOC Page 6 CapabilitiesFlags */
+#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020)
#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010)
#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008)
#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004)
@@ -1204,24 +1207,29 @@ typedef struct _MPI2_CONFIG_PAGE_IOC_8
typedef struct _MPI2_CONFIG_PAGE_BIOS_1
{
- MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
- U32 BiosOptions; /* 0x04 */
- U32 IOCSettings; /* 0x08 */
- U32 Reserved1; /* 0x0C */
- U32 DeviceSettings; /* 0x10 */
- U16 NumberOfDevices; /* 0x14 */
- U16 Reserved2; /* 0x16 */
- U16 IOTimeoutBlockDevicesNonRM; /* 0x18 */
- U16 IOTimeoutSequential; /* 0x1A */
- U16 IOTimeoutOther; /* 0x1C */
- U16 IOTimeoutBlockDevicesRM; /* 0x1E */
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 BiosOptions; /* 0x04 */
+ U32 IOCSettings; /* 0x08 */
+ U32 Reserved1; /* 0x0C */
+ U32 DeviceSettings; /* 0x10 */
+ U16 NumberOfDevices; /* 0x14 */
+ U16 UEFIVersion; /* 0x16 */
+ U16 IOTimeoutBlockDevicesNonRM; /* 0x18 */
+ U16 IOTimeoutSequential; /* 0x1A */
+ U16 IOTimeoutOther; /* 0x1C */
+ U16 IOTimeoutBlockDevicesRM; /* 0x1E */
} MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1,
Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t;
-#define MPI2_BIOSPAGE1_PAGEVERSION (0x04)
+#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
/* values for BIOS Page 1 BiosOptions field */
-#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
+#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
+#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004)
+
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
/* values for BIOS Page 1 IOCSettings field */
#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
@@ -1248,6 +1256,13 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1
#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001)
+/* defines for BIOS Page 1 UEFIVersion field */
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00)
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0)
+
+
/* BIOS Page 2 */
@@ -2216,6 +2231,27 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 {
+/* SAS IO Unit Page 16 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U64 TimeStamp; /* 0x08 */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+ U32 FastPathPendedRequests; /* 0x18 */
+ U32 FastPathUnPendedRequests; /* 0x1C */
+ U32 FastPathHostRequestStarts; /* 0x20 */
+ U32 FastPathFirmwareRequestStarts; /* 0x24 */
+ U32 FastPathHostCompletions; /* 0x28 */
+ U32 FastPathFirmwareCompletions; /* 0x2C */
+ U32 NonFastPathRequestStarts; /* 0x30 */
+ U32 NonFastPathHostCompletions; /* 0x34 */
+} MPI2_CONFIG_PAGE_SASIOUNIT16,
+MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
+Mpi2SasIOUnitPage16_t, MPI2_POINTER pMpi2SasIOUnitPage16_t;
+
+#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00)
+
/****************************************************************************
* SAS Expander Config Pages
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 8a59a772fdf2..6102ef2cb2d8 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -699,6 +699,11 @@ _base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u16 ioc_status;
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
@@ -930,16 +935,18 @@ _base_interrupt(int irq, void *bus_id)
else if (request_desript_type ==
MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
goto next;
- if (smid)
+ if (smid) {
cb_idx = _base_get_cb_idx(ioc, smid);
- if (smid && cb_idx != 0xFF) {
- rc = mpt_callbacks[cb_idx](ioc, smid, msix_index,
- reply);
+ if ((likely(cb_idx < MPT_MAX_CALLBACKS))
+ && (likely(mpt_callbacks[cb_idx] != NULL))) {
+ rc = mpt_callbacks[cb_idx](ioc, smid,
+ msix_index, reply);
if (reply)
- _base_display_reply_info(ioc, smid, msix_index,
- reply);
+ _base_display_reply_info(ioc, smid,
+ msix_index, reply);
if (rc)
mpt2sas_base_free_smid(ioc, smid);
+ }
}
if (!smid)
_base_async_event(ioc, msix_index, reply);
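
The interrupt-path change above stops relying on the 0xFF sentinel and instead bounds-checks cb_idx and the callback slot before dispatching. A minimal sketch of that guarded table dispatch; MAX_CALLBACKS and demo_cb are illustrative stand-ins for MPT_MAX_CALLBACKS and a real completion callback:

#include <stddef.h>
#include <stdio.h>

#define MAX_CALLBACKS 32

typedef int (*reply_cb_t)(unsigned int smid);

static reply_cb_t callbacks[MAX_CALLBACKS];

/* validate both the index and the slot before calling through the table */
static int dispatch_reply(unsigned int cb_idx, unsigned int smid)
{
	if (cb_idx >= MAX_CALLBACKS || callbacks[cb_idx] == NULL)
		return -1;	/* stale or unregistered index: drop it */
	return callbacks[cb_idx](smid);
}

static int demo_cb(unsigned int smid)
{
	printf("completing smid %u\n", smid);
	return 0;
}

int main(void)
{
	callbacks[3] = demo_cb;
	dispatch_reply(3, 17);		/* dispatched */
	dispatch_reply(0xFF, 17);	/* rejected: out of range */
	return 0;
}
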
@@ -3343,7 +3350,7 @@ _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
}
pfacts = &ioc->pfacts[port];
- memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t));
+ memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
pfacts->PortNumber = mpi_reply.PortNumber;
pfacts->VP_ID = mpi_reply.VP_ID;
pfacts->VF_ID = mpi_reply.VF_ID;
@@ -3385,7 +3392,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
}
facts = &ioc->facts;
- memset(facts, 0, sizeof(Mpi2IOCFactsReply_t));
+ memset(facts, 0, sizeof(struct mpt2sas_facts));
facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
facts->VP_ID = mpi_reply.VP_ID;
@@ -4153,7 +4160,8 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
if (ioc->is_driver_loading) {
if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
== 0x80) {
- hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
+ hide_flag = (u8) (
+ le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
MFG_PAGE10_HIDE_SSDS_MASK);
if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
ioc->mfg_pg10_hide_flag = hide_flag;
@@ -4262,7 +4270,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
goto out_free_resources;
ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
- sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
+ sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
if (!ioc->pfacts) {
r = -ENOMEM;
goto out_free_resources;
@@ -4279,7 +4287,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
goto out_free_resources;
init_waitqueue_head(&ioc->reset_wq);
-
/* allocate memory pd handle bitmask list */
ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
@@ -4290,7 +4297,12 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
r = -ENOMEM;
goto out_free_resources;
}
-
+ ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->blocking_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
ioc->fwfault_debug = mpt2sas_fwfault_debug;
/* base internal command bits */
@@ -4377,6 +4389,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
if (ioc->is_warpdrive)
kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
kfree(ioc->scsih_cmds.reply);
@@ -4418,6 +4431,7 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
if (ioc->is_warpdrive)
kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
kfree(ioc->pfacts);
kfree(ioc->ctl_cmds.reply);
kfree(ioc->ctl_cmds.sense);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index c7459fdc06cc..b6dd3a5de7f9 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "12.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 12
+#define MPT2SAS_DRIVER_VERSION "13.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 13
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -720,6 +720,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
* @io_missing_delay: time for IO completed by fw when PDR enabled
* @device_missing_delay: time for device missing by fw when PDR enabled
* @sas_id : used for setting volume target IDs
+ * @blocking_handles: bitmask used to identify which devices need blocking
* @pd_handles : bitmask for PD handles
* @pd_handles_sz : size of pd_handle bitmask
* @config_page_sz: config page size
@@ -889,7 +890,7 @@ struct MPT2SAS_ADAPTER {
u8 io_missing_delay;
u16 device_missing_delay;
int sas_id;
-
+ void *blocking_handles;
void *pd_handles;
u16 pd_handles_sz;
@@ -1058,7 +1059,8 @@ int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
-void mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
+void mpt2sas_device_remove_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address);
struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
u16 handle);
struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 3b9a28efea82..49bdd2dc8452 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -620,11 +620,10 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
* @ioc: per adapter object
* @karg - (struct mpt2_ioctl_command)
* @mf - pointer to mf in user space
- * @state - NON_BLOCKING or BLOCKING
*/
static long
-_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
- struct mpt2_ioctl_command karg, void __user *mf, enum block_state state)
+_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,
+ void __user *mf)
{
MPI2RequestHeader_t *mpi_request = NULL, *request;
MPI2DefaultReply_t *mpi_reply;
@@ -647,11 +646,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
issue_reset = 0;
- if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
- return -EAGAIN;
- else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
- return -ERESTARTSYS;
-
if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
ioc->name, __func__);
@@ -871,8 +865,16 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
if (smp_request->PassthroughFlags &
MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
data = (u8 *)&smp_request->SGL;
- else
+ else {
+ if (unlikely(data_out == NULL)) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ mpt2sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
data = data_out;
+ }
if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
ioc->ioc_link_reset_in_progress = 1;
@@ -985,7 +987,8 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
ret = -ENODATA;
if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function ==
- MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
printk(MPT2SAS_INFO_FMT "issue target reset: handle "
"= (0x%04x)\n", ioc->name,
le16_to_cpu(mpi_request->FunctionDependent1));
@@ -1013,27 +1016,24 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
kfree(mpi_request);
ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->ctl_cmds.mutex);
return ret;
}
/**
* _ctl_getiocinfo - main handler for MPT2IOCINFO opcode
+ * @ioc: per adapter object
* @arg - user space buffer containing ioctl content
*/
static long
-_ctl_getiocinfo(void __user *arg)
+_ctl_getiocinfo(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_ioctl_iocinfo karg;
- struct MPT2SAS_ADAPTER *ioc;
if (copy_from_user(&karg, arg, sizeof(karg))) {
printk(KERN_ERR "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
__func__));
@@ -1069,21 +1069,19 @@ _ctl_getiocinfo(void __user *arg)
/**
* _ctl_eventquery - main handler for MPT2EVENTQUERY opcode
+ * @ioc: per adapter object
* @arg - user space buffer containing ioctl content
*/
static long
-_ctl_eventquery(void __user *arg)
+_ctl_eventquery(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_ioctl_eventquery karg;
- struct MPT2SAS_ADAPTER *ioc;
if (copy_from_user(&karg, arg, sizeof(karg))) {
printk(KERN_ERR "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
__func__));
@@ -1102,21 +1100,19 @@ _ctl_eventquery(void __user *arg)
/**
* _ctl_eventenable - main handler for MPT2EVENTENABLE opcode
+ * @ioc: per adapter object
* @arg - user space buffer containing ioctl content
*/
static long
-_ctl_eventenable(void __user *arg)
+_ctl_eventenable(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_ioctl_eventenable karg;
- struct MPT2SAS_ADAPTER *ioc;
if (copy_from_user(&karg, arg, sizeof(karg))) {
printk(KERN_ERR "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
__func__));
@@ -1142,13 +1138,13 @@ _ctl_eventenable(void __user *arg)
/**
* _ctl_eventreport - main handler for MPT2EVENTREPORT opcode
+ * @ioc: per adapter object
* @arg - user space buffer containing ioctl content
*/
static long
-_ctl_eventreport(void __user *arg)
+_ctl_eventreport(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_ioctl_eventreport karg;
- struct MPT2SAS_ADAPTER *ioc;
u32 number_bytes, max_events, max;
struct mpt2_ioctl_eventreport __user *uarg = arg;
@@ -1157,8 +1153,6 @@ _ctl_eventreport(void __user *arg)
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
__func__));
@@ -1188,13 +1182,13 @@ _ctl_eventreport(void __user *arg)
/**
* _ctl_do_reset - main handler for MPT2HARDRESET opcode
+ * @ioc: per adapter object
* @arg - user space buffer containing ioctl content
*/
static long
-_ctl_do_reset(void __user *arg)
+_ctl_do_reset(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_ioctl_diag_reset karg;
- struct MPT2SAS_ADAPTER *ioc;
int retval;
if (copy_from_user(&karg, arg, sizeof(karg))) {
@@ -1202,8 +1196,6 @@ _ctl_do_reset(void __user *arg)
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
if (ioc->shost_recovery || ioc->pci_error_recovery ||
ioc->is_driver_loading)
@@ -1292,13 +1284,13 @@ _ctl_btdh_search_raid_device(struct MPT2SAS_ADAPTER *ioc,
/**
* _ctl_btdh_mapping - main handler for MPT2BTDHMAPPING opcode
+ * @ioc: per adapter object
* @arg - user space buffer containing ioctl content
*/
static long
-_ctl_btdh_mapping(void __user *arg)
+_ctl_btdh_mapping(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_ioctl_btdh_mapping karg;
- struct MPT2SAS_ADAPTER *ioc;
int rc;
if (copy_from_user(&karg, arg, sizeof(karg))) {
@@ -1306,8 +1298,6 @@ _ctl_btdh_mapping(void __user *arg)
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
@@ -1576,17 +1566,16 @@ mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register)
/**
* _ctl_diag_register - application register with driver
+ * @ioc: per adapter object
* @arg - user space buffer containing ioctl content
- * @state - NON_BLOCKING or BLOCKING
*
* This will allow the driver to set up any required buffers that will be
* needed by firmware to communicate with the driver.
*/
static long
-_ctl_diag_register(void __user *arg, enum block_state state)
+_ctl_diag_register(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_diag_register karg;
- struct MPT2SAS_ADAPTER *ioc;
long rc;
if (copy_from_user(&karg, arg, sizeof(karg))) {
@@ -1594,30 +1583,23 @@ _ctl_diag_register(void __user *arg, enum block_state state)
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
- if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
- return -EAGAIN;
- else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
- return -ERESTARTSYS;
rc = _ctl_diag_register_2(ioc, &karg);
- mutex_unlock(&ioc->ctl_cmds.mutex);
return rc;
}
/**
* _ctl_diag_unregister - application unregister with driver
+ * @ioc: per adapter object
* @arg - user space buffer containing ioctl content
*
* This will allow the driver to clean up any memory allocated for diag
* messages and to free up any resources.
*/
static long
-_ctl_diag_unregister(void __user *arg)
+_ctl_diag_unregister(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_diag_unregister karg;
- struct MPT2SAS_ADAPTER *ioc;
void *request_data;
dma_addr_t request_data_dma;
u32 request_data_sz;
@@ -1628,8 +1610,6 @@ _ctl_diag_unregister(void __user *arg)
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
@@ -1678,6 +1658,7 @@ _ctl_diag_unregister(void __user *arg)
/**
* _ctl_diag_query - query relevant info associated with diag buffers
+ * @ioc: per adapter object
* @arg - user space buffer containing ioctl content
*
* The application will send only buffer_type and unique_id. Driver will
@@ -1685,10 +1666,9 @@ _ctl_diag_unregister(void __user *arg)
* 0x00, the driver will return info specified by Buffer Type.
*/
static long
-_ctl_diag_query(void __user *arg)
+_ctl_diag_query(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_diag_query karg;
- struct MPT2SAS_ADAPTER *ioc;
void *request_data;
int i;
u8 buffer_type;
@@ -1698,8 +1678,6 @@ _ctl_diag_query(void __user *arg)
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
@@ -1866,17 +1844,15 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
/**
* _ctl_diag_release - request to send Diag Release Message to firmware
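+ * @ioc: per adapter object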
* @arg - user space buffer containing ioctl content
- * @state - NON_BLOCKING or BLOCKING
*
* This allows ownership of the specified buffer to be returned to the driver,
* allowing an application to read the buffer without fear that firmware is
* overwriting information in the buffer.
*/
static long
-_ctl_diag_release(void __user *arg, enum block_state state)
+_ctl_diag_release(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_diag_release karg;
- struct MPT2SAS_ADAPTER *ioc;
void *request_data;
int rc;
u8 buffer_type;
@@ -1887,8 +1863,6 @@ _ctl_diag_release(void __user *arg, enum block_state state)
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
@@ -1942,32 +1916,25 @@ _ctl_diag_release(void __user *arg, enum block_state state)
return 0;
}
- if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
- return -EAGAIN;
- else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
- return -ERESTARTSYS;
-
rc = _ctl_send_release(ioc, buffer_type, &issue_reset);
if (issue_reset)
mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
- mutex_unlock(&ioc->ctl_cmds.mutex);
return rc;
}
/**
* _ctl_diag_read_buffer - request for copy of the diag buffer
+ * @ioc: per adapter object
* @arg - user space buffer containing ioctl content
- * @state - NON_BLOCKING or BLOCKING
*/
static long
-_ctl_diag_read_buffer(void __user *arg, enum block_state state)
+_ctl_diag_read_buffer(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt2_diag_read_buffer karg;
struct mpt2_diag_read_buffer __user *uarg = arg;
- struct MPT2SAS_ADAPTER *ioc;
void *request_data, *diag_data;
Mpi2DiagBufferPostRequest_t *mpi_request;
Mpi2DiagBufferPostReply_t *mpi_reply;
@@ -1983,8 +1950,6 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
__FILE__, __LINE__, __func__);
return -EFAULT;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
@@ -2055,10 +2020,6 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
}
/* Get a free request frame and save the message context.
*/
- if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
- return -EAGAIN;
- else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
- return -ERESTARTSYS;
if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
@@ -2139,115 +2100,170 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
out:
ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->ctl_cmds.mutex);
return rc;
}
+
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
+ * @ioc: per adapter object
+ * @cmd - ioctl opcode
+ * @arg - (struct mpt2_ioctl_command32)
+ *
+ * MPT2COMMAND32 - Handle 32-bit applications running on a 64-bit OS.
+ */
+static long
+_ctl_compat_mpt_command(struct MPT2SAS_ADAPTER *ioc, unsigned cmd,
+ void __user *arg)
+{
+ struct mpt2_ioctl_command32 karg32;
+ struct mpt2_ioctl_command32 __user *uarg;
+ struct mpt2_ioctl_command karg;
+
+ if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
+ return -EINVAL;
+
+ uarg = (struct mpt2_ioctl_command32 __user *) arg;
+
+ if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
+ karg.hdr.ioc_number = karg32.hdr.ioc_number;
+ karg.hdr.port_number = karg32.hdr.port_number;
+ karg.hdr.max_data_size = karg32.hdr.max_data_size;
+ karg.timeout = karg32.timeout;
+ karg.max_reply_bytes = karg32.max_reply_bytes;
+ karg.data_in_size = karg32.data_in_size;
+ karg.data_out_size = karg32.data_out_size;
+ karg.max_sense_bytes = karg32.max_sense_bytes;
+ karg.data_sge_offset = karg32.data_sge_offset;
+ karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+ karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+ karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+ karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
+ return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+}
+#endif
+
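
The relocated compat handler's core job is the compat_ptr() conversions: 32-bit pointer values supplied by a 32-bit process are widened to native pointers before the shared _ctl_do_mpt_command() path runs. A user-space analogue of that widening, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* hypothetical analogue of compat_ptr(): widen a 32-bit user pointer
 * value into a native pointer */
static void *widen_ptr(uint32_t uptr32)
{
	return (void *)(uintptr_t)uptr32;
}

int main(void)
{
	uint32_t from_compat_abi = 0x1000u;

	printf("%p\n", widen_ptr(from_compat_abi));
	return 0;
}
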
/**
* _ctl_ioctl_main - main ioctl entry point
* @file - (struct file)
* @cmd - ioctl opcode
* @arg -
+ * @compat - handles 32-bit applications running on a 64-bit OS
*/
static long
-_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
+_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
+ u8 compat)
{
+ struct MPT2SAS_ADAPTER *ioc;
+ struct mpt2_ioctl_header ioctl_header;
enum block_state state;
long ret = -EINVAL;
- state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING :
- BLOCKING;
+ /* get IOCTL header */
+ if (copy_from_user(&ioctl_header, (char __user *)arg,
+ sizeof(struct mpt2_ioctl_header))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
+ return -ENODEV;
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+
+ state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
+ if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
+ return -EAGAIN;
+ else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
+ return -ERESTARTSYS;
switch (cmd) {
case MPT2IOCINFO:
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_iocinfo))
- ret = _ctl_getiocinfo(arg);
+ ret = _ctl_getiocinfo(ioc, arg);
break;
+#ifdef CONFIG_COMPAT
+ case MPT2COMMAND32:
+#endif
case MPT2COMMAND:
{
- struct mpt2_ioctl_command karg;
struct mpt2_ioctl_command __user *uarg;
- struct MPT2SAS_ADAPTER *ioc;
-
+ struct mpt2_ioctl_command karg;
+#ifdef CONFIG_COMPAT
+ if (compat) {
+ ret = _ctl_compat_mpt_command(ioc, cmd, arg);
+ break;
+ }
+#endif
if (copy_from_user(&karg, arg, sizeof(karg))) {
printk(KERN_ERR "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
- return -EFAULT;
+ ret = -EFAULT;
+ break;
}
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
- !ioc)
- return -ENODEV;
-
- if (ioc->shost_recovery || ioc->pci_error_recovery ||
- ioc->is_driver_loading)
- return -EAGAIN;
-
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
uarg = arg;
- ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
+ ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
}
break;
}
case MPT2EVENTQUERY:
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventquery))
- ret = _ctl_eventquery(arg);
+ ret = _ctl_eventquery(ioc, arg);
break;
case MPT2EVENTENABLE:
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventenable))
- ret = _ctl_eventenable(arg);
+ ret = _ctl_eventenable(ioc, arg);
break;
case MPT2EVENTREPORT:
- ret = _ctl_eventreport(arg);
+ ret = _ctl_eventreport(ioc, arg);
break;
case MPT2HARDRESET:
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_diag_reset))
- ret = _ctl_do_reset(arg);
+ ret = _ctl_do_reset(ioc, arg);
break;
case MPT2BTDHMAPPING:
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_btdh_mapping))
- ret = _ctl_btdh_mapping(arg);
+ ret = _ctl_btdh_mapping(ioc, arg);
break;
case MPT2DIAGREGISTER:
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_register))
- ret = _ctl_diag_register(arg, state);
+ ret = _ctl_diag_register(ioc, arg);
break;
case MPT2DIAGUNREGISTER:
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_unregister))
- ret = _ctl_diag_unregister(arg);
+ ret = _ctl_diag_unregister(ioc, arg);
break;
case MPT2DIAGQUERY:
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_query))
- ret = _ctl_diag_query(arg);
+ ret = _ctl_diag_query(ioc, arg);
break;
case MPT2DIAGRELEASE:
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_release))
- ret = _ctl_diag_release(arg, state);
+ ret = _ctl_diag_release(ioc, arg);
break;
case MPT2DIAGREADBUFFER:
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_read_buffer))
- ret = _ctl_diag_read_buffer(arg, state);
+ ret = _ctl_diag_read_buffer(ioc, arg);
break;
default:
- {
- struct mpt2_ioctl_command karg;
- struct MPT2SAS_ADAPTER *ioc;
-
- if (copy_from_user(&karg, arg, sizeof(karg))) {
- printk(KERN_ERR "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
-
- if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
- !ioc)
- return -ENODEV;
dctlprintk(ioc, printk(MPT2SAS_INFO_FMT
"unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
break;
}
- }
+
+ mutex_unlock(&ioc->ctl_cmds.mutex);
return ret;
}
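
Taken together, the reworked _ctl_ioctl_main() copies the ioctl header, verifies the adapter, and takes ctl_cmds.mutex exactly once at the top, so every opcode handler receives a validated ioc and runs serialized; this is also what lets the global _ctl_mutex be dropped below. A hypothetical user-space model of that consolidated entry-point pattern, with a pthread mutex in place of the kernel mutex and stub lookup/dispatch helpers:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct adapter {
	pthread_mutex_t lock;
	bool recovering;
};

static struct adapter the_adapter = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

/* stub: a real driver resolves ioc_number against its adapter list */
static struct adapter *lookup_adapter(int ioc_number)
{
	return ioc_number == 0 ? &the_adapter : NULL;
}

/* stub opcode handler; runs with the adapter mutex held */
static long handle_opcode(struct adapter *a, unsigned int cmd)
{
	(void)a;
	printf("opcode 0x%x handled under the adapter mutex\n", cmd);
	return 0;
}

static long ioctl_main(int ioc_number, unsigned int cmd, bool nonblock)
{
	struct adapter *a = lookup_adapter(ioc_number);
	long ret;

	if (!a)
		return -ENODEV;
	if (a->recovering)
		return -EAGAIN;
	if (nonblock) {
		if (pthread_mutex_trylock(&a->lock))
			return -EAGAIN;	/* O_NONBLOCK caller: don't wait */
	} else if (pthread_mutex_lock(&a->lock)) {
		return -EINTR;	/* models -ERESTARTSYS */
	}

	ret = handle_opcode(a, cmd);
	pthread_mutex_unlock(&a->lock);
	return ret;
}

int main(void)
{
	return (int)ioctl_main(0, 0x20, false);
}
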
@@ -2262,66 +2278,11 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret;
- mutex_lock(&_ctl_mutex);
- ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
- mutex_unlock(&_ctl_mutex);
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0);
return ret;
}
-
#ifdef CONFIG_COMPAT
/**
- * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg - (struct mpt2_ioctl_command32)
- *
- * MPT2COMMAND32 - Handle 32bit applications running on 64bit os.
- */
-static long
-_ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
-{
- struct mpt2_ioctl_command32 karg32;
- struct mpt2_ioctl_command32 __user *uarg;
- struct mpt2_ioctl_command karg;
- struct MPT2SAS_ADAPTER *ioc;
- enum block_state state;
-
- if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
- return -EINVAL;
-
- uarg = (struct mpt2_ioctl_command32 __user *) arg;
-
- if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
- printk(KERN_ERR "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
- return -ENODEV;
-
- if (ioc->shost_recovery || ioc->pci_error_recovery ||
- ioc->is_driver_loading)
- return -EAGAIN;
-
- memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
- karg.hdr.ioc_number = karg32.hdr.ioc_number;
- karg.hdr.port_number = karg32.hdr.port_number;
- karg.hdr.max_data_size = karg32.hdr.max_data_size;
- karg.timeout = karg32.timeout;
- karg.max_reply_bytes = karg32.max_reply_bytes;
- karg.data_in_size = karg32.data_in_size;
- karg.data_out_size = karg32.data_out_size;
- karg.max_sense_bytes = karg32.max_sense_bytes;
- karg.data_sge_offset = karg32.data_sge_offset;
- karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
- karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
- karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
- karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
- state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
- return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
-}
-
-/**
* _ctl_ioctl_compat - main ioctl entry point (compat)
* @file -
* @cmd -
@@ -2334,12 +2295,7 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
{
long ret;
- mutex_lock(&_ctl_mutex);
- if (cmd == MPT2COMMAND32)
- ret = _ctl_compat_mpt_command(file, cmd, arg);
- else
- ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
- mutex_unlock(&_ctl_mutex);
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1);
return ret;
}
#endif
@@ -2884,7 +2840,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
struct mpt2_diag_register diag_register;
u8 issue_reset = 0;
- if (sscanf(buf, "%s", str) != 1)
+ if (sscanf(buf, "%9s", str) != 1)
return -EINVAL;
if (!strcmp(str, "post")) {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index d953a57e779d..76973e8ca4ba 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -579,14 +579,12 @@ _scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- if (mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_device->sas_address)) {
- list_del(&sas_device->list);
- kfree(sas_device);
- }
+ list_del(&sas_device->list);
+ kfree(sas_device);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
+
/**
* _scsih_sas_device_add - insert sas_device to the list.
* @ioc: per adapter object
@@ -645,8 +643,8 @@ _scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
_scsih_determine_boot_device(ioc, sas_device, 0);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
/**
@@ -755,7 +753,6 @@ _scsih_raid_device_add(struct MPT2SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @raid_device: raid_device object
*
- * This is removed from the raid_device_list link list.
*/
static void
_scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc,
@@ -765,7 +762,6 @@ _scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->raid_device_lock, flags);
list_del(&raid_device->list);
- memset(raid_device, 0, sizeof(struct _raid_device));
kfree(raid_device);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
@@ -1199,10 +1195,10 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
sas_device_priv_data->sas_target->sas_address);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (sas_device && sas_device->device_info &
MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
not_sata:
@@ -1299,7 +1295,8 @@ _scsih_target_alloc(struct scsi_target *starget)
sas_target_priv_data->handle = raid_device->handle;
sas_target_priv_data->sas_address = raid_device->wwid;
sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
- sas_target_priv_data->raid_device = raid_device;
+ if (ioc->is_warpdrive)
+ sas_target_priv_data->raid_device = raid_device;
raid_device->starget = starget;
}
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
@@ -1465,12 +1462,12 @@ _scsih_slave_destroy(struct scsi_device *sdev)
/**
* _scsih_display_sata_capabilities - sata capabilities
* @ioc: per adapter object
- * @sas_device: the sas_device object
+ * @handle: device handle
* @sdev: scsi device struct
*/
static void
_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
- struct _sas_device *sas_device, struct scsi_device *sdev)
+ u16 handle, struct scsi_device *sdev)
{
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
@@ -1479,7 +1476,7 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
u32 device_info;
if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
- MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, sas_device->handle))) {
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
return;
@@ -1537,27 +1534,40 @@ _scsih_get_resync(struct device *dev)
Mpi2RaidVolPage0_t vol_pg0;
Mpi2ConfigReply_t mpi_reply;
u32 volume_status_flags;
- u8 percent_complete = 0;
+ u8 percent_complete;
+ u16 handle;
+
+ percent_complete = 0;
+ handle = 0;
+ if (ioc->is_warpdrive)
+ goto out;
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
sdev->channel);
+ if (raid_device) {
+ handle = raid_device->handle;
+ percent_complete = raid_device->percent_complete;
+ }
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- if (!raid_device || ioc->is_warpdrive)
+ if (!handle)
goto out;
if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
- MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
+ percent_complete = 0;
goto out;
}
volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
- if (volume_status_flags & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)
- percent_complete = raid_device->percent_complete;
+ if (!(volume_status_flags &
+ MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
+ percent_complete = 0;
+
out:
raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
}
@@ -1577,17 +1587,20 @@ _scsih_get_state(struct device *dev)
Mpi2ConfigReply_t mpi_reply;
u32 volstate;
enum raid_state state = RAID_STATE_UNKNOWN;
+ u16 handle = 0;
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
sdev->channel);
+ if (raid_device)
+ handle = raid_device->handle;
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (!raid_device)
goto out;
if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
- MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -1620,14 +1633,14 @@ _scsih_get_state(struct device *dev)
/**
* _scsih_set_level - set raid level
* @sdev: scsi device struct
- * @raid_device: raid_device object
+ * @volume_type: volume type
*/
static void
-_scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device)
+_scsih_set_level(struct scsi_device *sdev, u8 volume_type)
{
enum raid_level level = RAID_LEVEL_UNKNOWN;
- switch (raid_device->volume_type) {
+ switch (volume_type) {
case MPI2_RAID_VOL_TYPE_RAID0:
level = RAID_LEVEL_0;
break;
@@ -1722,6 +1735,7 @@ _scsih_disable_ddio(struct MPT2SAS_ADAPTER *ioc)
struct _raid_device *raid_device;
u16 handle;
u16 ioc_status;
+ unsigned long flags;
handle = 0xFFFF;
while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
@@ -1731,9 +1745,11 @@ _scsih_disable_ddio(struct MPT2SAS_ADAPTER *ioc)
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
break;
handle = le16_to_cpu(vol_pg1.DevHandle);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
if (raid_device)
raid_device->direct_io_enabled = 0;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
return;
}
@@ -1838,7 +1854,8 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
if (mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
&pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
vol_pg0->PhysDisk[count].PhysDiskNum) ||
- pd_pg0.DevHandle == MPT2SAS_INVALID_DEVICE_HANDLE) {
+ le16_to_cpu(pd_pg0.DevHandle) ==
+ MPT2SAS_INVALID_DEVICE_HANDLE) {
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
"disabled for the drive with handle(0x%04x) member"
"handle retrieval failed for member number=%d\n",
@@ -1968,19 +1985,21 @@ _scsih_slave_configure(struct scsi_device *sdev)
u8 ssp_target = 0;
char *ds = "";
char *r_level = "";
+ u16 handle, volume_handle = 0;
+ u64 volume_wwid = 0;
qdepth = 1;
sas_device_priv_data = sdev->hostdata;
sas_device_priv_data->configured_lun = 1;
sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
sas_target_priv_data = sas_device_priv_data->sas_target;
+ handle = sas_target_priv_data->handle;
/* raid volume handling */
if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = _scsih_raid_device_find_by_handle(ioc,
- sas_target_priv_data->handle);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (!raid_device) {
dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
@@ -1989,8 +2008,6 @@ _scsih_slave_configure(struct scsi_device *sdev)
return 1;
}
- _scsih_get_volume_capabilities(ioc, raid_device);
-
if (_scsih_get_volume_capabilities(ioc, raid_device)) {
dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
"failure at %s:%d/%s()!\n", ioc->name, __FILE__,
@@ -2058,68 +2075,67 @@ _scsih_slave_configure(struct scsi_device *sdev)
_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
/* raid transport support */
if (!ioc->is_warpdrive)
- _scsih_set_level(sdev, raid_device);
+ _scsih_set_level(sdev, raid_device->volume_type);
return 0;
}
/* non-raid handling */
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_device_priv_data->sas_target->sas_address);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device) {
- if (sas_target_priv_data->flags &
- MPT_TARGET_FLAGS_RAID_COMPONENT) {
- if (mpt2sas_config_get_volume_handle(ioc,
- sas_device->handle, &sas_device->volume_handle)) {
- dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
- return 1;
- }
- if (sas_device->volume_handle &&
- mpt2sas_config_get_volume_wwid(ioc,
- sas_device->volume_handle,
- &sas_device->volume_wwid)) {
- dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
- return 1;
- }
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ if (mpt2sas_config_get_volume_handle(ioc, handle,
+ &volume_handle)) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
}
- if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
- qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
- ssp_target = 1;
- ds = "SSP";
- } else {
- qdepth = MPT2SAS_SATA_QUEUE_DEPTH;
- if (sas_device->device_info &
- MPI2_SAS_DEVICE_INFO_STP_TARGET)
- ds = "STP";
- else if (sas_device->device_info &
- MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
- ds = "SATA";
+ if (volume_handle && mpt2sas_config_get_volume_wwid(ioc,
+ volume_handle, &volume_wwid)) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
}
+ }
- sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
- "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
- ds, sas_device->handle,
- (unsigned long long)sas_device->sas_address,
- sas_device->phy,
- (unsigned long long)sas_device->device_name);
- sdev_printk(KERN_INFO, sdev, "%s: "
- "enclosure_logical_id(0x%016llx), slot(%d)\n", ds,
- (unsigned long long) sas_device->enclosure_logical_id,
- sas_device->slot);
-
- if (!ssp_target)
- _scsih_display_sata_capabilities(ioc, sas_device, sdev);
- } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
return 1;
}
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
+ ssp_target = 1;
+ ds = "SSP";
+ } else {
+ qdepth = MPT2SAS_SATA_QUEUE_DEPTH;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ ds = "STP";
+ else if (sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ }
+ sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
+ "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
+ ds, sas_device->handle,
+ (unsigned long long)sas_device->sas_address,
+ sas_device->phy,
+ (unsigned long long)sas_device->device_name);
+ sdev_printk(KERN_INFO, sdev, "%s: "
+ "enclosure_logical_id(0x%016llx), slot(%d)\n", ds,
+ (unsigned long long) sas_device->enclosure_logical_id,
+ sas_device->slot);
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!ssp_target)
+ _scsih_display_sata_capabilities(ioc, handle, sdev);
+
_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
@@ -2899,7 +2915,7 @@ _scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc)
* During device pull we need to appropriately set the sdev state.
*/
static void
-_scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
{
struct MPT2SAS_DEVICE *sas_device_priv_data;
struct scsi_device *sdev;
@@ -2910,10 +2926,12 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
continue;
if (!sas_device_priv_data->block)
continue;
- if (sas_device_priv_data->sas_target->handle == handle) {
+ if (sas_device_priv_data->sas_target->sas_address ==
+ sas_address) {
dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
MPT2SAS_INFO_FMT "SDEV_RUNNING: "
- "handle(0x%04x)\n", ioc->name, handle));
+ "sas address(0x%016llx)\n", ioc->name,
+ (unsigned long long)sas_address));
sas_device_priv_data->block = 0;
scsi_internal_device_unblock(sdev);
}
@@ -3006,10 +3024,10 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc,
sas_device =
mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
mpt2sas_port->remote_identify.sas_address);
+ if (sas_device)
+ set_bit(sas_device->handle,
+ ioc->blocking_handles);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (!sas_device)
- continue;
- _scsih_block_io_device(ioc, sas_device->handle);
}
}
@@ -3020,12 +3038,9 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc,
SAS_EDGE_EXPANDER_DEVICE ||
mpt2sas_port->remote_identify.device_type ==
SAS_FANOUT_EXPANDER_DEVICE) {
-
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
expander_sibling =
mpt2sas_scsih_expander_find_by_sas_address(
ioc, mpt2sas_port->remote_identify.sas_address);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
_scsih_block_io_to_children_attached_to_ex(ioc,
expander_sibling);
}
@@ -3124,7 +3139,7 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "setting delete flag: "
"handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle,
(unsigned long long)sas_address));
- _scsih_ublock_io_device(ioc, handle);
+ _scsih_ublock_io_device(ioc, sas_address);
sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;
}
@@ -3174,16 +3189,19 @@ static u8
_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
u8 msix_index, u32 reply)
{
-#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
Mpi2SasIoUnitControlReply_t *mpi_reply =
mpt2sas_base_get_reply_virt_addr(ioc, reply);
-#endif
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "sc_complete:handle(0x%04x), (open) "
- "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
+ if (likely(mpi_reply)) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "sc_complete:handle(0x%04x), (open) "
+ "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
+ } else {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
return 1;
}
@@ -3262,7 +3280,11 @@ _scsih_tm_volume_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
"progress!\n", __func__, ioc->name));
return 1;
}
-
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
handle = le16_to_cpu(mpi_request_tm->DevHandle);
if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
@@ -3325,7 +3347,11 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
"operational\n", __func__, ioc->name));
return 1;
}
-
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
handle = le16_to_cpu(mpi_request_tm->DevHandle);
if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
@@ -3441,14 +3467,20 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
_scsih_block_io_to_children_attached_directly(ioc, event_data);
return;
}
-
- if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING
- || event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) {
+ if (event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
+ /* put expander attached devices into blocking state */
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
expander_handle);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ do {
+ handle = find_first_bit(ioc->blocking_handles,
+ ioc->facts.MaxDevHandle);
+ if (handle < ioc->facts.MaxDevHandle)
+ _scsih_block_io_device(ioc, handle);
+ } while (test_and_clear_bit(handle, ioc->blocking_handles));
} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
_scsih_block_io_to_children_attached_directly(ioc, event_data);
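
The new delay-not-responding path records each expander-attached device in the blocking_handles bitmask while the locks are held, then drains the mask afterwards with find_first_bit()/test_and_clear_bit(). A self-contained sketch of that drain pattern, modeling the kernel bit helpers in plain C:

#include <stdio.h>

#define MAX_HANDLES	128
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long blocking[MAX_HANDLES / BITS_PER_LONG];

static void set_handle(unsigned int h)
{
	blocking[h / BITS_PER_LONG] |= 1UL << (h % BITS_PER_LONG);
}

/* models find_first_bit(): lowest set bit, or MAX_HANDLES if empty */
static unsigned int find_first(void)
{
	for (unsigned int h = 0; h < MAX_HANDLES; h++)
		if (blocking[h / BITS_PER_LONG] & (1UL << (h % BITS_PER_LONG)))
			return h;
	return MAX_HANDLES;
}

/* act on each recorded handle and clear it, until the mask is empty */
static void drain_blocking_handles(void)
{
	unsigned int h;

	while ((h = find_first()) < MAX_HANDLES) {
		printf("blocking I/O to handle 0x%04x\n", h);
		blocking[h / BITS_PER_LONG] &= ~(1UL << (h % BITS_PER_LONG));
	}
}

int main(void)
{
	set_handle(3);
	set_handle(70);
	drain_blocking_handles();
	return 0;
}
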
@@ -4446,8 +4478,8 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
!= MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
ioc->scsi_lookup[smid - 1].scmd = scmd;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
_scsih_scsi_direct_io_set(ioc, smid, 0);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -5020,13 +5052,11 @@ mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
sas_address);
- if (!sas_expander) {
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- return;
- }
- list_del(&sas_expander->list);
+ if (sas_expander)
+ list_del(&sas_expander->list);
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- _scsih_expander_node_remove(ioc, sas_expander);
+ if (sas_expander)
+ _scsih_expander_node_remove(ioc, sas_expander);
}
/**
@@ -5106,6 +5136,7 @@ _scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
struct MPT2SAS_TARGET *sas_target_priv_data;
u32 device_info;
+
if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
return;
@@ -5139,21 +5170,24 @@ _scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
sas_target_priv_data->handle = handle;
sas_device->handle = handle;
}
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
printk(MPT2SAS_ERR_FMT "device is not present "
"handle(0x%04x), flags!!!\n", ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return;
}
/* check if there were any issues with discovery */
if (_scsih_check_access_status(ioc, sas_address, handle,
- sas_device_pg0.AccessStatus))
+ sas_device_pg0.AccessStatus)) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return;
- _scsih_ublock_io_device(ioc, handle);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ _scsih_ublock_io_device(ioc, sas_address);
}
@@ -5280,54 +5314,71 @@ static void
_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
struct _sas_device *sas_device)
{
- struct _sas_device sas_device_backup;
struct MPT2SAS_TARGET *sas_target_priv_data;
- if (!sas_device)
- return;
-
- memcpy(&sas_device_backup, sas_device, sizeof(struct _sas_device));
- _scsih_sas_device_remove(ioc, sas_device);
-
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
"handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
- sas_device_backup.handle, (unsigned long long)
- sas_device_backup.sas_address));
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
- if (sas_device_backup.starget && sas_device_backup.starget->hostdata) {
- sas_target_priv_data = sas_device_backup.starget->hostdata;
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- _scsih_ublock_io_device(ioc, sas_device_backup.handle);
+ _scsih_ublock_io_device(ioc, sas_device->sas_address);
sas_target_priv_data->handle =
MPT2SAS_INVALID_DEVICE_HANDLE;
}
- _scsih_ublock_io_device(ioc, sas_device_backup.handle);
-
if (!ioc->hide_drives)
mpt2sas_transport_port_remove(ioc,
- sas_device_backup.sas_address,
- sas_device_backup.sas_address_parent);
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
- "(0x%016llx)\n", ioc->name, sas_device_backup.handle,
- (unsigned long long) sas_device_backup.sas_address);
+ "(0x%016llx)\n", ioc->name, sas_device->handle,
+ (unsigned long long) sas_device->sas_address);
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: "
"handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
- sas_device_backup.handle, (unsigned long long)
- sas_device_backup.sas_address));
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+ kfree(sas_device);
+}
+/**
+ * _scsih_device_remove_by_handle - removing device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+static void
+_scsih_device_remove_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
}
/**
- * mpt2sas_device_remove - removing device object
+ * mpt2sas_device_remove_by_sas_address - removing device object by sas address
* @ioc: per adapter object
- * @sas_address: expander sas_address
+ * @sas_address: device sas_address
*
* Return nothing.
*/
void
-mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
+mpt2sas_device_remove_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address)
{
struct _sas_device *sas_device;
unsigned long flags;
@@ -5338,14 +5389,12 @@ mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
sas_address);
- if (!sas_device) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
- }
+ if (sas_device)
+ list_del(&sas_device->list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- _scsih_remove_device(ioc, sas_device);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
}
-
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
* _scsih_sas_topology_change_event_debug - debug for topology event
@@ -5442,7 +5491,6 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
u16 reason_code;
u8 phy_number, max_phys;
struct _sas_node *sas_expander;
- struct _sas_device *sas_device;
u64 sas_address;
unsigned long flags;
u8 link_rate, prev_link_rate;
@@ -5477,15 +5525,17 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
parent_handle);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (sas_expander) {
sas_address = sas_expander->sas_address;
max_phys = sas_expander->num_phys;
} else if (parent_handle < ioc->sas_hba.num_phys) {
sas_address = ioc->sas_hba.sas_address;
max_phys = ioc->sas_hba.num_phys;
- } else
+ } else {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* handle sibling events */
for (i = 0; i < event_data->NumEntries; i++) {
@@ -5540,16 +5590,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
break;
case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc,
- handle);
- if (!sas_device) {
- spin_unlock_irqrestore(&ioc->sas_device_lock,
- flags);
- break;
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- _scsih_remove_device(ioc, sas_device);
+ _scsih_device_remove_by_handle(ioc, handle);
break;
}
}
@@ -5672,20 +5713,24 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
sas_address = le64_to_cpu(event_data->SASAddress);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
sas_address);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (!sas_device || !sas_device->starget)
+ if (!sas_device || !sas_device->starget) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return;
+ }
target_priv_data = sas_device->starget->hostdata;
- if (!target_priv_data)
+ if (!target_priv_data) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return;
+ }
if (event_data->ReasonCode ==
MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
target_priv_data->tm_busy = 1;
else
target_priv_data->tm_busy = 0;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -5950,30 +5995,6 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
}
/**
- * _scsih_reprobe_target - reprobing target
- * @starget: scsi target struct
- * @no_uld_attach: sdev->no_uld_attach flag setting
- *
- * Note: no_uld_attach flag determines whether the disk device is attached
- * to block layer. A value of `1` means to not attach.
- **/
-static void
-_scsih_reprobe_target(struct scsi_target *starget, int no_uld_attach)
-{
- struct MPT2SAS_TARGET *sas_target_priv_data;
-
- if (starget == NULL)
- return;
- sas_target_priv_data = starget->hostdata;
- if (no_uld_attach)
- sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
- else
- sas_target_priv_data->flags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
-
- starget_for_each_device(starget, no_uld_attach ? (void *)1 : NULL,
- _scsih_reprobe_lun);
-}
-/**
* _scsih_sas_volume_add - add new volume
* @ioc: per adapter object
* @element: IR config element data
@@ -6024,8 +6045,11 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
raid_device->id, 0);
if (rc)
_scsih_raid_device_remove(ioc, raid_device);
- } else
+ } else {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
_scsih_determine_boot_device(ioc, raid_device, 1);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
}
/**
@@ -6042,21 +6066,25 @@ _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc, u16 handle)
struct _raid_device *raid_device;
unsigned long flags;
struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget = NULL;
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- if (!raid_device)
- return;
- if (raid_device->starget) {
- sas_target_priv_data = raid_device->starget->hostdata;
- sas_target_priv_data->deleted = 1;
- scsi_remove_target(&raid_device->starget->dev);
+ if (raid_device) {
+ if (raid_device->starget) {
+ starget = raid_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ }
+ printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
+ "(0x%016llx)\n", ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ list_del(&raid_device->list);
+ kfree(raid_device);
}
- printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
- "(0x%016llx)\n", ioc->name, raid_device->handle,
- (unsigned long long) raid_device->wwid);
- _scsih_raid_device_remove(ioc, raid_device);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (starget)
+ scsi_remove_target(&starget->dev);
}
/**
@@ -6072,20 +6100,31 @@ _scsih_sas_pd_expose(struct MPT2SAS_ADAPTER *ioc,
Mpi2EventIrConfigElement_t *element)
{
struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
unsigned long flags;
u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device->volume_handle = 0;
+ sas_device->volume_wwid = 0;
+ clear_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags &=
+ ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+ }
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (!sas_device)
return;
/* exposing raid component */
- sas_device->volume_handle = 0;
- sas_device->volume_wwid = 0;
- clear_bit(handle, ioc->pd_handles);
- _scsih_reprobe_target(sas_device->starget, 0);
+ if (starget)
+ starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
}
/**
@@ -6101,23 +6140,38 @@ _scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
Mpi2EventIrConfigElement_t *element)
{
struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
unsigned long flags;
u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ u16 volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ mpt2sas_config_get_volume_handle(ioc, handle, &volume_handle);
+ if (volume_handle)
+ mpt2sas_config_get_volume_wwid(ioc, volume_handle,
+ &volume_wwid);
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ set_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ }
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (!sas_device)
return;
/* hiding raid component */
- mpt2sas_config_get_volume_handle(ioc, handle,
- &sas_device->volume_handle);
- mpt2sas_config_get_volume_wwid(ioc, sas_device->volume_handle,
- &sas_device->volume_wwid);
- set_bit(handle, ioc->pd_handles);
- _scsih_reprobe_target(sas_device->starget, 1);
-
+ if (starget)
+ starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
}
/**
@@ -6132,16 +6186,9 @@ static void
_scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc,
Mpi2EventIrConfigElement_t *element)
{
- struct _sas_device *sas_device;
- unsigned long flags;
u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (!sas_device)
- return;
- _scsih_remove_device(ioc, sas_device);
+ _scsih_device_remove_by_handle(ioc, handle);
}
/**
@@ -6583,18 +6630,13 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
/* code added for raid transport support */
if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
- handle = le16_to_cpu(event_data->VolDevHandle);
-
spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ handle = le16_to_cpu(event_data->VolDevHandle);
raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-
- if (!raid_device)
- return;
-
- if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC)
+ if (raid_device)
raid_device->percent_complete =
event_data->PercentComplete;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
}
@@ -6761,13 +6803,18 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
* required data for Direct IO
*/
_scsih_init_warpdrive_properties(ioc, raid_device);
- if (raid_device->handle == handle)
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ if (raid_device->handle == handle) {
+ spin_unlock_irqrestore(&ioc->raid_device_lock,
+ flags);
return;
+ }
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
raid_device->handle);
raid_device->handle = handle;
if (sas_target_priv_data)
sas_target_priv_data->handle = handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
return;
}
}
@@ -6939,58 +6986,56 @@ static void
_scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
{
struct _sas_device *sas_device, *sas_device_next;
- struct _sas_node *sas_expander;
+ struct _sas_node *sas_expander, *sas_expander_next;
struct _raid_device *raid_device, *raid_device_next;
+ struct list_head tmp_list;
+ unsigned long flags;
printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n",
ioc->name);
+ /* removing unresponding end devices */
+ printk(MPT2SAS_INFO_FMT "removing unresponding devices: end-devices\n",
+ ioc->name);
list_for_each_entry_safe(sas_device, sas_device_next,
&ioc->sas_device_list, list) {
- if (sas_device->responding) {
+ if (!sas_device->responding)
+ mpt2sas_device_remove_by_sas_address(ioc,
+ sas_device->sas_address);
+ else
sas_device->responding = 0;
- continue;
- }
- if (sas_device->starget)
- starget_printk(KERN_INFO, sas_device->starget,
- "removing: handle(0x%04x), sas_addr(0x%016llx), "
- "enclosure logical id(0x%016llx), slot(%d)\n",
- sas_device->handle,
- (unsigned long long)sas_device->sas_address,
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- _scsih_remove_device(ioc, sas_device);
}
- if (!ioc->ir_firmware)
- goto retry_expander_search;
-
- list_for_each_entry_safe(raid_device, raid_device_next,
- &ioc->raid_device_list, list) {
- if (raid_device->responding) {
- raid_device->responding = 0;
- continue;
- }
- if (raid_device->starget) {
- starget_printk(KERN_INFO, raid_device->starget,
- "removing: handle(0x%04x), wwid(0x%016llx)\n",
- raid_device->handle,
- (unsigned long long)raid_device->wwid);
- scsi_remove_target(&raid_device->starget->dev);
+ /* removing unresponding volumes */
+ if (ioc->ir_firmware) {
+ printk(MPT2SAS_INFO_FMT "removing unresponding devices: "
+ "volumes\n", ioc->name);
+ list_for_each_entry_safe(raid_device, raid_device_next,
+ &ioc->raid_device_list, list) {
+ if (!raid_device->responding)
+ _scsih_sas_volume_delete(ioc,
+ raid_device->handle);
+ else
+ raid_device->responding = 0;
}
- _scsih_raid_device_remove(ioc, raid_device);
}
-
- retry_expander_search:
- sas_expander = NULL;
- list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
- if (sas_expander->responding) {
+ /* removing unresponding expanders */
+ printk(MPT2SAS_INFO_FMT "removing unresponding devices: expanders\n",
+ ioc->name);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ INIT_LIST_HEAD(&tmp_list);
+ list_for_each_entry_safe(sas_expander, sas_expander_next,
+ &ioc->sas_expander_list, list) {
+ if (!sas_expander->responding)
+ list_move_tail(&sas_expander->list, &tmp_list);
+ else
sas_expander->responding = 0;
- continue;
- }
- mpt2sas_expander_remove(ioc, sas_expander->sas_address);
- goto retry_expander_search;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
+ list) {
+ list_del(&sas_expander->list);
+ _scsih_expander_node_remove(ioc, sas_expander);
}
printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n",
ioc->name);
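
Annotation: the expander pass above applies the same lock-narrowing idea in batch form. list_for_each_entry_safe() is required because list_move_tail() unlinks the element the iterator is standing on, and the private tmp_list lets _scsih_expander_node_remove() run without sas_node_lock held. Sketched on its own:

	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		list_del(&sas_expander->list);
		_scsih_expander_node_remove(ioc, sas_expander); /* lock not held */
	}
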
@@ -7043,6 +7088,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
struct _sas_device *sas_device;
struct _sas_node *expander_device;
static struct _raid_device *raid_device;
+ unsigned long flags;
printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
@@ -7057,8 +7103,10 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
break;
handle = le16_to_cpu(expander_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
expander_device = mpt2sas_scsih_expander_find_by_sas_address(
ioc, le64_to_cpu(expander_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (expander_device)
_scsih_refresh_expander_links(ioc, expander_device,
handle);
@@ -7080,7 +7128,9 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
break;
phys_disk_num = pd_pg0.PhysDiskNum;
handle = le16_to_cpu(pd_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (sas_device)
continue;
if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
@@ -7107,8 +7157,10 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
break;
handle = le16_to_cpu(volume_pg1.DevHandle);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_wwid(ioc,
le64_to_cpu(volume_pg1.WWID));
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (raid_device)
continue;
if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
@@ -7140,8 +7192,10 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
if (!(_scsih_is_end_device(
le32_to_cpu(sas_device_pg0.DeviceInfo))))
continue;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
le64_to_cpu(sas_device_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (sas_device)
continue;
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
@@ -7235,7 +7289,7 @@ _firmware_event_work(struct work_struct *work)
switch (fw_event->event) {
case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
- while (scsi_host_in_recovery(ioc->shost))
+ while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
ssleep(1);
_scsih_remove_unresponding_sas_devices(ioc);
_scsih_scan_for_devices_after_reset(ioc);
@@ -7313,6 +7367,13 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
return 1;
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+
event = le16_to_cpu(mpi_reply->Event);
switch (event) {
@@ -7353,14 +7414,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
case MPI2_EVENT_LOG_ENTRY_ADDED:
{
Mpi2EventDataLogEntryAdded_t *log_entry;
- u32 *log_code;
+ __le32 *log_code;
if (!ioc->is_warpdrive)
break;
log_entry = (Mpi2EventDataLogEntryAdded_t *)
mpi_reply->EventData;
- log_code = (u32 *)log_entry->LogData;
+ log_code = (__le32 *)log_entry->LogData;
if (le16_to_cpu(log_entry->LogEntryQualifier)
!= MPT2_WARPDRIVE_LOGENTRY)
@@ -7487,7 +7548,7 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
return;
if (mpt2sas_port->remote_identify.device_type ==
SAS_END_DEVICE)
- mpt2sas_device_remove(ioc,
+ mpt2sas_device_remove_by_sas_address(ioc,
mpt2sas_port->remote_identify.sas_address);
else if (mpt2sas_port->remote_identify.device_type ==
SAS_EDGE_EXPANDER_DEVICE ||
@@ -7661,7 +7722,7 @@ _scsih_remove(struct pci_dev *pdev)
&ioc->sas_hba.sas_port_list, port_list) {
if (mpt2sas_port->remote_identify.device_type ==
SAS_END_DEVICE)
- mpt2sas_device_remove(ioc,
+ mpt2sas_device_remove_by_sas_address(ioc,
mpt2sas_port->remote_identify.sas_address);
else if (mpt2sas_port->remote_identify.device_type ==
SAS_EDGE_EXPANDER_DEVICE ||
@@ -7733,11 +7794,11 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
if (rc)
_scsih_raid_device_remove(ioc, raid_device);
} else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = device;
handle = sas_device->handle;
sas_address_parent = sas_device->sas_address_parent;
sas_address = sas_device->sas_address;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_move_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -8061,8 +8122,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_thread_fail:
list_del(&ioc->list);
scsi_remove_host(shost);
- scsi_host_put(shost);
out_add_shost_fail:
+ scsi_host_put(shost);
return -ENODEV;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 831047466a5a..c6cf20f60720 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -163,12 +163,15 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
return -EIO;
}
- memset(identify, 0, sizeof(*identify));
+ memset(identify, 0, sizeof(struct sas_identify));
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
/* sas_address */
identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sas_device_pg0.PhyNum;
+
/* device_type */
switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
case MPI2_SAS_DEVICE_INFO_NO_DEVICE:
@@ -484,7 +487,7 @@ _transport_delete_port(struct MPT2SAS_ADAPTER *ioc,
ioc->logging_level |= MPT_DEBUG_TRANSPORT;
if (device_type == SAS_END_DEVICE)
- mpt2sas_device_remove(ioc, sas_address);
+ mpt2sas_device_remove_by_sas_address(ioc, sas_address);
else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
device_type == SAS_FANOUT_EXPANDER_DEVICE)
mpt2sas_expander_remove(ioc, sas_address);
@@ -792,9 +795,10 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_node = _transport_sas_node_find_by_sas_address(ioc,
sas_address_parent);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- if (!sas_node)
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return;
+ }
list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list,
port_list) {
if (mpt2sas_port->remote_identify.sas_address != sas_address)
@@ -804,8 +808,10 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
goto out;
}
out:
- if (!found)
+ if (!found) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return;
+ }
for (i = 0; i < sas_node->num_phys; i++) {
if (sas_node->phy[i].remote_identify.sas_address == sas_address)
@@ -813,6 +819,7 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
sizeof(struct sas_identify));
}
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
list_for_each_entry_safe(mpt2sas_phy, next_phy,
&mpt2sas_port->phy_list, port_siblings) {
if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
@@ -986,12 +993,14 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- if (!sas_node)
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
return;
+ }
mpt2sas_phy = &sas_node->phy[phy_number];
mpt2sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
_transport_set_identify(ioc, handle,
&mpt2sas_phy->remote_identify);
@@ -1310,17 +1319,20 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
struct _sas_device *sas_device;
unsigned long flags;
+ int rc;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
rphy->identify.sas_address);
+ if (sas_device) {
+ *identifier = sas_device->enclosure_logical_id;
+ rc = 0;
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (!sas_device)
- return -ENXIO;
-
- *identifier = sas_device->enclosure_logical_id;
- return 0;
+ return rc;
}
/**
@@ -1335,16 +1347,17 @@ _transport_get_bay_identifier(struct sas_rphy *rphy)
struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
struct _sas_device *sas_device;
unsigned long flags;
+ int rc;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
rphy->identify.sas_address);
+ if (sas_device)
+ rc = sas_device->slot;
+ else
+ rc = -ENXIO;
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (!sas_device)
- return -ENXIO;
-
- return sas_device->slot;
+ return rc;
}
/* phy control request structure */
@@ -1629,11 +1642,13 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
{
struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
Mpi2ConfigReply_t mpi_reply;
u16 ioc_status;
u16 sz;
int rc = 0;
unsigned long flags;
+ int i, discovery_active;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
if (_transport_sas_node_find_by_sas_address(ioc,
@@ -1651,7 +1666,50 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
/* handle hba phys */
- /* sas_iounit page 1 */
+ /* read sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when discovery is active */
+ for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
+ printk(MPT2SAS_ERR_FMT "discovery is active on "
+ "port = %d, phy = %d: unable to enable/disable "
+ "phys, try again later!\n", ioc->name,
+ sas_iounit_pg0->PhyData[i].Port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
@@ -1676,7 +1734,18 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
rc = -EIO;
goto out;
}
-
+ /* copy Port/PortFlags/PhyFlags from page 0 */
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ sas_iounit_pg1->PhyData[i].Port =
+ sas_iounit_pg0->PhyData[i].Port;
+ sas_iounit_pg1->PhyData[i].PortFlags =
+ (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
+ sas_iounit_pg1->PhyData[i].PhyFlags =
+ (sas_iounit_pg0->PhyData[i].PhyFlags &
+ (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED +
+ MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
+ }
if (enable)
sas_iounit_pg1->PhyData[phy->number].PhyFlags
&= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
@@ -1692,6 +1761,7 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
out:
kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
return rc;
}
@@ -1828,7 +1898,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
Mpi2SmpPassthroughRequest_t *mpi_request;
Mpi2SmpPassthroughReply_t *mpi_reply;
- int rc;
+ int rc, i;
u16 smid;
u32 ioc_state;
unsigned long timeleft;
@@ -1837,24 +1907,20 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
u8 issue_reset = 0;
dma_addr_t dma_addr_in = 0;
dma_addr_t dma_addr_out = 0;
+ dma_addr_t pci_dma_in = 0;
+ dma_addr_t pci_dma_out = 0;
+ void *pci_addr_in = NULL;
+ void *pci_addr_out = NULL;
u16 wait_state_count;
struct request *rsp = req->next_rq;
+ struct bio_vec *bvec = NULL;
if (!rsp) {
printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
"missing\n", ioc->name, __func__);
return -EINVAL;
}
-
- /* do we need to support multiple segments? */
- if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
- printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
- "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
- blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
- return -EINVAL;
- }
-
- if (ioc->shost_recovery) {
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
ioc->name, __func__);
return -EFAULT;
@@ -1872,6 +1938,59 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
ioc->transport_cmds.status = MPT2_CMD_PENDING;
+ /* Check if the request is split across multiple segments */
+ if (req->bio->bi_vcnt > 1) {
+ u32 offset = 0;
+
+ /* Allocate memory and copy the request */
+ pci_addr_out = pci_alloc_consistent(ioc->pdev,
+ blk_rq_bytes(req), &pci_dma_out);
+ if (!pci_addr_out) {
+ printk(MPT2SAS_INFO_FMT "%s(): PCI Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ bio_for_each_segment(bvec, req->bio, i) {
+ memcpy(pci_addr_out + offset,
+ page_address(bvec->bv_page) + bvec->bv_offset,
+ bvec->bv_len);
+ offset += bvec->bv_len;
+ }
+ } else {
+ dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_out) {
+ printk(MPT2SAS_INFO_FMT "%s(): DMA Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto free_pci;
+ }
+ }
+
+ /* Check if the response needs to be populated across
+ * multiple segments */
+ if (rsp->bio->bi_vcnt > 1) {
+ pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
+ &pci_dma_in);
+ if (!pci_addr_in) {
+ printk(MPT2SAS_INFO_FMT "%s(): PCI Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ } else {
+ dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_in) {
+ printk(MPT2SAS_INFO_FMT "%s(): DMA Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ }
+
wait_state_count = 0;
ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
@@ -1880,7 +1999,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
"%s: failed due to ioc not operational\n",
ioc->name, __func__);
rc = -EFAULT;
- goto out;
+ goto unmap;
}
ssleep(1);
ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
@@ -1897,7 +2016,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
rc = -EAGAIN;
- goto out;
+ goto unmap;
}
rc = 0;
@@ -1919,16 +2038,14 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
- blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
- if (!dma_addr_out) {
- mpt2sas_base_free_smid(ioc, smid);
- goto unmap;
+ if (req->bio->bi_vcnt > 1) {
+ ioc->base_add_sg_single(psge, sgl_flags |
+ (blk_rq_bytes(req) - 4), pci_dma_out);
+ } else {
+ ioc->base_add_sg_single(psge, sgl_flags |
+ (blk_rq_bytes(req) - 4), dma_addr_out);
}
- ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
- dma_addr_out);
-
/* incr sgel */
psge += ioc->sge_size;
@@ -1937,16 +2054,14 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_END_OF_LIST);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
- blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
- if (!dma_addr_in) {
- mpt2sas_base_free_smid(ioc, smid);
- goto unmap;
+ if (rsp->bio->bi_vcnt > 1) {
+ ioc->base_add_sg_single(psge, sgl_flags |
+ (blk_rq_bytes(rsp) + 4), pci_dma_in);
+ } else {
+ ioc->base_add_sg_single(psge, sgl_flags |
+ (blk_rq_bytes(rsp) + 4), dma_addr_in);
}
- ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
- dma_addr_in);
-
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
"sending smp request\n", ioc->name, __func__));
@@ -1982,6 +2097,27 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
req->resid_len = 0;
rsp->resid_len -=
le16_to_cpu(mpi_reply->ResponseDataLength);
+ /* check if the resp needs to be copied from the allocated
+ * pci mem */
+ if (rsp->bio->bi_vcnt > 1) {
+ u32 offset = 0;
+ u32 bytes_to_copy =
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+ bio_for_each_segment(bvec, rsp->bio, i) {
+ if (bytes_to_copy <= bvec->bv_len) {
+ memcpy(page_address(bvec->bv_page) +
+ bvec->bv_offset, pci_addr_in +
+ offset, bytes_to_copy);
+ break;
+ } else {
+ memcpy(page_address(bvec->bv_page) +
+ bvec->bv_offset, pci_addr_in +
+ offset, bvec->bv_len);
+ bytes_to_copy -= bvec->bv_len;
+ }
+ offset += bvec->bv_len;
+ }
+ }
} else {
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
"%s - no reply\n", ioc->name, __func__));
@@ -2003,6 +2139,15 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
PCI_DMA_BIDIRECTIONAL);
+ free_pci:
+ if (pci_addr_out)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
+ pci_dma_out);
+
+ if (pci_addr_in)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
+ pci_dma_in);
+
out:
ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
mutex_unlock(&ioc->transport_cmds.mutex);
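
Annotation: the multi-segment handling added to _transport_smp_handler() above works because the MPI SGE built for the SMP passthrough addresses a single contiguous buffer, so a scattered bio cannot be handed to the hardware directly. A sketch of the request-side bounce copy under that assumption (3.x block API, names as in the patch):

	void *buf;
	dma_addr_t dma;
	struct bio_vec *bvec;
	u32 offset = 0;
	int i;

	buf = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(req), &dma);
	if (!buf)
		return -ENOMEM;

	/* gather the scattered bio segments into the coherent buffer */
	bio_for_each_segment(bvec, req->bio, i) {
		memcpy(buf + offset,
		       page_address(bvec->bv_page) + bvec->bv_offset,
		       bvec->bv_len);
		offset += bvec->bv_len;
	}
	/* "dma" now goes into the single SGE; pci_free_consistent() on exit */
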
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 944afada61ee..c3d20c8d4abe 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -66,9 +66,10 @@ enum port_type {
/* driver compile-time configuration */
#define PM8001_MAX_CCB 512 /* max ccbs supported */
+#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */
#define PM8001_MAX_INB_NUM 1
#define PM8001_MAX_OUTB_NUM 1
-#define PM8001_CAN_QUEUE 128 /* SCSI Queue depth */
+#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */
/* unchangeable hardware details */
#define PM8001_MAX_PHYS 8 /* max. possible phys */
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 9d82ee5c10de..bf54aafc2d71 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -192,7 +192,7 @@ init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01;
for (i = 0; i < qn; i++) {
pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
- 0x00000100 | (0x00000040 << 16) | (0x00<<30);
+ PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
pm8001_ha->memoryMap.region[IB].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
@@ -218,7 +218,7 @@ init_default_table_values(struct pm8001_hba_info *pm8001_ha)
}
for (i = 0; i < qn; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
- 256 | (64 << 16) | (1<<30);
+ PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
pm8001_ha->memoryMap.region[OB].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
@@ -1245,7 +1245,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
/* Stores the new consumer index */
consumer_index = pm8001_read_32(circularQ->ci_virt);
circularQ->consumer_index = cpu_to_le32(consumer_index);
- if (((circularQ->producer_idx + bcCount) % 256) ==
+ if (((circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE) ==
le32_to_cpu(circularQ->consumer_index)) {
*messagePtr = NULL;
return -1;
@@ -1253,7 +1253,8 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
/* get memory IOMB buffer address */
offset = circularQ->producer_idx * 64;
/* increment to next bcCount element */
- circularQ->producer_idx = (circularQ->producer_idx + bcCount) % 256;
+ circularQ->producer_idx = (circularQ->producer_idx + bcCount)
+ % PM8001_MPI_QUEUE;
/* Adds that distance to the base of the region virtual address plus
the message header size*/
msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset);
@@ -1326,7 +1327,8 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
return 0;
}
/* free the circular queue buffer elements associated with the message*/
- circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256;
+ circularQ->consumer_idx = (circularQ->consumer_idx + bc)
+ % PM8001_MPI_QUEUE;
/* update the CI of outbound queue */
pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
circularQ->consumer_idx);
@@ -1383,7 +1385,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
circularQ->consumer_idx =
(circularQ->consumer_idx +
((le32_to_cpu(msgHeader_tmp)
- >> 24) & 0x1f)) % 256;
+ >> 24) & 0x1f))
+ % PM8001_MPI_QUEUE;
msgHeader_tmp = 0;
pm8001_write_32(msgHeader, 0, 0);
/* update the CI of outbound queue */
@@ -1396,7 +1399,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
circularQ->consumer_idx =
(circularQ->consumer_idx +
((le32_to_cpu(msgHeader_tmp) >> 24) &
- 0x1f)) % 256;
+ 0x1f)) % PM8001_MPI_QUEUE;
msgHeader_tmp = 0;
pm8001_write_32(msgHeader, 0, 0);
/* update the CI of outbound queue */
@@ -3357,7 +3360,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct fw_control_ex fw_control_context;
struct fw_flash_Update_resp *ppayload =
(struct fw_flash_Update_resp *)(piomb + 4);
- u32 tag = ppayload->tag;
+ u32 tag = le32_to_cpu(ppayload->tag);
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
status = le32_to_cpu(ppayload->status);
memcpy(&fw_control_context,
@@ -3703,8 +3706,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
*/
static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
- u32 pHeader = (u32)*(u32 *)piomb;
- u8 opc = (u8)(pHeader & 0xFFF);
+ __le32 pHeader = *(__le32 *)piomb;
+ u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
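
Annotation: the pm8001 changes above replace a hardcoded depth of 256 with PM8001_MPI_QUEUE so the full-queue test and both index updates stay in sync with the enlarged queues. A sketch of the ring arithmetic, indices shown in host byte order for brevity (the driver keeps consumer_index little-endian and converts at the point of use):

	#define PM8001_MPI_QUEUE 1024	/* entries per inbound/outbound queue */

	/* full when advancing the producer would collide with the consumer */
	if (((producer_idx + bcCount) % PM8001_MPI_QUEUE) == consumer_idx)
		return -1;		/* no free IOMB slots */

	offset = producer_idx * 64;	/* each IOMB element is 64 bytes */
	producer_idx = (producer_idx + bcCount) % PM8001_MPI_QUEUE;
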
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 1a4611eb0321..d437309cb1e1 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -599,7 +599,7 @@ struct fw_flash_Update_req {
*
*/
struct fw_flash_Update_resp {
- dma_addr_t tag;
+ __le32 tag;
__le32 status;
u32 reserved[13];
} __attribute__((packed, aligned(4)));
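
Annotation: changing tag from dma_addr_t to __le32 matters twice over: dma_addr_t can be 64 bits wide, which breaks the wire layout of this packed reply, and __le32 is a sparse-checked bitwise type, so a read without le32_to_cpu() now warns. The resulting usage pattern:

	struct fw_flash_Update_resp *resp = payload;

	u32 tag = le32_to_cpu(resp->tag);	/* host byte order from here on */
	u32 status = le32_to_cpu(resp->status);
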
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 36efaa7c3a54..0267c22f8741 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -235,15 +235,15 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->memoryMap.region[PI].alignment = 4;
/* MPI Memory region 5 inbound queues */
- pm8001_ha->memoryMap.region[IB].num_elements = 256;
+ pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
pm8001_ha->memoryMap.region[IB].element_size = 64;
- pm8001_ha->memoryMap.region[IB].total_len = 256 * 64;
+ pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
pm8001_ha->memoryMap.region[IB].alignment = 64;
- /* MPI Memory region 6 inbound queues */
- pm8001_ha->memoryMap.region[OB].num_elements = 256;
+ /* MPI Memory region 6 outbound queues */
+ pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
pm8001_ha->memoryMap.region[OB].element_size = 64;
- pm8001_ha->memoryMap.region[OB].total_len = 256 * 64;
+ pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
pm8001_ha->memoryMap.region[OB].alignment = 64;
/* Memory region write DMA*/
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 07322ecff90d..61c82a345f82 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -90,6 +90,12 @@ unsigned int scsi_logging_level;
EXPORT_SYMBOL(scsi_logging_level);
#endif
+#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD)
+/* sd and scsi_pm need to coordinate flushing async actions */
+LIST_HEAD(scsi_sd_probe_domain);
+EXPORT_SYMBOL(scsi_sd_probe_domain);
+#endif
+
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
* encouraged once assigned by ANSI/INCITS T10
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5dfd7495d1a1..62ddfd31d4ce 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2348,10 +2348,14 @@ EXPORT_SYMBOL(scsi_device_quiesce);
*
* Must be called with user context, may sleep.
*/
-void
-scsi_device_resume(struct scsi_device *sdev)
+void scsi_device_resume(struct scsi_device *sdev)
{
- if(scsi_device_set_state(sdev, SDEV_RUNNING))
+ /* check if the device state was mutated prior to resume, and if
+ * so assume the state is being managed elsewhere (for example
+ * device deleted during suspend)
+ */
+ if (sdev->sdev_state != SDEV_QUIESCE ||
+ scsi_device_set_state(sdev, SDEV_RUNNING))
return;
scsi_run_queue(sdev->request_queue);
}
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index c4670642d023..f661a41fa4c6 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -97,7 +97,7 @@ static int scsi_bus_prepare(struct device *dev)
{
if (scsi_is_sdev_device(dev)) {
/* sd probing uses async_schedule. Wait until it finishes. */
- async_synchronize_full();
+ async_synchronize_full_domain(&scsi_sd_probe_domain);
} else if (scsi_is_host_device(dev)) {
/* Wait until async scanning is finished */
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index be4fa6d179b1..07ce3f51701d 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -163,6 +163,8 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
#endif /* CONFIG_PM_RUNTIME */
+extern struct list_head scsi_sd_probe_domain;
+
/*
* internal scsi timeout functions: for use by mid-layer and transport
* classes.
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 80fbe2ac0b47..579760420d53 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2808,17 +2808,20 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
FC_RPORT_DEVLOSS_PENDING |
FC_RPORT_DEVLOSS_CALLBK_DONE);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
/* if target, initiate a scan */
if (rport->scsi_target_id != -1) {
+ scsi_target_unblock(&rport->dev);
+
+ spin_lock_irqsave(shost->host_lock,
+ flags);
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost,
&rport->scan_work);
spin_unlock_irqrestore(shost->host_lock,
flags);
- scsi_target_unblock(&rport->dev);
- } else
- spin_unlock_irqrestore(shost->host_lock,
- flags);
+ }
fc_bsg_goose_queue(rport);
@@ -2876,16 +2879,17 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
if (fci->f->dd_fcrport_size)
memset(rport->dd_data, 0,
fci->f->dd_fcrport_size);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
+ scsi_target_unblock(&rport->dev);
- if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
/* initiate a scan of the target */
+ spin_lock_irqsave(shost->host_lock, flags);
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
spin_unlock_irqrestore(shost->host_lock, flags);
- scsi_target_unblock(&rport->dev);
- } else
- spin_unlock_irqrestore(shost->host_lock, flags);
-
+ }
return rport;
}
}
@@ -3083,12 +3087,12 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
/* ensure any stgt delete functions are done */
fc_flush_work(shost);
+ scsi_target_unblock(&rport->dev);
/* initiate a scan of the target */
spin_lock_irqsave(shost->host_lock, flags);
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
spin_unlock_irqrestore(shost->host_lock, flags);
- scsi_target_unblock(&rport->dev);
}
}
EXPORT_SYMBOL(fc_remote_port_rolechg);
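
Annotation: all three fc_remote_port hunks above follow the same rule: scsi_target_unblock() may sleep, so it must not run under shost->host_lock. The lock is dropped, the unblock issued, and the lock retaken only to flag and queue the scan. Shape of the fixed sequence:

	spin_unlock_irqrestore(shost->host_lock, flags);
	scsi_target_unblock(&rport->dev);	/* may sleep */

	spin_lock_irqsave(shost->host_lock, flags);
	rport->flags |= FC_RPORT_SCAN_PENDING;
	scsi_queue_work(shost, &rport->scan_work);
	spin_unlock_irqrestore(shost->host_lock, flags);
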
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index a2715c31e754..cf08071a9b6e 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1010,10 +1010,10 @@ spi_dv_device(struct scsi_device *sdev)
u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
- if (unlikely(scsi_device_get(sdev)))
+ if (unlikely(spi_dv_in_progress(starget)))
return;
- if (unlikely(spi_dv_in_progress(starget)))
+ if (unlikely(scsi_device_get(sdev)))
return;
spi_dv_in_progress(starget) = 1;
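
Annotation: the spi swap is a reference-count fix. With the old order, returning because DV was already in progress leaked the reference taken by scsi_device_get(). Checking the flag first keeps every early return on the no-cleanup-needed side:

	if (unlikely(spi_dv_in_progress(starget)))
		return;			/* nothing acquired yet */
	if (unlikely(scsi_device_get(sdev)))
		return;			/* get failed, nothing to put */
	spi_dv_in_progress(starget) = 1;
	/* ... run domain validation, then scsi_device_put(sdev) on the way out */
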
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5ba5c2a9e8e9..6f0a4c612b3b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -65,6 +65,7 @@
#include <scsi/scsicam.h>
#include "sd.h"
+#include "scsi_priv.h"
#include "scsi_logging.h"
MODULE_AUTHOR("Eric Youngdale");
@@ -2722,7 +2723,7 @@ static int sd_probe(struct device *dev)
dev_set_drvdata(dev, sdkp);
get_device(&sdkp->dev); /* prevent release before async_schedule */
- async_schedule(sd_probe_async, sdkp);
+ async_schedule_domain(sd_probe_async, sdkp, &scsi_sd_probe_domain);
return 0;
@@ -2756,7 +2757,7 @@ static int sd_remove(struct device *dev)
sdkp = dev_get_drvdata(dev);
scsi_autopm_get_device(sdkp->device);
- async_synchronize_full();
+ async_synchronize_full_domain(&scsi_sd_probe_domain);
blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
device_del(&sdkp->dev);
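
Annotation: the scsi.c, scsi_pm.c, scsi_priv.h and sd.c changes above together introduce a private async domain: sd probing is scheduled into it, and PM prepare waits only on that domain rather than on every async work in the system. A self-contained sketch of the 3.x API, where the my_* names and dev_data are hypothetical:

	#include <linux/async.h>

	static LIST_HEAD(my_probe_domain);	/* a domain is just a list head */

	static void my_probe_async(void *data, async_cookie_t cookie)
	{
		/* slow probe work runs here, off the caller's context */
	}

	static int my_probe(void *dev_data)
	{
		/* queue into the private domain, not the global async queue */
		async_schedule_domain(my_probe_async, dev_data, &my_probe_domain);
		return 0;
	}

	static void my_prepare_suspend(void)
	{
		/* flush only this driver's probe work */
		async_synchronize_full_domain(&my_probe_domain);
	}
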
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index eacd46bb36b9..9c5c5f2b3962 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -104,7 +104,7 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
static int sg_add(struct device *, struct class_interface *);
static void sg_remove(struct device *, struct class_interface *);
-static DEFINE_MUTEX(sg_mutex);
+static DEFINE_SPINLOCK(sg_open_exclusive_lock);
static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
@@ -137,13 +137,15 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
char orphan; /* 1 -> drop on sight, 0 -> normal */
char sg_io_owned; /* 1 -> packet belongs to SG_IO */
- volatile char done; /* 0->before bh, 1->before read, 2->read */
+ /* done protected by rq_list_lock */
+ char done; /* 0->before bh, 1->before read, 2->read */
struct request *rq;
struct bio *bio;
struct execute_work ew;
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
+ /* sfd_siblings is protected by sg_index_lock */
struct list_head sfd_siblings;
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
@@ -157,7 +159,6 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char low_dma; /* as in parent but possibly overridden to 1 */
char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
- volatile char closed; /* 1 -> fd closed but request(s) outstanding */
char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
@@ -171,9 +172,11 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */
+ /* sfds is protected by sg_index_lock */
struct list_head sfds;
volatile char detached; /* 0->attached, 1->detached pending removal */
- volatile char exclude; /* opened for exclusive access */
+ /* exclude protected by sg_open_exclusive_lock */
+ char exclude; /* opened for exclusive access */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
@@ -221,6 +224,38 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
+static int get_exclude(Sg_device *sdp)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+ ret = sdp->exclude;
+ spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+ return ret;
+}
+
+static int set_exclude(Sg_device *sdp, char val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+ sdp->exclude = val;
+ spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+ return val;
+}
+
+static int sfds_list_empty(Sg_device *sdp)
+{
+ unsigned long flags;
+ int ret;
+
+ read_lock_irqsave(&sg_index_lock, flags);
+ ret = list_empty(&sdp->sfds);
+ read_unlock_irqrestore(&sg_index_lock, flags);
+ return ret;
+}
+
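
Annotation: these helpers are the whole fix for the former volatile flags: volatile guarantees neither atomicity nor ordering, while funnelling every access through sg_open_exclusive_lock does. Note that set_exclude() returns the value written, which is what lets it sit inside the wait_event_interruptible() condition below. The same setter shape for any shared flag (hypothetical names):

	static DEFINE_SPINLOCK(flag_lock);
	static char shared_flag;

	static int set_flag(char val)
	{
		unsigned long flags;

		spin_lock_irqsave(&flag_lock, flags);
		shared_flag = val;
		spin_unlock_irqrestore(&flag_lock, flags);
		return val;	/* non-zero result usable as a wait condition */
	}
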
static int
sg_open(struct inode *inode, struct file *filp)
{
@@ -232,7 +267,6 @@ sg_open(struct inode *inode, struct file *filp)
int res;
int retval;
- mutex_lock(&sg_mutex);
nonseekable_open(inode, filp);
SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
sdp = sg_get_dev(dev);
@@ -264,25 +298,22 @@ sg_open(struct inode *inode, struct file *filp)
retval = -EPERM; /* Can't lock it with read only access */
goto error_out;
}
- if (!list_empty(&sdp->sfds) && (flags & O_NONBLOCK)) {
+ if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
retval = -EBUSY;
goto error_out;
}
- res = 0;
- __wait_event_interruptible(sdp->o_excl_wait,
- ((!list_empty(&sdp->sfds) || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
+ res = wait_event_interruptible(sdp->o_excl_wait,
+ ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
if (res) {
retval = res; /* -ERESTARTSYS because signal hit process */
goto error_out;
}
- } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
+ } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
if (flags & O_NONBLOCK) {
retval = -EBUSY;
goto error_out;
}
- res = 0;
- __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
- res);
+ res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
if (res) {
retval = res; /* -ERESTARTSYS because signal hit process */
goto error_out;
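
Annotation: every sg.c wait in this patch is the same mechanical conversion: the old __wait_event_interruptible(wq, cond, ret) triple becomes ret = wait_event_interruptible(wq, cond), which evaluates cond with proper wakeup ordering and returns 0 on success or -ERESTARTSYS when a signal arrives first. In isolation:

	int ret;

	ret = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
	if (ret)
		return ret;	/* -ERESTARTSYS: a signal hit the process */
	/* condition was true: the exclusive open has been released */
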
@@ -292,7 +323,7 @@ sg_open(struct inode *inode, struct file *filp)
retval = -ENODEV;
goto error_out;
}
- if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
+ if (sfds_list_empty(sdp)) { /* no existing opens on this device */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
@@ -301,7 +332,7 @@ sg_open(struct inode *inode, struct file *filp)
filp->private_data = sfp;
else {
if (flags & O_EXCL) {
- sdp->exclude = 0; /* undo if error */
+ set_exclude(sdp, 0); /* undo if error */
wake_up_interruptible(&sdp->o_excl_wait);
}
retval = -ENOMEM;
@@ -317,7 +348,6 @@ sdp_put:
sg_put:
if (sdp)
sg_put_dev(sdp);
- mutex_unlock(&sg_mutex);
return retval;
}
@@ -332,9 +362,7 @@ sg_release(struct inode *inode, struct file *filp)
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
- sfp->closed = 1;
-
- sdp->exclude = 0;
+ set_exclude(sdp, 0);
wake_up_interruptible(&sdp->o_excl_wait);
scsi_autopm_put_device(sdp->device);
@@ -398,19 +426,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
retval = -EAGAIN;
goto free_old_hdr;
}
- while (1) {
- retval = 0; /* following macro beats race condition */
- __wait_event_interruptible(sfp->read_wait,
- (sdp->detached ||
- (srp = sg_get_rq_mark(sfp, req_pack_id))),
- retval);
- if (sdp->detached) {
- retval = -ENODEV;
- goto free_old_hdr;
- }
- if (0 == retval)
- break;
-
+ retval = wait_event_interruptible(sfp->read_wait,
+ (sdp->detached ||
+ (srp = sg_get_rq_mark(sfp, req_pack_id))));
+ if (sdp->detached) {
+ retval = -ENODEV;
+ goto free_old_hdr;
+ }
+ if (retval) {
/* -ERESTARTSYS as signal hit process */
goto free_old_hdr;
}
@@ -771,7 +794,18 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
return 0;
}
-static int
+static int srp_done(Sg_fd *sfp, Sg_request *srp)
+{
+ unsigned long flags;
+ int ret;
+
+ read_lock_irqsave(&sfp->rq_list_lock, flags);
+ ret = srp->done;
+ read_unlock_irqrestore(&sfp->rq_list_lock, flags);
+ return ret;
+}
+
+static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
void __user *p = (void __user *)arg;
@@ -791,40 +825,30 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
switch (cmd_in) {
case SG_IO:
- {
- int blocking = 1; /* ignore O_NONBLOCK flag */
-
- if (sdp->detached)
- return -ENODEV;
- if (!scsi_block_when_processing_errors(sdp->device))
- return -ENXIO;
- if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
- return -EFAULT;
- result =
- sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
- blocking, read_only, 1, &srp);
- if (result < 0)
- return result;
- while (1) {
- result = 0; /* following macro to beat race condition */
- __wait_event_interruptible(sfp->read_wait,
- (srp->done || sdp->detached),
- result);
- if (sdp->detached)
- return -ENODEV;
- write_lock_irq(&sfp->rq_list_lock);
- if (srp->done) {
- srp->done = 2;
- write_unlock_irq(&sfp->rq_list_lock);
- break;
- }
- srp->orphan = 1;
- write_unlock_irq(&sfp->rq_list_lock);
- return result; /* -ERESTARTSYS because signal hit process */
- }
+ if (sdp->detached)
+ return -ENODEV;
+ if (!scsi_block_when_processing_errors(sdp->device))
+ return -ENXIO;
+ if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
+ return -EFAULT;
+ result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
+ 1, read_only, 1, &srp);
+ if (result < 0)
+ return result;
+ result = wait_event_interruptible(sfp->read_wait,
+ (srp_done(sfp, srp) || sdp->detached));
+ if (sdp->detached)
+ return -ENODEV;
+ write_lock_irq(&sfp->rq_list_lock);
+ if (srp->done) {
+ srp->done = 2;
+ write_unlock_irq(&sfp->rq_list_lock);
result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
return (result < 0) ? result : 0;
}
+ srp->orphan = 1;
+ write_unlock_irq(&sfp->rq_list_lock);
+ return result; /* -ERESTARTSYS because signal hit process */
case SG_SET_TIMEOUT:
result = get_user(val, ip);
if (result)
@@ -1091,18 +1115,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
}
}
-static long
-sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&sg_mutex);
- ret = sg_ioctl(filp, cmd_in, arg);
- mutex_unlock(&sg_mutex);
-
- return ret;
-}
-
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -1136,8 +1148,11 @@ sg_poll(struct file *filp, poll_table * wait)
int count = 0;
unsigned long iflags;
- if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
- || sfp->closed)
+ sfp = filp->private_data;
+ if (!sfp)
+ return POLLERR;
+ sdp = sfp->parentdp;
+ if (!sdp)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
@@ -1347,7 +1362,7 @@ static const struct file_operations sg_fops = {
.read = sg_read,
.write = sg_write,
.poll = sg_poll,
- .unlocked_ioctl = sg_unlocked_ioctl,
+ .unlocked_ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sg_compat_ioctl,
#endif
@@ -2312,7 +2327,7 @@ struct sg_proc_leaf {
const struct file_operations * fops;
};
-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
{"allow_dio", &adio_fops},
{"debug", &debug_fops},
{"def_reserved_size", &dressz_fops},
@@ -2332,7 +2347,7 @@ sg_proc_init(void)
if (!sg_proc_sgp)
return 1;
for (k = 0; k < num_leaves; ++k) {
- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
}
@@ -2533,9 +2548,9 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
fp->reserve.bufflen,
(int) fp->reserve.k_use_sg,
(int) fp->low_dma);
- seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
+ seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
- (int) fp->keep_orphan, (int) fp->closed);
+ (int) fp->keep_orphan);
for (m = 0, srp = fp->headrp;
srp != NULL;
++m, srp = srp->nextrp) {
@@ -2612,7 +2627,7 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
scsidp->lun,
scsidp->host->hostt->emulated);
seq_printf(s, " sg_tablesize=%d excl=%d\n",
- sdp->sg_tablesize, sdp->exclude);
+ sdp->sg_tablesize, get_exclude(sdp));
sg_proc_debug_helper(s, sdp);
}
read_unlock_irqrestore(&sg_index_lock, iflags);
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index ea35632b986c..b548923785ed 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -35,8 +35,8 @@ struct st_request {
/* The tape buffer descriptor. */
struct st_buffer {
unsigned char dma; /* DMA-able buffer */
- unsigned char do_dio; /* direct i/o set up? */
unsigned char cleared; /* internal buffer cleared after open? */
+ unsigned short do_dio; /* direct i/o set up? */
int buffer_size;
int buffer_blocks;
int buffer_bytes;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 83a1972a1999..528d52beaa1c 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -785,12 +785,22 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
/*
* If there is an error, offline the device since all
* error recovery strategies would have already been
- * deployed on the host side.
+ * deployed on the host side. However, if the command
+ * was a pass-through command, deal with it appropriately.
*/
- if (vm_srb->srb_status == SRB_STATUS_ERROR)
- scmnd->result = DID_TARGET_FAILURE << 16;
- else
- scmnd->result = vm_srb->scsi_status;
+ scmnd->result = vm_srb->scsi_status;
+
+ if (vm_srb->srb_status == SRB_STATUS_ERROR) {
+ switch (scmnd->cmnd[0]) {
+ case ATA_16:
+ case ATA_12:
+ set_host_byte(scmnd, DID_PASSTHROUGH);
+ break;
+ default:
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ }
+ }
+
/*
* If the LUN is invalid, remove the device.
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 52b96e8bf92e..4e010b727818 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1032,11 +1032,11 @@ static int ufshcd_initialize_hba(struct ufs_hba *hba)
return -EIO;
/* Configure UTRL and UTMRL base address registers */
- writel(hba->utrdl_dma_addr,
- (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
writel(lower_32_bits(hba->utrdl_dma_addr),
+ (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
+ writel(upper_32_bits(hba->utrdl_dma_addr),
(hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
- writel(hba->utmrdl_dma_addr,
+ writel(lower_32_bits(hba->utmrdl_dma_addr),
(hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
writel(upper_32_bits(hba->utmrdl_dma_addr),
(hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
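
Annotation: the ufshcd hunk fixes which half of each 64-bit list base lands in which register. The general shape for programming a 64-bit DMA address into a low/high pair of 32-bit registers (REG_BASE_L and REG_BASE_H are placeholder names):

	writel(lower_32_bits(dma_addr), mmio_base + REG_BASE_L); /* bits 31:0 */
	writel(upper_32_bits(dma_addr), mmio_base + REG_BASE_H); /* bits 63:32 */
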
@@ -1160,7 +1160,7 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
- if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL ||
+ if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
task_result = FAILED;
} else {
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index 988ba06b3ad6..c1260d80ef30 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -661,6 +661,8 @@ struct iscsi_reject {
#define ISCSI_DEF_TIME2WAIT 2
+#define ISCSI_NAME_LEN 224
+
/************************* RFC 3720 End *****************************/
#endif /* ISCSI_PROTO_H */
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index a577a833603d..be3eb0bf1ac0 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -103,6 +103,7 @@ enum sas_dev_type {
};
enum sas_protocol {
+ SAS_PROTOCOL_NONE = 0,
SAS_PROTOCOL_SATA = 0x01,
SAS_PROTOCOL_SMP = 0x02,
SAS_PROTOCOL_STP = 0x04,