From c5e039be7e81168a9156e801cfef2adae72e775b Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:21:29 +0530 Subject: [SCSI] mpt2sas: Driver will use sas address instead of handle as a lookup The device driver was not updating device handles in all cases across diag resets. To fix this issue, the driver is converted to using the sas address instead of the handle as the lookup reference to the parent expander or sas_host. Also, for both expanders and the sas host, the phy handle will be one unique handle. In the sas host case the phy handle can be different for every phy, so the change is to set the handle to the handle of the first phy; every phy will report one single sas address (that of phy 0) instead of a different sas address for every phy (the previous implementation). Making one consistent sas address for all ports directly attached to the sas host gives a better user experience when using the udev /dev/disk/by-path dev nodes. Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.h | 20 +- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 373 ++++++++++++++++++------------- drivers/scsi/mpt2sas/mpt2sas_transport.c | 72 +++--- 3 files changed, 255 insertions(+), 210 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 0cf6bc236e4d..fa99ff204e46 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -278,7 +278,7 @@ struct _internal_cmd { * @sas_address: device sas address * @device_name: retrieved from the SAS IDENTIFY frame. * @handle: device handle - * @parent_handle: handle to parent device + * @sas_address_parent: sas address of parent expander or sas host * @enclosure_handle: enclosure handle * @enclosure_logical_id: enclosure logical identifier * @volume_handle: volume handle (valid when hidden raid member) @@ -296,7 +296,7 @@ struct _sas_device { u64 sas_address; u64 device_name; u16 handle; - u16 parent_handle; + u64 sas_address_parent; u16 enclosure_handle; u64 enclosure_logical_id; u16 volume_handle; @@ -352,8 +352,6 @@ struct _boot_device { /** * struct _sas_port - wide/narrow sas port information * @port_list: list of ports belonging to expander - * @handle: device handle for this port - * @sas_address: sas address of this port * @num_phys: number of phys belonging to this port * @remote_identify: attached device identification * @rphy: sas transport rphy object @@ -362,8 +360,6 @@ struct _boot_device { */ struct _sas_port { struct list_head port_list; - u16 handle; - u64 sas_address; u8 num_phys; struct sas_identify remote_identify; struct sas_rphy *rphy; @@ -398,7 +394,7 @@ struct _sas_phy { * @num_phys: number phys belonging to this sas_host/expander * @sas_address: sas address of this sas_host/expander * @handle: handle for this sas_host/expander - * @parent_handle: parent handle + * @sas_address_parent: sas address of parent expander or sas host * @enclosure_handle: handle for this a member of an enclosure * @device_info: bitwise defining capabilities of this sas_host/expander * @responding: used in _scsih_expander_device_mark_responding @@ -411,7 +407,7 @@ struct _sas_node { u8 num_phys; u64 sas_address; u16 handle; - u16 parent_handle; + u64 sas_address_parent; u16 enclosure_handle; u64 enclosure_logical_id; u8 responding; @@ -890,15 +886,15 @@ void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, u8 mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); struct
_sas_port *mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, - u16 handle, u16 parent_handle); + u16 handle, u64 sas_address); void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, - u16 parent_handle); + u64 sas_address_parent); int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev); int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev); -void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle, - u16 attached_handle, u8 phy_number, u8 link_rate); +void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate); extern struct sas_function_template mpt2sas_transport_functions; extern struct scsi_transport_template *mpt2sas_transport_template; extern int scsi_internal_device_block(struct scsi_device *sdev); diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 86ab32d7ab15..8822cda852ba 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -316,6 +316,47 @@ _scsih_is_boot_device(u64 sas_address, u64 device_name, return rc; } +/** + * _scsih_get_sas_address - set the sas_address for given device handle + * @handle: device handle + * @sas_address: sas address + * + * Returns 0 success, non-zero when failure + */ +static int +_scsih_get_sas_address(struct MPT2SAS_ADAPTER *ioc, u16 handle, + u64 *sas_address) +{ + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 ioc_status; + + if (handle <= ioc->sas_hba.num_phys) { + *sas_address = ioc->sas_hba.sas_address; + return 0; + } else + *sas_address = 0; + + if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)" + "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status, + __FILE__, __LINE__, __func__); + return -EIO; + } + + *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + return 0; +} + /** * _scsih_determine_boot_device - determine boot device. 
* @ioc: per adapter object @@ -510,8 +551,6 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc, struct _sas_device *sas_device) { unsigned long flags; - u16 handle, parent_handle; - u64 sas_address; dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: handle" "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, @@ -521,10 +560,8 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc, list_add_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - handle = sas_device->handle; - parent_handle = sas_device->parent_handle; - sas_address = sas_device->sas_address; - if (!mpt2sas_transport_port_add(ioc, handle, parent_handle)) + if (!mpt2sas_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent)) _scsih_sas_device_remove(ioc, sas_device); } @@ -552,31 +589,6 @@ _scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc, _scsih_determine_boot_device(ioc, sas_device, 0); } -/** - * mpt2sas_scsih_expander_find_by_handle - expander device search - * @ioc: per adapter object - * @handle: expander handle (assigned by firmware) - * Context: Calling function should acquire ioc->sas_device_lock - * - * This searches for expander device based on handle, then returns the - * sas_node object. - */ -struct _sas_node * -mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) -{ - struct _sas_node *sas_expander, *r; - - r = NULL; - list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { - if (sas_expander->handle != handle) - continue; - r = sas_expander; - goto out; - } - out: - return r; -} - /** * _scsih_raid_device_find_by_id - raid device search * @ioc: per adapter object @@ -698,6 +710,31 @@ _scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->raid_device_lock, flags); } +/** + * mpt2sas_scsih_expander_find_by_handle - expander device search + * @ioc: per adapter object + * @handle: expander handle (assigned by firmware) + * Context: Calling function should acquire ioc->sas_device_lock + * + * This searches for expander device based on handle, then returns the + * sas_node object. + */ +struct _sas_node * +mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_node *sas_expander, *r; + + r = NULL; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->handle != handle) + continue; + r = sas_expander; + goto out; + } + out: + return r; +} + /** * mpt2sas_scsih_expander_find_by_sas_address - expander device search * @ioc: per adapter object @@ -3344,7 +3381,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) /** * _scsih_sas_host_refresh - refreshing sas host object contents * @ioc: per adapter object - * @update: update link information * Context: user * * During port enable, fw will send topology events for every device. Its @@ -3354,13 +3390,14 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) * Return nothing. 
*/ static void -_scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update) +_scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc) { u16 sz; u16 ioc_status; int i; Mpi2ConfigReply_t mpi_reply; Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "updating handles for sas_host(0x%016llx)\n", @@ -3374,27 +3411,24 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update) ioc->name, __FILE__, __LINE__, __func__); return; } - if (!(mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, - sas_iounit_pg0, sz))) { - ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & - MPI2_IOCSTATUS_MASK; - if (ioc_status != MPI2_IOCSTATUS_SUCCESS) - goto out; - for (i = 0; i < ioc->sas_hba.num_phys ; i++) { - ioc->sas_hba.phy[i].handle = - le16_to_cpu(sas_iounit_pg0->PhyData[i]. - ControllerDevHandle); - if (update) - mpt2sas_transport_update_links( - ioc, - ioc->sas_hba.phy[i].handle, - le16_to_cpu(sas_iounit_pg0->PhyData[i]. - AttachedDevHandle), i, - sas_iounit_pg0->PhyData[i]. - NegotiatedLinkRate >> 4); - } - } + if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + if (i == 0) + ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> + PhyData[0].ControllerDevHandle); + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. + AttachedDevHandle); + mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address, + attached_handle, i, sas_iounit_pg0->PhyData[i]. + NegotiatedLinkRate >> 4); + } out: kfree(sas_iounit_pg0); } @@ -3507,19 +3541,21 @@ _scsih_sas_host_add(struct MPT2SAS_ADAPTER *ioc) ioc->name, __FILE__, __LINE__, __func__); goto out; } - ioc->sas_hba.phy[i].handle = - le16_to_cpu(sas_iounit_pg0->PhyData[i].ControllerDevHandle); + + if (i == 0) + ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> + PhyData[0].ControllerDevHandle); + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; ioc->sas_hba.phy[i].phy_id = i; mpt2sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], phy_pg0, ioc->sas_hba.parent_dev); } if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, - MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.phy[0].handle))) { + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } - ioc->sas_hba.handle = le16_to_cpu(sas_device_pg0.DevHandle); ioc->sas_hba.enclosure_handle = le16_to_cpu(sas_device_pg0.EnclosureHandle); ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); @@ -3562,7 +3598,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) Mpi2SasEnclosurePage0_t enclosure_pg0; u32 ioc_status; u16 parent_handle; - __le64 sas_address; + __le64 sas_address, sas_address_parent = 0; int i; unsigned long flags; struct _sas_port *mpt2sas_port = NULL; @@ -3591,10 +3627,16 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) /* handle out of order topology events */ parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); - if (parent_handle >= ioc->sas_hba.num_phys) { + if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) + != 0) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if (sas_address_parent != ioc->sas_hba.sas_address) { 
spin_lock_irqsave(&ioc->sas_node_lock, flags); - sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, - parent_handle); + sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, + sas_address_parent); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (!sas_expander) { rc = _scsih_expander_add(ioc, parent_handle); @@ -3622,14 +3664,12 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) sas_expander->handle = handle; sas_expander->num_phys = expander_pg0.NumPhys; - sas_expander->parent_handle = parent_handle; - sas_expander->enclosure_handle = - le16_to_cpu(expander_pg0.EnclosureHandle); + sas_expander->sas_address_parent = sas_address_parent; sas_expander->sas_address = sas_address; printk(MPT2SAS_INFO_FMT "expander_add: handle(0x%04x)," " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name, - handle, sas_expander->parent_handle, (unsigned long long) + handle, parent_handle, (unsigned long long) sas_expander->sas_address, sas_expander->num_phys); if (!sas_expander->num_phys) @@ -3645,7 +3685,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) INIT_LIST_HEAD(&sas_expander->sas_port_list); mpt2sas_port = mpt2sas_transport_port_add(ioc, handle, - sas_expander->parent_handle); + sas_address_parent); if (!mpt2sas_port) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); @@ -3691,7 +3731,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) if (mpt2sas_port) mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, - sas_expander->parent_handle); + sas_address_parent); kfree(sas_expander); return rc; } @@ -3699,12 +3739,12 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) /** * _scsih_expander_remove - removing expander object * @ioc: per adapter object - * @handle: expander handle + * @sas_address: expander sas_address * * Return nothing. */ static void -_scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle) +_scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address) { struct _sas_node *sas_expander; unsigned long flags; @@ -3713,7 +3753,8 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle) return; spin_lock_irqsave(&ioc->sas_node_lock, flags); - sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle); + sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, + sas_address); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); _scsih_expander_node_remove(ioc, sas_expander); } @@ -3805,8 +3846,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd) } sas_device->handle = handle; - sas_device->parent_handle = - le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (_scsih_get_sas_address(ioc, le16_to_cpu + (sas_device_pg0.ParentDevHandle), + &sas_device->sas_address_parent) != 0) + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); sas_device->enclosure_handle = le16_to_cpu(sas_device_pg0.EnclosureHandle); sas_device->slot = @@ -3836,43 +3880,39 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd) /** * _scsih_remove_device - removing sas device object * @ioc: per adapter object - * @handle: sas device handle + * @sas_device: the sas_device object * * Return nothing. 
*/ static void -_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) +_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device + *sas_device) { struct MPT2SAS_TARGET *sas_target_priv_data; - struct _sas_device *sas_device; - unsigned long flags; Mpi2SasIoUnitControlReply_t mpi_reply; Mpi2SasIoUnitControlRequest_t mpi_request; - u16 device_handle; + u16 device_handle, handle; - /* lookup sas_device */ - spin_lock_irqsave(&ioc->sas_device_lock, flags); - sas_device = _scsih_sas_device_find_by_handle(ioc, handle); - if (!sas_device) { - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) return; - } - dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle" - "(0x%04x)\n", ioc->name, __func__, handle)); + handle = sas_device->handle; + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle(0x%04x)," + " sas_addr(0x%016llx)\n", ioc->name, __func__, handle, + (unsigned long long) sas_device->sas_address)); if (sas_device->starget && sas_device->starget->hostdata) { sas_target_priv_data = sas_device->starget->hostdata; sas_target_priv_data->deleted = 1; } - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - if (ioc->remove_host) + if (ioc->remove_host || ioc->shost_recovery || !handle) goto out; if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) { dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip " - "target_reset handle(0x%04x)\n", ioc->name, handle)); + "target_reset handle(0x%04x)\n", ioc->name, + handle)); goto skip_tr; } @@ -3925,10 +3965,10 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) _scsih_ublock_io_device(ioc, handle); mpt2sas_transport_port_remove(ioc, sas_device->sas_address, - sas_device->parent_handle); + sas_device->sas_address_parent); printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr" - "(0x%016llx)\n", ioc->name, sas_device->handle, + "(0x%016llx)\n", ioc->name, handle, (unsigned long long) sas_device->sas_address); _scsih_sas_device_remove(ioc, sas_device); @@ -4031,8 +4071,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u16 reason_code; u8 phy_number; struct _sas_node *sas_expander; + struct _sas_device *sas_device; + u64 sas_address; unsigned long flags; - u8 link_rate_; + u8 link_rate; Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING @@ -4040,10 +4082,13 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, _scsih_sas_topology_change_event_debug(ioc, event_data); #endif + if (ioc->shost_recovery) + return; + if (!ioc->sas_hba.num_phys) _scsih_sas_host_add(ioc); else - _scsih_sas_host_refresh(ioc, 0); + _scsih_sas_host_refresh(ioc); if (fw_event->ignore) { dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ignoring expander " @@ -4058,6 +4103,17 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, if (_scsih_expander_add(ioc, parent_handle) != 0) return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, + parent_handle); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (sas_expander) + sas_address = sas_expander->sas_address; + else if (parent_handle < ioc->sas_hba.num_phys) + sas_address = ioc->sas_hba.sas_address; + else + return; + /* handle siblings events */ for (i = 0; i < event_data->NumEntries; i++) { if (fw_event->ignore) { @@ -4077,48 +4133,40 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); if (!handle) continue; - link_rate_ = 
event_data->PHY[i].LinkRate >> 4; + link_rate = event_data->PHY[i].LinkRate >> 4; switch (reason_code) { case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: - if (!parent_handle) { - if (phy_number < ioc->sas_hba.num_phys) - mpt2sas_transport_update_links( - ioc, - ioc->sas_hba.phy[phy_number].handle, - handle, phy_number, link_rate_); - } else { - spin_lock_irqsave(&ioc->sas_node_lock, flags); - sas_expander = - mpt2sas_scsih_expander_find_by_handle(ioc, - parent_handle); - spin_unlock_irqrestore(&ioc->sas_node_lock, - flags); - if (sas_expander) { - if (phy_number < sas_expander->num_phys) - mpt2sas_transport_update_links( - ioc, - sas_expander-> - phy[phy_number].handle, - handle, phy_number, - link_rate_); - } - } + + mpt2sas_transport_update_links(ioc, sas_address, + handle, phy_number, link_rate); + + if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) + break; if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) { - if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5) - break; _scsih_add_device(ioc, handle, phy_number, 0); } break; case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: - _scsih_remove_device(ioc, handle); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, + handle); + if (!sas_device) { + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + break; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + _scsih_remove_device(ioc, sas_device); break; } } /* handle expander removal */ - if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) - _scsih_expander_remove(ioc, parent_handle); + if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && + sas_expander) + _scsih_expander_remove(ioc, sas_address); } @@ -4570,7 +4618,7 @@ _scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (!sas_device) return; - _scsih_remove_device(ioc, handle); + _scsih_remove_device(ioc, sas_device); } /** @@ -4591,6 +4639,8 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; u32 ioc_status; + u64 sas_address; + u16 parent_handle; spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); @@ -4615,9 +4665,10 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc, return; } - mpt2sas_transport_update_links(ioc, - le16_to_cpu(sas_device_pg0.ParentDevHandle), - handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) + mpt2sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); _scsih_add_device(ioc, handle, 0, 1); } @@ -4857,7 +4908,7 @@ static void _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { - u16 handle; + u16 handle, parent_handle; u32 state; struct _sas_device *sas_device; unsigned long flags; @@ -4865,6 +4916,7 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, Mpi2SasDevicePage0_t sas_device_pg0; u32 ioc_status; Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; + u64 sas_address; if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) return; @@ -4906,9 +4958,10 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, return; } - mpt2sas_transport_update_links(ioc, - le16_to_cpu(sas_device_pg0.ParentDevHandle), - handle, sas_device_pg0.PhyNum, 
MPI2_SAS_NEG_LINK_RATE_1_5); + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) + mpt2sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); _scsih_add_device(ioc, handle, 0, 1); @@ -5252,18 +5305,23 @@ _scsih_mark_responding_expander(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, { struct _sas_node *sas_expander; unsigned long flags; + int i; spin_lock_irqsave(&ioc->sas_node_lock, flags); list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { - if (sas_expander->sas_address == sas_address) { - sas_expander->responding = 1; - if (sas_expander->handle != handle) { - printk(KERN_INFO "old handle(0x%04x)\n", - sas_expander->handle); - sas_expander->handle = handle; - } + if (sas_expander->sas_address != sas_address) + continue; + sas_expander->responding = 1; + if (sas_expander->handle == handle) goto out; - } + printk(KERN_INFO "\texpander(0x%016llx): handle changed" + " from(0x%04x) to (0x%04x)!!!\n", + (unsigned long long)sas_expander->sas_address, + sas_expander->handle, handle); + sas_expander->handle = handle; + for (i = 0 ; i < sas_expander->num_phys ; i++) + sas_expander->phy[i].handle = handle; + goto out; } out: spin_unlock_irqrestore(&ioc->sas_node_lock, flags); @@ -5340,7 +5398,9 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc) (unsigned long long) sas_device->enclosure_logical_id, sas_device->slot); - _scsih_remove_device(ioc, sas_device->handle); + /* invalidate the device handle */ + sas_device->handle = 0; + _scsih_remove_device(ioc, sas_device); } list_for_each_entry_safe(raid_device, raid_device_next, @@ -5366,7 +5426,7 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc) sas_expander->responding = 0; continue; } - _scsih_expander_remove(ioc, sas_expander->handle); + _scsih_expander_remove(ioc, sas_expander->sas_address); goto retry_expander_search; } } @@ -5406,7 +5466,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) case MPT2_IOC_DONE_RESET: dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); - _scsih_sas_host_refresh(ioc, 0); + _scsih_sas_host_refresh(ioc); _scsih_search_responding_sas_devices(ioc); _scsih_search_responding_raid_devices(ioc); _scsih_search_responding_expanders(ioc); @@ -5646,7 +5706,7 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (!sas_device) continue; - _scsih_remove_device(ioc, sas_device->handle); + _scsih_remove_device(ioc, sas_device); if (ioc->shost_recovery) return; goto retry_device_search; @@ -5669,7 +5729,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (!expander_sibling) continue; - _scsih_expander_remove(ioc, expander_sibling->handle); + _scsih_expander_remove(ioc, + expander_sibling->sas_address); if (ioc->shost_recovery) return; goto retry_expander_search; @@ -5677,7 +5738,7 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, } mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, - sas_expander->parent_handle); + sas_expander->sas_address_parent); printk(MPT2SAS_INFO_FMT "expander_remove: handle" "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, @@ -5726,7 +5787,7 @@ _scsih_remove(struct pci_dev *pdev) mpt2sas_scsih_sas_device_find_by_sas_address(ioc, mpt2sas_port->remote_identify.sas_address); if (sas_device) { - _scsih_remove_device(ioc, 
sas_device->handle); + _scsih_remove_device(ioc, sas_device); goto retry_again; } } else { @@ -5735,7 +5796,7 @@ _scsih_remove(struct pci_dev *pdev) mpt2sas_port->remote_identify.sas_address); if (expander_sibling) { _scsih_expander_remove(ioc, - expander_sibling->handle); + expander_sibling->sas_address); goto retry_again; } } @@ -5770,7 +5831,8 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc) void *device; struct _sas_device *sas_device; struct _raid_device *raid_device; - u16 handle, parent_handle; + u16 handle; + u64 sas_address_parent; u64 sas_address; unsigned long flags; int rc; @@ -5799,17 +5861,17 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc) } else { sas_device = device; handle = sas_device->handle; - parent_handle = sas_device->parent_handle; + sas_address_parent = sas_device->sas_address_parent; sas_address = sas_device->sas_address; spin_lock_irqsave(&ioc->sas_device_lock, flags); list_move_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (!mpt2sas_transport_port_add(ioc, sas_device->handle, - sas_device->parent_handle)) { + sas_device->sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); } else if (!sas_device->starget) { mpt2sas_transport_port_remove(ioc, sas_address, - parent_handle); + sas_address_parent); _scsih_sas_device_remove(ioc, sas_device); } } @@ -5849,8 +5911,6 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) { struct _sas_device *sas_device, *next; unsigned long flags; - u16 handle, parent_handle; - u64 sas_address; /* SAS Device List */ list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list, @@ -5859,14 +5919,13 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) list_move_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - handle = sas_device->handle; - parent_handle = sas_device->parent_handle; - sas_address = sas_device->sas_address; - if (!mpt2sas_transport_port_add(ioc, handle, parent_handle)) { + if (!mpt2sas_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); } else if (!sas_device->starget) { - mpt2sas_transport_port_remove(ioc, sas_address, - parent_handle); + mpt2sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent); _scsih_sas_device_remove(ioc, sas_device); } } diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index eb98188c7f3f..8030bc2774c8 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -59,24 +59,23 @@ #include "mpt2sas_base.h" /** - * _transport_sas_node_find_by_handle - sas node search + * _transport_sas_node_find_by_sas_address - sas node search * @ioc: per adapter object - * @handle: expander or hba handle (assigned by firmware) + * @sas_address: sas address of expander or sas host * Context: Calling function should acquire ioc->sas_node_lock. * * Search for either hba phys or expander device based on handle, then returns * the sas_node object. 
*/ static struct _sas_node * -_transport_sas_node_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) +_transport_sas_node_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc, + u64 sas_address) { - int i; - - for (i = 0; i < ioc->sas_hba.num_phys; i++) - if (ioc->sas_hba.phy[i].handle == handle) - return &ioc->sas_hba; - - return mpt2sas_scsih_expander_find_by_handle(ioc, handle); + if (ioc->sas_hba.sas_address == sas_address) + return &ioc->sas_hba; + else + return mpt2sas_scsih_expander_find_by_sas_address(ioc, + sas_address); } /** @@ -469,7 +468,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, * mpt2sas_transport_port_add - insert port to the list * @ioc: per adapter object * @handle: handle of attached device - * @parent_handle: parent handle(either hba or expander) + * @sas_address: sas address of parent expander or sas host * Context: This function will acquire ioc->sas_node_lock. * * Adding new port object to the sas_node->sas_port_list. @@ -478,7 +477,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, */ struct _sas_port * mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, - u16 parent_handle) + u64 sas_address) { struct _sas_phy *mpt2sas_phy, *next; struct _sas_port *mpt2sas_port; @@ -488,9 +487,6 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, int i; struct sas_port *port; - if (!parent_handle) - return NULL; - mpt2sas_port = kzalloc(sizeof(struct _sas_port), GFP_KERNEL); if (!mpt2sas_port) { @@ -502,17 +498,16 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, INIT_LIST_HEAD(&mpt2sas_port->port_list); INIT_LIST_HEAD(&mpt2sas_port->phy_list); spin_lock_irqsave(&ioc->sas_node_lock, flags); - sas_node = _transport_sas_node_find_by_handle(ioc, parent_handle); + sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (!sas_node) { - printk(MPT2SAS_ERR_FMT "%s: Could not find parent(0x%04x)!\n", - ioc->name, __func__, parent_handle); + printk(MPT2SAS_ERR_FMT "%s: Could not find " + "parent sas_address(0x%016llx)!\n", ioc->name, + __func__, (unsigned long long)sas_address); goto out_fail; } - mpt2sas_port->handle = parent_handle; - mpt2sas_port->sas_address = sas_node->sas_address; if ((_transport_set_identify(ioc, handle, &mpt2sas_port->remote_identify))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", @@ -604,7 +599,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, * mpt2sas_transport_port_remove - remove port from the list * @ioc: per adapter object * @sas_address: sas address of attached device - * @parent_handle: handle to the upstream parent(either hba or expander) + * @sas_address_parent: sas address of parent expander or sas host * Context: This function will acquire ioc->sas_node_lock. 
* * Removing object and freeing associated memory from the @@ -614,7 +609,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, */ void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, - u16 parent_handle) + u64 sas_address_parent) { int i; unsigned long flags; @@ -624,7 +619,8 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, struct _sas_phy *mpt2sas_phy, *next_phy; spin_lock_irqsave(&ioc->sas_node_lock, flags); - sas_node = _transport_sas_node_find_by_handle(ioc, parent_handle); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address_parent); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (!sas_node) return; @@ -650,8 +646,7 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, &mpt2sas_port->phy_list, port_siblings) { if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) dev_printk(KERN_INFO, &mpt2sas_port->port->dev, - "remove: parent_handle(0x%04x), " - "sas_addr(0x%016llx), phy(%d)\n", parent_handle, + "remove: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long) mpt2sas_port->remote_identify.sas_address, mpt2sas_phy->phy_id); @@ -799,8 +794,8 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy /** * mpt2sas_transport_update_links - refreshing phy link changes * @ioc: per adapter object - * @handle: handle to sas_host or expander - * @attached_handle: attached device handle + * @sas_address: sas address of parent expander or sas host + * @handle: attached device handle * @phy_numberv: phy number * @link_rate: new link rate * @@ -808,28 +803,25 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy */ void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, - u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate) + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate) { unsigned long flags; struct _sas_node *sas_node; struct _sas_phy *mpt2sas_phy; - if (ioc->shost_recovery) { - printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", - __func__, ioc->name); + if (ioc->shost_recovery) return; - } spin_lock_irqsave(&ioc->sas_node_lock, flags); - sas_node = _transport_sas_node_find_by_handle(ioc, handle); + sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (!sas_node) return; mpt2sas_phy = &sas_node->phy[phy_number]; - mpt2sas_phy->attached_handle = attached_handle; - if (attached_handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) - _transport_set_identify(ioc, mpt2sas_phy->attached_handle, + mpt2sas_phy->attached_handle = handle; + if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) + _transport_set_identify(ioc, handle, &mpt2sas_phy->remote_identify); else memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct @@ -841,13 +833,11 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev, - "refresh: handle(0x%04x), sas_addr(0x%016llx),\n" + "refresh: parent sas_addr(0x%016llx),\n" "\tlink_rate(0x%02x), phy(%d)\n" "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", - handle, (unsigned long long) - mpt2sas_phy->identify.sas_address, link_rate, - phy_number, attached_handle, - (unsigned long long) + (unsigned long long)sas_address, + link_rate, phy_number, handle, (unsigned long long) mpt2sas_phy->remote_identify.sas_address); } -- cgit v1.2.3-59-g8ed1b From a28eb222e3890a4ce190a430e24c483d2b5bb13b Mon Sep 17 00:00:00 
2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:22:37 +0530 Subject: [SCSI] mpt2sas: Expander remove fails when it is processing another expander add. This handles the case where driver receives a expander removal event while it is in the middle of processing an expander add event. The existing implementation will stop processing futher device adds when a expander delete arrives on top of add expander add. Due to a sanity check in the driver, the devices there were not added, were never handshaked to firmware with the device removal handshake protocal. Since the driver didnt' do the handshake, the controller never provide further add events. To fix this issue, the sanity check was removed so the driver will always do the device removal handshake protocal. Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 109 ++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 47 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 8822cda852ba..d4e890d8b992 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2386,16 +2386,10 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle) spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); - if (!sas_device) { - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n", - ioc->name, __func__); - return; - } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); /* skip is hidden raid component */ - if (sas_device->hidden_raid_component) + if (sas_device && sas_device->hidden_raid_component) return; smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); @@ -2408,18 +2402,31 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle) delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL; list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); - if (sas_device->starget) + if (sas_device && sas_device->starget) { dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget, "DELAYED:tr:handle(0x%04x), " - "(open)\n", sas_device->handle)); + "(open)\n", handle)); + } else { + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT + "DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + } return; } - if (sas_device->starget && sas_device->starget->hostdata) { - sas_target_priv_data = sas_device->starget->hostdata; - sas_target_priv_data->tm_busy = 1; - dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget, - "tr:handle(0x%04x), (open)\n", sas_device->handle)); + if (sas_device) { + sas_device->state |= MPTSAS_STATE_TR_SEND; + sas_device->state |= MPT2SAS_REQ_SAS_CNTRL; + if (sas_device->starget && sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->tm_busy = 1; + dewtprintk(ioc, starget_printk(KERN_INFO, + sas_device->starget, "tr:handle(0x%04x), (open)\n", + handle)); + } + } else { + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT + "tr:handle(0x%04x), (open)\n", ioc->name, handle)); } mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); @@ -2427,8 +2434,6 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle) mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; mpi_request->DevHandle = cpu_to_le16(handle); mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; - sas_device->state |= MPTSAS_STATE_TR_SEND; - sas_device->state |= MPT2SAS_REQ_SAS_CNTRL; mpt2sas_base_put_smid_hi_priority(ioc, 
smid); } @@ -2463,21 +2468,25 @@ _scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); - if (!sas_device) { - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n", - ioc->name, __func__); - return 1; - } - sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE; spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - if (sas_device->starget) - dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget, + if (sas_device) { + sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE; + if (sas_device->starget) + dewtprintk(ioc, starget_printk(KERN_INFO, + sas_device->starget, + "sc_complete:handle(0x%04x), " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + handle, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + } else { + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sc_complete:handle(0x%04x), " "ioc_status(0x%04x), loginfo(0x%08x)\n", - handle, le16_to_cpu(mpi_reply->IOCStatus), + ioc->name, handle, le16_to_cpu(mpi_reply->IOCStatus), le32_to_cpu(mpi_reply->IOCLogInfo))); + } + return 1; } @@ -2515,28 +2524,33 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, handle = le16_to_cpu(mpi_reply->DevHandle); spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); - if (!sas_device) { - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - printk(MPT2SAS_ERR_FMT "%s: failed finding sas_device\n", - ioc->name, __func__); - return 1; - } - sas_device->state |= MPTSAS_STATE_TR_COMPLETE; spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - if (sas_device->starget) - dewtprintk(ioc, starget_printk(KERN_INFO, sas_device->starget, - "tr_complete:handle(0x%04x), (%s) ioc_status(0x%04x), " - "loginfo(0x%08x), completed(%d)\n", - sas_device->handle, (sas_device->state & - MPT2SAS_REQ_SAS_CNTRL) ? "open" : "active", - le16_to_cpu(mpi_reply->IOCStatus), + if (sas_device) { + sas_device->state |= MPTSAS_STATE_TR_COMPLETE; + if (sas_device->starget) { + dewtprintk(ioc, starget_printk(KERN_INFO, + sas_device->starget, "tr_complete:handle(0x%04x), " + "(%s) ioc_status(0x%04x), loginfo(0x%08x), " + "completed(%d)\n", sas_device->handle, + (sas_device->state & MPT2SAS_REQ_SAS_CNTRL) ? 
+ "open" : "active", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + if (sas_device->starget->hostdata) { + sas_target_priv_data = + sas_device->starget->hostdata; + sas_target_priv_data->tm_busy = 0; + } + } + } else { + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT + "tr_complete:handle(0x%04x), (open) ioc_status(0x%04x), " + "loginfo(0x%08x), completed(%d)\n", ioc->name, + handle, le16_to_cpu(mpi_reply->IOCStatus), le32_to_cpu(mpi_reply->IOCLogInfo), le32_to_cpu(mpi_reply->TerminationCount))); - - if (sas_device->starget && sas_device->starget->hostdata) { - sas_target_priv_data = sas_device->starget->hostdata; - sas_target_priv_data->tm_busy = 0; } if (!list_empty(&ioc->delayed_tr_list)) { @@ -2551,8 +2565,7 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, } else rc = 1; - - if (!(sas_device->state & MPT2SAS_REQ_SAS_CNTRL)) + if (sas_device && !(sas_device->state & MPT2SAS_REQ_SAS_CNTRL)) return rc; if (ioc->shost_recovery) { @@ -2568,12 +2581,14 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, return rc; } + if (sas_device) + sas_device->state |= MPTSAS_STATE_CNTRL_SEND; + mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl); memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; mpi_request->DevHandle = mpi_reply->DevHandle; - sas_device->state |= MPTSAS_STATE_CNTRL_SEND; mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl); return rc; } -- cgit v1.2.3-59-g8ed1b From 9982f59450930138eb0bf9a4ebf865e8c06ba705 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:23:07 +0530 Subject: [SCSI] mpt2sas: Add support in the driver to check for valid response info Add support in the driver to check for valid response info in the scsi state, then check to see if the response code is MPI2_SCSITASKMGMT_RSP_INVALID_FRAME; when this condition occurrs, the driver will return DID_SOFT_ERROR. A return code of DID_SOFT_ERROR will result in a retry at the scsi-mid layer level. An additional change added to obtain the response code from the 1st byte of the response info instead of last. 
Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index d4e890d8b992..efb6270cf261 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -3111,7 +3111,7 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { response_info = le32_to_cpu(mpi_reply->ResponseInfo); response_bytes = (u8 *)&response_info; - _scsih_response_code(ioc, response_bytes[3]); + _scsih_response_code(ioc, response_bytes[0]); } } #endif @@ -3229,7 +3229,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) u8 scsi_status; u32 log_info; struct MPT2SAS_DEVICE *sas_device_priv_data; - u32 response_code; + u32 response_code = 0; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); scmd = _scsih_scsi_lookup_get(ioc, smid); @@ -3251,16 +3251,16 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) } /* turning off TLR */ + scsi_state = mpi_reply->SCSIState; + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) + response_code = + le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; if (!sas_device_priv_data->tlr_snoop_check) { sas_device_priv_data->tlr_snoop_check++; - if (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) { - response_code = (le32_to_cpu(mpi_reply->ResponseInfo) - >> 24); - if (response_code == - MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) - sas_device_priv_data->flags &= - ~MPT_DEVICE_TLR_ON; - } + if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && + response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) + sas_device_priv_data->flags &= + ~MPT_DEVICE_TLR_ON; } xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); @@ -3271,7 +3271,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) else log_info = 0; ioc_status &= MPI2_IOCSTATUS_MASK; - scsi_state = mpi_reply->SCSIState; scsi_status = mpi_reply->SCSIStatus; if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && @@ -3356,8 +3355,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: case MPI2_IOCSTATUS_SUCCESS: scmd->result = (DID_OK << 16) | scsi_status; - if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | - MPI2_SCSI_STATE_NO_SCSI_STATUS)) + if (response_code == + MPI2_SCSITASKMGMT_RSP_INVALID_FRAME || + (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) scmd->result = DID_SOFT_ERROR << 16; else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) scmd->result = DID_RESET << 16; -- cgit v1.2.3-59-g8ed1b From db27136a89d061bf9dceb28953a61a8ef862ca7f Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:24:27 +0530 Subject: [SCSI] mpt2sas: New device SAS2208 support is added Added device ids range for { 0x80 - 87 } , modified mpi/mpi2_cnfg.h containing MPI2_MFGPAGE_DEVID_SAS2208_X. 
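As an illustration of the id-table change (not part of the patch; the table name and the use of the PCI_DEVICE() helper are only an example, the real entries are added to scsih_pci_table in the diff below):

/* Sketch: publishing the new SAS2208 PCI ids so the PCI core can bind
 * the driver; the real table lists all eight ids 0x0080..0x0087.
 */
static const struct pci_device_id example_sas2208_ids[] = {
	{ PCI_DEVICE(MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1) },
	/* ... MPI2_MFGPAGE_DEVID_SAS2208_2 through _7 ... */
	{ PCI_DEVICE(MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_sas2208_ids);
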
Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 8 ++++++++ drivers/scsi/mpt2sas/mpt2sas_scsih.c | 18 ++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index ab47c4679640..5af66dbe3239 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h @@ -348,6 +348,14 @@ typedef struct _MPI2_CONFIG_REPLY #define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077) #define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) #define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) +#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) +#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) +#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) +#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) +#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) +#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) +#define MPI2_MFGPAGE_DEVID_SAS2208_7 (0x0086) +#define MPI2_MFGPAGE_DEVID_SAS2208_8 (0x0087) /* Manufacturing Page 0 */ diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index efb6270cf261..91d61154a46c 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -196,10 +196,28 @@ static struct pci_device_id scsih_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, PCI_ANY_ID, PCI_ANY_ID }, + /* Meteor ~ 2116 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, PCI_ANY_ID, PCI_ANY_ID }, + /* Thunderbolt ~ 2208 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_7, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8, + PCI_ANY_ID, PCI_ANY_ID }, {0} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, scsih_pci_table); -- cgit v1.2.3-59-g8ed1b From 9fec5f9fc2fbe7c6e39db01ae296528d9a20a5b1 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:26:20 +0530 Subject: [SCSI] mpt2sas: Adding MPI Headers - revision L The new headers contain the following changes: (1) Added IO Unit Page 7. (2) Added new device ids for SAS2208. (3) Added SAS IO Unit Page 5. (4) Added partial and slumber power management capable flags to SAS Device Page 0 Flags field. (5) Added PhyInfo defines for power condition. (6) Added Ethernet configuration pages. 
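For context, a minimal sketch of how a driver could consume one of the new pages, here the IO Unit Page 7 temperature fields (illustrative only; the function is hypothetical and fetching the page itself is not shown, but the structure and defines are the ones added below):

/* Sketch: decode the IOC temperature reported by the new IO Unit Page 7. */
static void example_show_ioc_temp(struct MPT2SAS_ADAPTER *ioc,
    Mpi2IOUnitPage7_t *io_unit_pg7)
{
	u16 temp = le16_to_cpu(io_unit_pg7->IOCTemperature);

	switch (io_unit_pg7->IOCTemperatureUnits) {
	case MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS:
		printk(MPT2SAS_INFO_FMT "IOC temperature(%d C)\n", ioc->name, temp);
		break;
	case MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT:
		printk(MPT2SAS_INFO_FMT "IOC temperature(%d F)\n", ioc->name, temp);
		break;
	default:	/* MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT */
		printk(MPT2SAS_INFO_FMT "IOC temperature not reported\n", ioc->name);
		break;
	}
}
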
Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpi/mpi2.h | 5 +- drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 266 ++++++++++++++++++++++++++++++++++- drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 18 ++- drivers/scsi/mpt2sas/mpi/mpi2_raid.h | 14 +- drivers/scsi/mpt2sas/mpi/mpi2_tool.h | 16 ++- 5 files changed, 305 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index f9f6c0839276..914168105297 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.12 + * mpi2.h Version: 02.00.13 * * Version History * --------------- @@ -52,6 +52,7 @@ * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those * bytes reserved. * Added RAID Accelerator functionality. + * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. * -------------------------------------------------------------------------- */ @@ -77,7 +78,7 @@ #define MPI2_VERSION_02_00 (0x0200) /* versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x0C) +#define MPI2_HEADER_VERSION_UNIT (0x0D) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index 5af66dbe3239..1611c57a6fdf 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h @@ -6,7 +6,7 @@ * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.11 + * mpi2_cnfg.h Version: 02.00.12 * * Version History * --------------- @@ -100,6 +100,13 @@ * Added expander reduced functionality data to SAS * Expander Page 0. * Added SAS PHY Page 2 and SAS PHY Page 3. + * 07-30-09 02.00.12 Added IO Unit Page 7. + * Added new device ids. + * Added SAS IO Unit Page 5. + * Added partial and slumber power management capable flags + * to SAS Device Page 0 Flags field. + * Added PhyInfo defines for power condition. + * Added Ethernet configuration pages. 
* -------------------------------------------------------------------------- */ @@ -182,6 +189,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION #define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) #define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) #define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) +#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) /***************************************************************************** @@ -268,6 +276,14 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION #define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF) +/* Ethernet PageAddress format */ +#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000) +#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000) + +#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF) + + + /**************************************************************************** * Configuration messages ****************************************************************************/ @@ -348,6 +364,7 @@ typedef struct _MPI2_CONFIG_REPLY #define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077) #define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) #define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) + #define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) #define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) #define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) @@ -795,6 +812,56 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 { #define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) +/* IO Unit Page 7 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U16 Reserved1; /* 0x04 */ + U8 PCIeWidth; /* 0x06 */ + U8 PCIeSpeed; /* 0x07 */ + U32 ProcessorState; /* 0x08 */ + U32 Reserved2; /* 0x0C */ + U16 IOCTemperature; /* 0x10 */ + U8 IOCTemperatureUnits; /* 0x12 */ + U8 IOCSpeed; /* 0x13 */ + U32 Reserved3; /* 0x14 */ +} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, + Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t; + +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x00) + +/* defines for IO Unit Page 7 PCIeWidth field */ +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08) + +/* defines for IO Unit Page 7 PCIeSpeed field */ +#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) + +/* defines for IO Unit Page 7 ProcessorState field */ +#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) +#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0) + +#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01) +#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02) + +/* defines for IO Unit Page 7 IOCTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02) + +/* defines for IO Unit Page 7 IOCSpeed field */ +#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01) +#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02) +#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) +#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) + + + /**************************************************************************** * IOC Config Pages ****************************************************************************/ @@ -1478,6 +1545,12 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 /* values for PhyInfo fields */ #define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) + +#define 
MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) + #define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000) #define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000) #define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000) @@ -1690,11 +1763,11 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 /* values for SAS IO Unit Page 1 PortFlags */ #define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) -/* values for SAS IO Unit Page 2 PhyFlags */ +/* values for SAS IO Unit Page 1 PhyFlags */ #define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) #define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) -/* values for SAS IO Unit Page 0 MaxMinLinkRate */ +/* values for SAS IO Unit Page 1 MaxMinLinkRate */ #define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0) #define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80) #define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) @@ -1753,6 +1826,74 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 #define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03) +/* SAS IO Unit Page 5 */ + +typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS { + U8 ControlFlags; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 InactivityTimerExponent; /* 0x02 */ + U8 SATAPartialTimeout; /* 0x04 */ + U8 Reserved2; /* 0x05 */ + U8 SATASlumberTimeout; /* 0x06 */ + U8 Reserved3; /* 0x07 */ + U8 SASPartialTimeout; /* 0x08 */ + U8 Reserved4; /* 0x09 */ + U8 SASSlumberTimeout; /* 0x0A */ + U8 Reserved5; /* 0x0B */ +} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + MPI2_POINTER PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + Mpi2SasIOUnit5PhyPmSettings_t, MPI2_POINTER pMpi2SasIOUnit5PhyPmSettings_t; + +/* defines for ControlFlags field */ +#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08) +#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04) +#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02) +#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01) + +/* defines for InactivityTimerExponent field */ +#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12) +#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8) +#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4) +#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0) + +#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7) +#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6) +#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5) +#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4) +#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3) +#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2) +#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1) +#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.ExtPageLength or NumPhys at runtime. 
+ */ +#ifndef MPI2_SAS_IOUNIT5_PHY_MAX +#define MPI2_SAS_IOUNIT5_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 NumPhys; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS SASPhyPowerManagementSettings + [MPI2_SAS_IOUNIT5_PHY_MAX]; /* 0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_5, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, + Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t; + +#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x00) + + + + /**************************************************************************** * SAS Expander Config Pages ****************************************************************************/ @@ -1935,6 +2076,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 /* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ /* values for SAS Device Page 0 Flags field */ +#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000) +#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800) #define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) #define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) #define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100) @@ -2351,5 +2494,122 @@ typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 #define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F) +/**************************************************************************** +* Ethernet Config Pages +****************************************************************************/ + +/* Ethernet Page 0 */ + +/* IP address (union of IPv4 and IPv6) */ +typedef union _MPI2_ETHERNET_IP_ADDR { + U32 IPv4Addr; + U32 IPv6Addr[4]; +} MPI2_ETHERNET_IP_ADDR, MPI2_POINTER PTR_MPI2_ETHERNET_IP_ADDR, + Mpi2EthernetIpAddr_t, MPI2_POINTER pMpi2EthernetIpAddr_t; + +#define MPI2_ETHERNET_HOST_NAME_LENGTH (32) + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 NumInterfaces; /* 0x08 */ + U8 Reserved0; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U32 Status; /* 0x0C */ + U8 MediaState; /* 0x10 */ + U8 Reserved2; /* 0x11 */ + U16 Reserved3; /* 0x12 */ + U8 MacAddress[6]; /* 0x14 */ + U8 Reserved4; /* 0x1A */ + U8 Reserved5; /* 0x1B */ + MPI2_ETHERNET_IP_ADDR IpAddress; /* 0x1C */ + MPI2_ETHERNET_IP_ADDR SubnetMask; /* 0x2C */ + MPI2_ETHERNET_IP_ADDR GatewayIpAddress; /* 0x3C */ + MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /* 0x4C */ + MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /* 0x5C */ + MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /* 0x6C */ + U8 HostName + [MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_0, + Mpi2EthernetPage0_t, MPI2_POINTER pMpi2EthernetPage0_t; + +#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00) + +/* values for Ethernet Page 0 Status field */ +#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000) +#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000) +#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000) +#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080) +#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040) +#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020) +#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010) +#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008) +#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004) +#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001) + +/* values 
for Ethernet Page 0 MediaState field */ +#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07) +#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00) +#define MPI2_ETHPG0_MS_10MBIT (0x01) +#define MPI2_ETHPG0_MS_100MBIT (0x02) +#define MPI2_ETHPG0_MS_1GBIT (0x03) + + +/* Ethernet Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved0; /* 0x08 */ + U32 Flags; /* 0x0C */ + U8 MediaState; /* 0x10 */ + U8 Reserved1; /* 0x11 */ + U16 Reserved2; /* 0x12 */ + U8 MacAddress[6]; /* 0x14 */ + U8 Reserved3; /* 0x1A */ + U8 Reserved4; /* 0x1B */ + MPI2_ETHERNET_IP_ADDR StaticIpAddress; /* 0x1C */ + MPI2_ETHERNET_IP_ADDR StaticSubnetMask; /* 0x2C */ + MPI2_ETHERNET_IP_ADDR StaticGatewayIpAddress; /* 0x3C */ + MPI2_ETHERNET_IP_ADDR StaticDNS1IpAddress; /* 0x4C */ + MPI2_ETHERNET_IP_ADDR StaticDNS2IpAddress; /* 0x5C */ + U32 Reserved5; /* 0x6C */ + U32 Reserved6; /* 0x70 */ + U32 Reserved7; /* 0x74 */ + U32 Reserved8; /* 0x78 */ + U8 HostName + [MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_1, + Mpi2EthernetPage1_t, MPI2_POINTER pMpi2EthernetPage1_t; + +#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00) + +/* values for Ethernet Page 1 Flags field */ +#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080) +#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040) +#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020) +#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004) +#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001) + +/* values for Ethernet Page 1 MediaState field */ +#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07) +#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00) +#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01) +#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02) +#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03) + + #endif diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index c294128bdeb4..ea51ce868690 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h @@ -6,7 +6,7 @@ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages * Creation Date: October 11, 2006 * - * mpi2_ioc.h Version: 02.00.11 + * mpi2_ioc.h Version: 02.00.12 * * Version History * --------------- @@ -84,6 +84,9 @@ * Added two new reason codes for SAS Device Status Change * Event. * Added new event: SAS PHY Counter. + * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. + * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Added new product id family for 2208. 
* -------------------------------------------------------------------------- */ @@ -274,6 +277,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY #define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) #define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) #define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040) +#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020) #define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) #define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) #define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) @@ -448,6 +452,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY #define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) #define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) #define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) +#define MPI2_EVENT_GPIO_INTERRUPT (0x0023) /* Log Entry Added Event data */ @@ -469,6 +474,16 @@ typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t; +/* GPIO Interrupt Event data */ + +typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT { + U8 GPIONum; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ +} MPI2_EVENT_DATA_GPIO_INTERRUPT, + MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, + Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t; + /* Hard Reset Received Event data */ typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED @@ -1117,6 +1132,7 @@ typedef struct _MPI2_FW_IMAGE_HEADER #define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) /* SAS */ #define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0010) +#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0011) /* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h index 7134816d9046..5160c33d2a00 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h @@ -6,7 +6,7 @@ * Title: MPI Integrated RAID messages and structures * Creation Date: April 26, 2007 * - * mpi2_raid.h Version: 02.00.03 + * mpi2_raid.h Version: 02.00.04 * * Version History * --------------- @@ -20,6 +20,8 @@ * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT * can be sized by the build environment. + * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of + * VolumeCreationFlags and marked the old one as obsolete. * -------------------------------------------------------------------------- */ @@ -217,10 +219,14 @@ typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT /* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ /* defines for the VolumeCreationFlags field */ +#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000) +#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) +#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002) +#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001) +/* The following is an obsolete define. + * It must be shifted left 24 bits in order to set the proper bit. 
+ */ #define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) -#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x04) -#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x02) -#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x01) /* RAID Online Capacity Expansion Structure */ diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h index 007e950f7bfa..73fcdbf92632 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h @@ -6,7 +6,7 @@ * Title: MPI diagnostic tool structures and definitions * Creation Date: March 26, 2007 * - * mpi2_tool.h Version: 02.00.03 + * mpi2_tool.h Version: 02.00.04 * * Version History * --------------- @@ -18,6 +18,10 @@ * structures and defines. * 02-29-08 02.00.02 Modified various names to make them 32-character unique. * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. + * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request + * and reply messages. + * Added MPI2_DIAG_BUF_TYPE_EXTENDED. + * Incremented MPI2_DIAG_BUF_TYPE_COUNT. * -------------------------------------------------------------------------- */ @@ -282,7 +286,7 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY { typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST { - U8 Reserved1; /* 0x00 */ + U8 ExtendedType; /* 0x00 */ U8 BufferType; /* 0x01 */ U8 ChainOffset; /* 0x02 */ U8 Function; /* 0x03 */ @@ -301,11 +305,15 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST } MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST, Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t; +/* values for the ExtendedType field */ +#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02) + /* values for the BufferType field */ #define MPI2_DIAG_BUF_TYPE_TRACE (0x00) #define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) +#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02) /* count of the number of buffer types */ -#define MPI2_DIAG_BUF_TYPE_COUNT (0x02) +#define MPI2_DIAG_BUF_TYPE_COUNT (0x03) /**************************************************************************** @@ -314,7 +322,7 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST typedef struct _MPI2_DIAG_BUFFER_POST_REPLY { - U8 Reserved1; /* 0x00 */ + U8 ExtendedType; /* 0x00 */ U8 BufferType; /* 0x01 */ U8 MsgLength; /* 0x02 */ U8 Function; /* 0x03 */ -- cgit v1.2.3-59-g8ed1b From fa7f31673583a6e0876f8bb420735cdd8a3ffa57 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:26:58 +0530 Subject: [SCSI] mpt2sas: Support for stopping driver when Firmware encounters Added command line option and shost sysfs attribute called mpt2sas_fwfault_debug. When enduser writes a "1" to this parameter, this will enable support in the driver for debugging firmware timeout related issues. This handling was added in three areas (a) scsi error handling callback called task_abort, (b) IOCTL interface, and (c) other timeouts that result in diag resets, such as manufacturing config pages. When this support is enabled, the driver will provide dump_stack to console, halt controller firmware, and panic driver. The end user probably would want to setup serial console redirection so the dump stack can be seen. 
Here are the three methods for enable this support: (a) # insmod mpt2sas.ko mpt2sas_fwfault_debug=1 (b) # echo 1 > /sys/module/mpt2sas/parameters/mpt2sas_fwfault_debug (c) # echo 1 > /sys/class/scsi_host/host#/fwfault_debug (where # is the host number) Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 88 ++++++++++++++++++++++++++++++------ drivers/scsi/mpt2sas/mpt2sas_base.h | 4 ++ drivers/scsi/mpt2sas/mpt2sas_ctl.c | 41 ++++++++++++++++- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 2 + 4 files changed, 119 insertions(+), 16 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 670241efa4b5..617664cbf3f7 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -77,6 +77,32 @@ static int msix_disable = -1; module_param(msix_disable, int, 0); MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); +int mpt2sas_fwfault_debug; +MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " + "and halt firmware - (default=0)"); + +/** + * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. + * + */ +static int +_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct MPT2SAS_ADAPTER *ioc; + + if (ret) + return ret; + + printk(KERN_INFO "setting logging_level(0x%08x)\n", + mpt2sas_fwfault_debug); + list_for_each_entry(ioc, &mpt2sas_ioc_list, list) + ioc->fwfault_debug = mpt2sas_fwfault_debug; + return 0; +} +module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug, + param_get_int, &mpt2sas_fwfault_debug, 0644); + /** * _base_fault_reset_work - workq handling ioc fault conditions * @work: input argument, used to derive ioc @@ -177,6 +203,51 @@ mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc) } } +/** + * mpt2sas_base_fault_info - verbose translation of firmware FAULT code + * @ioc: per adapter object + * @fault_code: fault code + * + * Return nothing. + */ +void +mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code) +{ + printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n", + ioc->name, fault_code); +} + +/** + * mpt2sas_halt_firmware - halt's mpt controller firmware + * @ioc: per adapter object + * + * For debugging timeout related issues. Writing 0xCOFFEE00 + * to the doorbell register will halt controller firmware. With + * the purpose to stop both driver and firmware, the enduser can + * obtain a ring buffer from controller UART. + */ +void +mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc) +{ + u32 doorbell; + + if (!ioc->fwfault_debug) + return; + + dump_stack(); + + doorbell = readl(&ioc->chip->Doorbell); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + mpt2sas_base_fault_info(ioc , doorbell); + else { + writel(0xC0FFEE00, &ioc->chip->Doorbell); + printk(MPT2SAS_ERR_FMT "Firmware is halted due to command " + "timeout\n", ioc->name); + } + + panic("panic in %s\n", __func__); +} + #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** * _base_sas_ioc_info - verbose translation of the ioc status @@ -525,20 +596,6 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info) sas_loginfo.dw.subcode); } -/** - * mpt2sas_base_fault_info - verbose translation of firmware FAULT code - * @ioc: pointer to scsi command object - * @fault_code: fault code - * - * Return nothing. 
- */ -void -mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code) -{ - printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n", - ioc->name, fault_code); -} - /** * _base_display_reply_info - * @ioc: pointer to scsi command object @@ -3684,6 +3741,9 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, __func__)); + if (mpt2sas_fwfault_debug) + mpt2sas_halt_firmware(ioc); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); if (ioc->shost_recovery) { spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index fa99ff204e46..0c75c0e137f7 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -466,6 +466,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); * @chip_phys: physical addrss prior to mapping * @pio_chip: I/O mapped register space * @logging_level: see mpt2sas_debug.h + * @fwfault_debug: debuging FW timeouts * @ir_firmware: IR firmware present * @bars: bitmask of BAR's that must be configured * @mask_interrupts: ignore interrupt @@ -587,6 +588,7 @@ struct MPT2SAS_ADAPTER { unsigned long chip_phys; unsigned long pio_chip; int logging_level; + int fwfault_debug; u8 ir_firmware; int bars; u8 mask_interrupts; @@ -803,6 +805,8 @@ int mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc, Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request); void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type); +void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc); + /* scsih shared API */ u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply); diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 57d724633906..6901a6706ede 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -896,6 +896,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, printk(MPT2SAS_INFO_FMT "issue target reset: handle " "= (0x%04x)\n", ioc->name, mpi_request->FunctionDependent1); + mpt2sas_halt_firmware(ioc); mutex_lock(&ioc->tm_cmds.mutex); mpt2sas_scsih_issue_tm(ioc, mpi_request->FunctionDependent1, 0, @@ -2474,6 +2475,43 @@ _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr, static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show, _ctl_logging_level_store); +/* device attributes */ +/* + * _ctl_fwfault_debug_show - show/store fwfault_debug + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * mpt2sas_fwfault_debug is command line option + * A sysfs 'read/write' shost attribute. 
+ */ +static ssize_t +_ctl_fwfault_debug_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); +} +static ssize_t +_ctl_fwfault_debug_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); + int val = 0; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + ioc->fwfault_debug = val; + printk(MPT2SAS_INFO_FMT "fwfault_debug=%d\n", ioc->name, + ioc->fwfault_debug); + return strlen(buf); +} +static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR, + _ctl_fwfault_debug_show, _ctl_fwfault_debug_store); + struct device_attribute *mpt2sas_host_attrs[] = { &dev_attr_version_fw, &dev_attr_version_bios, @@ -2487,13 +2525,12 @@ struct device_attribute *mpt2sas_host_attrs[] = { &dev_attr_io_delay, &dev_attr_device_delay, &dev_attr_logging_level, + &dev_attr_fwfault_debug, &dev_attr_fw_queue_depth, &dev_attr_host_sas_address, NULL, }; -/* device attributes */ - /** * _ctl_device_sas_address_show - sas address * @cdev - pointer to embedded class device diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 91d61154a46c..59ea821c2a3c 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -1929,6 +1929,8 @@ _scsih_abort(struct scsi_cmnd *scmd) goto out; } + mpt2sas_halt_firmware(ioc); + mutex_lock(&ioc->tm_cmds.mutex); handle = sas_device_priv_data->sas_target->handle; mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun, -- cgit v1.2.3-59-g8ed1b From cef7a12cd1e0647ce2b566a76bbf4cd132b9118d Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:27:41 +0530 Subject: [SCSI] mpt2sas: Fixed some of the comment Fixed some of the comments sections for some of the function so "@ioc: pointer to scsi command object" was changed to "@ioc: per adapter object" Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 617664cbf3f7..ec3f57732e97 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -147,7 +147,7 @@ _base_fault_reset_work(struct work_struct *work) /** * mpt2sas_base_start_watchdog - start the fault_reset_work_q - * @ioc: pointer to scsi command object + * @ioc: per adapter object * Context: sleep. * * Return nothing. @@ -181,7 +181,7 @@ mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc) /** * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q - * @ioc: pointer to scsi command object + * @ioc: per adapter object * Context: sleep. * * Return nothing. 
@@ -251,7 +251,7 @@ mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc) #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** * _base_sas_ioc_info - verbose translation of the ioc status - * @ioc: pointer to scsi command object + * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @request_hdr: request mf * @@ -465,7 +465,7 @@ _base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, /** * _base_display_event_data - verbose translation of firmware asyn events - * @ioc: pointer to scsi command object + * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * * Return nothing. @@ -545,7 +545,7 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc, /** * _base_sas_log_info - verbose translation of firmware log info - * @ioc: pointer to scsi command object + * @ioc: per adapter object * @log_info: log info * * Return nothing. @@ -598,7 +598,7 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info) /** * _base_display_reply_info - - * @ioc: pointer to scsi command object + * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) @@ -627,7 +627,7 @@ _base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, /** * mpt2sas_base_done - base internal command completion routine - * @ioc: pointer to scsi command object + * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) @@ -660,7 +660,7 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, /** * _base_async_event - main callback handler for firmware asyn events - * @ioc: pointer to scsi command object + * @ioc: per adapter object * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * @@ -741,7 +741,7 @@ _base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid) /** * _base_mask_interrupts - disable interrupts - * @ioc: pointer to scsi command object + * @ioc: per adapter object * * Disabling ResetIRQ, Reply and Doorbell Interrupts * @@ -761,7 +761,7 @@ _base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc) /** * _base_unmask_interrupts - enable interrupts - * @ioc: pointer to scsi command object + * @ioc: per adapter object * * Enabling only Reply Interrupts * -- cgit v1.2.3-59-g8ed1b From 32e0eb569df09a8cb790cf370ee498721d88e5c6 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:28:09 +0530 Subject: [SCSI] mpt2sas: Added command line option diag_buffer_enable. Added command line option diag_buffer_enable. When the command line option is set, the driver will automatically post diag buffers at driver load time. The command line option diag_buffer_enable is bitwise, so it's possible to enable both and/or snapshot + trace buffers. For trace, the driver will allocate 1MB buffer, whereas for snapshot its 2MB. The purpose for this is so the enduser doesn't have to manually use an application to setup diag buffers for debugging firmware related issues. 
Here is some examples trace: # insmod mpt2sas.ko diag_buffer_enable=1 snapshot: # insmod mpt2sas.ko diag_buffer_enable=2 both trace and snapshot: # insmod mpt2sas.ko diag_buffer_enable=3 Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 13 ++++ drivers/scsi/mpt2sas/mpt2sas_base.h | 3 + drivers/scsi/mpt2sas/mpt2sas_ctl.c | 125 +++++++++++++++++++++++++----------- 3 files changed, 104 insertions(+), 37 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index ec3f57732e97..1d2374b5a0a1 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -77,6 +77,17 @@ static int msix_disable = -1; module_param(msix_disable, int, 0); MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); +/* diag_buffer_enable is bitwise + * bit 0 set = MPI2_DIAG_BUF_TYPE_TRACE(1) + * bit 1 set = MPI2_DIAG_BUF_TYPE_SNAPSHOT(2) + * + * Either bit can be set, or both + */ +static int diag_buffer_enable; +module_param(diag_buffer_enable, int, 0); +MODULE_PARM_DESC(diag_buffer_enable, " enable diag buffer at driver load " + "time (TRACE=1/SNAP=2/default=0)"); + int mpt2sas_fwfault_debug; MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " "and halt firmware - (default=0)"); @@ -3588,6 +3599,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) goto out_free_resources; mpt2sas_base_start_watchdog(ioc); + if (diag_buffer_enable != 0) + mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); return 0; out_free_resources: diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 0c75c0e137f7..879fd70fd683 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -886,6 +886,9 @@ u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, Mpi2EventNotificationReply_t *mpi_reply); +void mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, + u8 bits_to_regsiter); + /* transport shared API */ u8 mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 6901a6706ede..99a332d76f51 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -1256,18 +1256,15 @@ _ctl_diag_capability(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type) } /** - * _ctl_diag_register - application register with driver - * @arg - user space buffer containing ioctl content - * @state - NON_BLOCKING or BLOCKING + * _ctl_diag_register_2 - wrapper for registering diag buffer support + * @ioc: per adapter object + * @diag_register: the diag_register struct passed in from user space * - * This will allow the driver to setup any required buffers that will be - * needed by firmware to communicate with the driver. 
*/ static long -_ctl_diag_register(void __user *arg, enum block_state state) +_ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc, + struct mpt2_diag_register *diag_register) { - struct mpt2_diag_register karg; - struct MPT2SAS_ADAPTER *ioc; int rc, i; void *request_data = NULL; dma_addr_t request_data_dma; @@ -1280,18 +1277,17 @@ _ctl_diag_register(void __user *arg, enum block_state state) u16 ioc_status; u8 issue_reset = 0; - if (copy_from_user(&karg, arg, sizeof(karg))) { - printk(KERN_ERR "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); - return -EFAULT; - } - if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) - return -ENODEV; - dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, __func__)); - buffer_type = karg.buffer_type; + if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) { + printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + buffer_type = diag_register->buffer_type; if (!_ctl_diag_capability(ioc, buffer_type)) { printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for " "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type); @@ -1306,24 +1302,12 @@ _ctl_diag_register(void __user *arg, enum block_state state) return -EINVAL; } - if (karg.requested_buffer_size % 4) { + if (diag_register->requested_buffer_size % 4) { printk(MPT2SAS_ERR_FMT "%s: the requested_buffer_size " "is not 4 byte aligned\n", ioc->name, __func__); return -EINVAL; } - if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex)) - return -EAGAIN; - else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) - return -ERESTARTSYS; - - if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) { - printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n", - ioc->name, __func__); - rc = -EAGAIN; - goto out; - } - smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx); if (!smid) { printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", @@ -1339,12 +1323,12 @@ _ctl_diag_register(void __user *arg, enum block_state state) ioc->ctl_cmds.smid = smid; request_data = ioc->diag_buffer[buffer_type]; - request_data_sz = karg.requested_buffer_size; - ioc->unique_id[buffer_type] = karg.unique_id; + request_data_sz = diag_register->requested_buffer_size; + ioc->unique_id[buffer_type] = diag_register->unique_id; ioc->diag_buffer_status[buffer_type] = 0; - memcpy(ioc->product_specific[buffer_type], karg.product_specific, - MPT2_PRODUCT_SPECIFIC_DWORDS); - ioc->diagnostic_flags[buffer_type] = karg.diagnostic_flags; + memcpy(ioc->product_specific[buffer_type], + diag_register->product_specific, MPT2_PRODUCT_SPECIFIC_DWORDS); + ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags; if (request_data) { request_data_dma = ioc->diag_buffer_dma[buffer_type]; @@ -1374,8 +1358,8 @@ _ctl_diag_register(void __user *arg, enum block_state state) } mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; - mpi_request->BufferType = karg.buffer_type; - mpi_request->Flags = cpu_to_le32(karg.diagnostic_flags); + mpi_request->BufferType = diag_register->buffer_type; + mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags); mpi_request->BufferAddress = cpu_to_le64(request_data_dma); mpi_request->BufferLength = cpu_to_le32(request_data_sz); mpi_request->VF_ID = 0; /* TODO */ @@ -1439,6 +1423,73 @@ _ctl_diag_register(void __user *arg, enum block_state state) request_data, request_data_dma); ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; + return rc; +} + +/** + * mpt2sas_enable_diag_buffer - enabling diag_buffers support driver load time + * @ioc: per adapter object + 
* @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1 + * + * This is called when command line option diag_buffer_enable is enabled + * at driver load time. + */ +void +mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register) +{ + struct mpt2_diag_register diag_register; + + memset(&diag_register, 0, sizeof(struct mpt2_diag_register)); + + if (bits_to_register & 1) { + printk(MPT2SAS_INFO_FMT "registering trace buffer support\n", + ioc->name); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; + /* register for 1MB buffers */ + diag_register.requested_buffer_size = (1024 * 1024); + diag_register.unique_id = 0x7075900; + _ctl_diag_register_2(ioc, &diag_register); + } + + if (bits_to_register & 2) { + printk(MPT2SAS_INFO_FMT "registering snapshot buffer support\n", + ioc->name); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075901; + _ctl_diag_register_2(ioc, &diag_register); + } +} + +/** + * _ctl_diag_register - application register with driver + * @arg - user space buffer containing ioctl content + * @state - NON_BLOCKING or BLOCKING + * + * This will allow the driver to setup any required buffers that will be + * needed by firmware to communicate with the driver. + */ +static long +_ctl_diag_register(void __user *arg, enum block_state state) +{ + struct mpt2_diag_register karg; + struct MPT2SAS_ADAPTER *ioc; + long rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + printk(KERN_ERR "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) + return -ENODEV; + + if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex)) + return -EAGAIN; + else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) + return -ERESTARTSYS; + rc = _ctl_diag_register_2(ioc, &karg); mutex_unlock(&ioc->ctl_cmds.mutex); return rc; } -- cgit v1.2.3-59-g8ed1b From 1b01fe3aa58b114b2dc296676023451c6434561e Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:28:59 +0530 Subject: [SCSI] mpt2sas: Add Extended Type for Diagnostic Buffer support Added tests for registry entries of EXBuffSize, EXImmed, and EXType to support the new Extended diag buffer type. Modified code where necessary to handle the new ExtendedType field in the F/W diagnostic Post and Release messages. 
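As a rough illustration only (not part of the patch), the new buffer type is gated on the extended-buffer capability bit reported in IOC Facts; the helper name here is hypothetical, and the driver performs the same test inline in _ctl_diag_capability() below:

    /* hypothetical helper; mirrors the inline capability test below */
    static int _diag_extended_capable(struct MPT2SAS_ADAPTER *ioc)
    {
        return (ioc->facts.IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) ? 1 : 0;
    }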
Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 15 +++++++++++---- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 18 ++++++++++++++++-- drivers/scsi/mpt2sas/mpt2sas_ctl.h | 4 ++-- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 1d2374b5a0a1..935cfc769cb6 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -78,15 +78,16 @@ module_param(msix_disable, int, 0); MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); /* diag_buffer_enable is bitwise - * bit 0 set = MPI2_DIAG_BUF_TYPE_TRACE(1) - * bit 1 set = MPI2_DIAG_BUF_TYPE_SNAPSHOT(2) + * bit 0 set = TRACE + * bit 1 set = SNAPSHOT + * bit 2 set = EXTENDED * * Either bit can be set, or both */ static int diag_buffer_enable; module_param(diag_buffer_enable, int, 0); -MODULE_PARM_DESC(diag_buffer_enable, " enable diag buffer at driver load " - "time (TRACE=1/SNAP=2/default=0)"); +MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " + "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); int mpt2sas_fwfault_debug; MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " @@ -1764,6 +1765,12 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) i++; } + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { + printk(KERN_INFO "%sDiag Extended Buffer", i ? "," : ""); + i++; + } + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { printk("%sTask Set Full", i ? "," : ""); diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 99a332d76f51..8dfc5f6a39b0 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -1230,7 +1230,7 @@ _ctl_btdh_mapping(void __user *arg) /** * _ctl_diag_capability - return diag buffer capability * @ioc: per adapter object - * @buffer_type: specifies either TRACE or SNAPSHOT + * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED * * returns 1 when diag buffer support is enabled in firmware */ @@ -1250,6 +1250,10 @@ _ctl_diag_capability(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type) MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) rc = 1; break; + case MPI2_DIAG_BUF_TYPE_EXTENDED: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) + rc = 1; } return rc; @@ -1460,6 +1464,16 @@ mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register) diag_register.unique_id = 0x7075901; _ctl_diag_register_2(ioc, &diag_register); } + + if (bits_to_register & 4) { + printk(MPT2SAS_INFO_FMT "registering extended buffer support\n", + ioc->name); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075901; + _ctl_diag_register_2(ioc, &diag_register); + } } /** @@ -1652,7 +1666,7 @@ _ctl_diag_query(void __user *arg) /** * _ctl_send_release - Diag Release Message * @ioc: per adapter object - * @buffer_type - specifies either TRACE or SNAPSHOT + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED * @issue_reset - specifies whether host reset is required. 
* */ diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h index 211f296dd191..8a5eeb1a5c84 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h @@ -313,7 +313,7 @@ struct mpt2_ioctl_btdh_mapping { * struct mpt2_diag_register - application register with driver * @hdr - generic header * @reserved - - * @buffer_type - specifies either TRACE or SNAPSHOT + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED * @application_flags - misc flags * @diagnostic_flags - specifies flags affecting command processing * @product_specific - product specific information @@ -352,7 +352,7 @@ struct mpt2_diag_unregister { * struct mpt2_diag_query - query relevant info associated with diag buffers * @hdr - generic header * @reserved - - * @buffer_type - specifies either TRACE or SNAPSHOT + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED * @application_flags - misc flags * @diagnostic_flags - specifies flags affecting command processing * @product_specific - product specific information -- cgit v1.2.3-59-g8ed1b From a8ebd76c49fa45d93a736ae0b0f192b554cc8c3f Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:29:29 +0530 Subject: [SCSI] mpt2sas: Added support to set the TimeStamp when sending ioc_init Added support to set the TimeStamp when sending ioc_init. Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 935cfc769cb6..c3524bcefd54 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -57,6 +57,7 @@ #include #include #include +#include #include "mpt2sas_base.h" @@ -2946,6 +2947,7 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) Mpi2IOCInitRequest_t mpi_request; Mpi2IOCInitReply_t mpi_reply; int r; + struct timeval current_time; dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, __func__)); @@ -2996,6 +2998,13 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) cpu_to_le32(ioc->reply_post_free_dma); #endif + /* This time stamp specifies number of milliseconds + * since epoch ~ midnight January 1, 1970. + */ + do_gettimeofday(¤t_time); + mpi_request.TimeStamp = (current_time.tv_sec * 1000) + + (current_time.tv_usec >> 3); + if (ioc->logging_level & MPT_DEBUG_INIT) { u32 *mfp; int i; -- cgit v1.2.3-59-g8ed1b From e0077d607f716f68d15ab6fbf3d9f4c41434142d Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:30:22 +0530 Subject: [SCSI] mpt2sas: Limit the max_depth to 32 for SATA devices which are not part of volume Added sanity check in _scsih_change_queue_depth to limit the max_depth to 32 for SATA devices. This is only for physical devices not part of a volume. 
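In outline, the added check amounts to the following (a condensed sketch; the code below additionally takes sas_device_lock and bails out early when the private data is missing or the target is a volume):

    sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
        sas_device_priv_data->sas_target->sas_address);
    if (sas_device && (sas_device->device_info &
        MPI2_SAS_DEVICE_INFO_SATA_DEVICE))
        max_depth = MPT2SAS_SATA_QUEUE_DEPTH; /* 32, per the description above */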
Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 59ea821c2a3c..eb0215a2b5fa 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -1107,8 +1107,33 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) struct Scsi_Host *shost = sdev->host; int max_depth; int tag_type; + struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT2SAS_DEVICE *sas_device_priv_data; + struct MPT2SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + unsigned long flags; max_depth = shost->can_queue; + + /* limit max device queue for SATA to 32 */ + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + goto not_sata; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + goto not_sata; + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) + goto not_sata; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, + sas_device_priv_data->sas_target->sas_address); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device && sas_device->device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + max_depth = MPT2SAS_SATA_QUEUE_DEPTH; + + not_sata: + if (!sdev->tagged_supported) max_depth = 1; if (qdepth > max_depth) -- cgit v1.2.3-59-g8ed1b From ec6c2b43b08f29e08ed5440abae1ec18d80fa8b7 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:31:01 +0530 Subject: [SCSI] mpt2sas: Added new info messages for IR and Expander events. (1) for the MPI2_EVENT_IR_OPERATION_STATUS event, add support to print "background init" or "make data consistent" for debugging purposes. If the RAIDOperation is set to a value not defined, then don't print anything (2) for the MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE event, add support to print "expander reduced functionality" and "expander reduced functionality complete", which are new events. 
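For (1), the operation status handling condenses to roughly the following sketch (the NULL initialisation of reason_str is assumed here so that undefined codes fall through silently; see the full hunk that follows):

    char *reason_str = NULL;

    switch (event_data->RAIDOperation) {
    /* ... existing resync/expansion/consistency check cases ... */
    case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
        reason_str = "background init";
        break;
    case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
        reason_str = "make data consistent";
        break;
    }
    if (!reason_str)
        return; /* undefined RAIDOperation: print nothing */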
Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index eb0215a2b5fa..c20c1e8cb8a7 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -4279,6 +4279,12 @@ _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc, case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: reason_str = "internal async notification"; break; + case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality complete"; + break; default: reason_str = "unknown reason"; break; @@ -5062,11 +5068,17 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT2SAS_ADAPTER *ioc, case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: reason_str = "consistency check"; break; - default: - reason_str = "unknown reason"; + case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: + reason_str = "background init"; + break; + case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: + reason_str = "make data consistent"; break; } + if (!reason_str) + return; + printk(MPT2SAS_INFO_FMT "raid operational status: (%s)" "\thandle(0x%04x), percent complete(%d)\n", ioc->name, reason_str, -- cgit v1.2.3-59-g8ed1b From 96b681c6ad3f2f5013c0ffc558969ee6ac8c450b Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:32:06 +0530 Subject: [SCSI] mpt2sas: Retrieve the ioc facts prior to putting the controller into READY state The driver needs to retrieve the ioc facts prior to putting the controller into READY state. The current design is calling ioc facts after putting the controller into READY state, which means the driver is sending a diag reset instead of message unit reset becuase the capability information is not yet available. Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index c3524bcefd54..84087cc2d772 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -3545,11 +3545,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) return r; pci_set_drvdata(ioc->pdev, ioc->shost); - r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); + r = _base_get_ioc_facts(ioc, CAN_SLEEP); if (r) goto out_free_resources; - r = _base_get_ioc_facts(ioc, CAN_SLEEP); + r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); if (r) goto out_free_resources; -- cgit v1.2.3-59-g8ed1b From e4e7c7ed3485bc530499158e28539e00d47f9ef2 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:33:14 +0530 Subject: [SCSI] mpt2sas: Return DID_TRANSPORT_DISRUPTED in nexus loss,SCSI_MLQUEUE_DEVICE_BUSY if device is busy 1 Its observed that the OS was sending request to the driver after it had been put into blocking state, so the driver was modified to return SCSI_MLQUEUE_DEVICE_BUSY. 2. Driver will return DID_TRANSPORT_DISRUPTED when sdev is haivng nexus loss. This occurrs when sdev is blocked, between the MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING and MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING events. 
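Condensed, the two behavioural changes look roughly like this (full hunks below):

    /* _scsih_qcmd(): also freeze the queue while the sdev is blocked */
    if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
        return SCSI_MLQUEUE_DEVICE_BUSY;

    /* _scsih_io_done(), MPI2_IOCSTATUS_SCSI_IOC_TERMINATED case */
    if (sas_device_priv_data->block) {
        scmd->result = DID_TRANSPORT_DISRUPTED << 16;
        goto out;
    }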
Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index c20c1e8cb8a7..b5531a251be9 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2885,7 +2885,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) } /* see if we are busy with task managment stuff */ - if (sas_target_priv_data->tm_busy) + if (sas_device_priv_data->block || sas_target_priv_data->tm_busy) return SCSI_MLQUEUE_DEVICE_BUSY; else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) return SCSI_MLQUEUE_HOST_BUSY; @@ -3351,10 +3351,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: if (sas_device_priv_data->block) { - scmd->result = (DID_BUS_BUSY << 16); - break; + scmd->result = DID_TRANSPORT_DISRUPTED << 16; + goto out; } - case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: scmd->result = DID_RESET << 16; -- cgit v1.2.3-59-g8ed1b From ec9472c74c3074541ea8389517f406b5c7ad0632 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:34:13 +0530 Subject: [SCSI] mpt2sas: mpt2sas_base_get_sense_buffer_dma should be returning little endian cpu_to_le64 when calculating the physical dma address. This will properly handle endianess on big endian systems. The return value of this function was changed from dma_addr_t to __le64. Remove the typecasting of u32 when setting the SenseBufferLowAddress, since its already in __le32 format. Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 7 ++++--- drivers/scsi/mpt2sas/mpt2sas_base.h | 2 +- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 2 +- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 84087cc2d772..db5e36735e72 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -1328,12 +1328,13 @@ mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid) * @ioc: per adapter object * @smid: system request message index * - * Returns phys pointer to sense buffer. + * Returns phys pointer to the low 32bit address of the sense buffer. 
*/ -dma_addr_t +__le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid) { - return ioc->sense_dma + ((smid - 1) * SCSI_SENSE_BUFFERSIZE); + return cpu_to_le32(ioc->sense_dma + + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); } /** diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 879fd70fd683..7efb6ab749df 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -771,7 +771,7 @@ int mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, void *mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid); void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid); void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr); -dma_addr_t mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, +__le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid); /* hi-priority queue */ diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 8dfc5f6a39b0..afdb4f36c30d 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -740,7 +740,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, Mpi2SCSIIORequest_t *scsiio_request = (Mpi2SCSIIORequest_t *)mpi_request; scsiio_request->SenseBufferLowAddress = - (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid); + mpt2sas_base_get_sense_buffer_dma(ioc, smid); priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid); memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE); mpt2sas_base_put_smid_scsi_io(ioc, smid, diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index b5531a251be9..6f5e2e471b48 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2939,7 +2939,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; mpi_request->SenseBufferLowAddress = - (u32)mpt2sas_base_get_sense_buffer_dma(ioc, smid); + mpt2sas_base_get_sense_buffer_dma(ioc, smid); mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4; mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI + MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR); -- cgit v1.2.3-59-g8ed1b From 463217bfecbf5d17a30133a55553d94aa9fc255e Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Mon, 5 Oct 2009 15:53:06 +0530 Subject: [SCSI] mpt2sas : PPC (power pc) endian bug fix's (1) EEDP(End to End data protection) was not working. This was due to not setting EEDP BlockSize and Flags to little endian format in the message frame. (2) Some expander sysfs attributes were not getting set properly. The sas format was not getting set due to endian issues with sas_format field in the struct rep_manu_reply. Since sas_format was not set properly, the component_vendor_id, component_revision_id, and component_id were not set. (3) In _transport_smp_handler: we don't need to convert the smid from little endian to cpu prior to calling mpt2sas_base_free_smid, because its allready in cpu format. (4) Some loginfos and ioc status were not xonverted from little endian to cpu. 
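The recurring pattern in these fixes is to treat every wire-format field as little endian on both the read and the write side, for example (lines taken from the hunks below):

    ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
    mpi_request->RequestDataLength =
        cpu_to_le16(sizeof(struct rep_manu_request));
    mpi_request->EEDPBlockSize = cpu_to_le32(scmd->device->sector_size);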
Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 4 +++- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 11 ++++++----- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 17 ++++++++--------- drivers/scsi/mpt2sas/mpt2sas_transport.c | 14 +++++++------- 4 files changed, 24 insertions(+), 22 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index db5e36735e72..6422e258fd52 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -2949,6 +2949,7 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) Mpi2IOCInitReply_t mpi_reply; int r; struct timeval current_time; + u16 ioc_status; dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, __func__)); @@ -3028,7 +3029,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) return r; } - if (mpi_reply.IOCStatus != MPI2_IOCSTATUS_SUCCESS || + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS || mpi_reply.IOCLogInfo) { printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__); r = -EIO; diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index afdb4f36c30d..84a124f8e21f 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -848,8 +848,9 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: " "IOCStatus(0x%04x), IOCLogInfo(0x%08x), " "TerminationCount(0x%08x)\n", ioc->name, - tm_reply->IOCStatus, tm_reply->IOCLogInfo, - tm_reply->TerminationCount); + le16_to_cpu(tm_reply->IOCStatus), + le32_to_cpu(tm_reply->IOCLogInfo), + le32_to_cpu(tm_reply->TerminationCount)); } #endif /* copy out xdata to user */ @@ -1411,7 +1412,7 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc, } else { printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " "log_info(0x%08x)\n", ioc->name, __func__, - ioc_status, mpi_reply->IOCLogInfo); + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); rc = -EFAULT; } @@ -1756,7 +1757,7 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset) } else { printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " "log_info(0x%08x)\n", ioc->name, __func__, - ioc_status, mpi_reply->IOCLogInfo); + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); rc = -EFAULT; } @@ -2017,7 +2018,7 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) } else { printk(MPT2SAS_DEBUG_FMT "%s: ioc_status(0x%04x) " "log_info(0x%08x)\n", ioc->name, __func__, - ioc_status, mpi_reply->IOCLogInfo); + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); rc = -EFAULT; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 6f5e2e471b48..d0d66726ff69 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2775,8 +2775,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request) else return; - mpi_request->EEDPBlockSize = scmd->device->sector_size; - switch (prot_type) { case SCSI_PROT_DIF_TYPE1: @@ -2784,8 +2782,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request) * enable ref/guard checking * auto increment ref tag */ - mpi_request->EEDPFlags = eedp_flags | - MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; mpi_request->CDB.EEDP32.PrimaryReferenceTag = @@ -2798,11 +2795,11 
@@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request) /* * enable guard checking */ - mpi_request->EEDPFlags = eedp_flags | - MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; - + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; break; } + mpi_request->EEDPBlockSize = cpu_to_le32(scmd->device->sector_size); + mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); } /** @@ -4395,6 +4392,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, #ifdef CONFIG_SCSI_MPT2SAS_LOGGING Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; #endif + u16 ioc_status; dewtprintk(ioc, printk(MPT2SAS_DEBUG_FMT "broadcast primative: " "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, event_data->PortWidth)); @@ -4428,8 +4426,9 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, mpt2sas_scsih_issue_tm(ioc, handle, lun, MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30); ioc->tm_cmds.status = MPT2_CMD_NOT_USED; - - if ((mpi_reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) && + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & MPI2_IOCSTATUS_MASK; + if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) && (mpi_reply->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || mpi_reply->ResponseCode == diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 8030bc2774c8..3a82872bad44 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -258,8 +258,7 @@ struct rep_manu_reply{ u8 response_length; u16 expander_change_count; u8 reserved0[2]; - u8 sas_format:1; - u8 reserved1:7; + u8 sas_format; u8 reserved2[3]; u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; @@ -374,7 +373,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, mpi_request->VP_ID = 0; sas_address_le = (u64 *)&mpi_request->SASAddress; *sas_address_le = cpu_to_le64(sas_address); - mpi_request->RequestDataLength = sizeof(struct rep_manu_request); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct rep_manu_request)); psge = &mpi_request->SGL; /* WRITE sgel first */ @@ -437,8 +437,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, SAS_EXPANDER_PRODUCT_ID_LEN); strncpy(edev->product_rev, manufacture_reply->product_rev, SAS_EXPANDER_PRODUCT_REV_LEN); - edev->level = manufacture_reply->sas_format; - if (manufacture_reply->sas_format) { + edev->level = manufacture_reply->sas_format & 1; + if (edev->level) { strncpy(edev->component_vendor_id, manufacture_reply->component_vendor_id, SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); @@ -1116,7 +1116,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); if (!dma_addr_out) { - mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); + mpt2sas_base_free_smid(ioc, smid); goto unmap; } @@ -1134,7 +1134,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); if (!dma_addr_in) { - mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); + mpt2sas_base_free_smid(ioc, smid); goto unmap; } -- cgit v1.2.3-59-g8ed1b From 8ffc457ed6fe33728657a0cfb7509b90d554c21f Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:35:41 +0530 Subject: [SCSI] mpt2sas: Freeze the sdev IO queue when firmware sends internal dev reset When receiving the 
MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET event, the driver will set the tm_busy flag in the sdev private host data. When the tm_busy flag is set, the driver will return SCSI_MLQUEUE_DEVICE_BUSY, effectively freezing the IO to the device. The tm_busy flag is cleared with the MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET event. Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index d0d66726ff69..c81e84291d2e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -4308,11 +4308,43 @@ static void _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { + struct MPT2SAS_TARGET *target_priv_data; + struct _sas_device *sas_device; + __le64 sas_address; + unsigned long flags; + Mpi2EventDataSasDeviceStatusChange_t *event_data = + fw_event->event_data; + #ifdef CONFIG_SCSI_MPT2SAS_LOGGING if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_sas_device_status_change_event_debug(ioc, - fw_event->event_data); + event_data); #endif + + if (!(event_data->ReasonCode == + MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && + event_data->ReasonCode == + MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(event_data->SASAddress); + sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, + sas_address); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (!sas_device || !sas_device->starget) + return; + + target_priv_data = sas_device->starget->hostdata; + if (!target_priv_data) + return; + + if (event_data->ReasonCode == + MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) + target_priv_data->tm_busy = 1; + else + target_priv_data->tm_busy = 0; } #ifdef CONFIG_SCSI_MPT2SAS_LOGGING -- cgit v1.2.3-59-g8ed1b From 744090d38b47ed8ead8f68b6f0c65866c0b9b17a Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Mon, 5 Oct 2009 15:56:56 +0530 Subject: [SCSI] mpt2sas : Add support for RAID Action System Shutdown Initiated at OS shutdown (1) Added new function _scsih_ir_shutdown. This function will issue the MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED request via MPI2_FUNCTION_RAID_ACTION. The function will wait 10 seconds for a reply message frame, then print out the ioc status and loginfo. This function is only called when there are raid volumes present. (2) Add shutdown callback in the struct pci_driver object scsih_driver. This will be called only when the system is shutting down. From this function, we will call _scsih_ir_shutdown mentioned above. (3) Add support in _scsih_remove to call _scsih_ir_shutdown. The function _scsih_remove will be called when the driver is unloaded (and the system is still running). A scsih internal command context is added to send internally generated message frames from mpt2sas_scsih.c.
Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.h | 4 ++ drivers/scsi/mpt2sas/mpt2sas_scsih.c | 133 ++++++++++++++++++++++++++++++++++- 2 files changed, 136 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 7efb6ab749df..eb51fe9f369d 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -492,12 +492,14 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); * @msix_table_backup: backup msix table * @scsi_io_cb_idx: shost generated commands * @tm_cb_idx: task management commands + * @scsih_cb_idx: scsih internal commands * @transport_cb_idx: transport internal commands * @ctl_cb_idx: clt internal commands * @base_cb_idx: base internal commands * @config_cb_idx: base internal commands * @base_cmds: * @transport_cmds: + * @scsih_cmds: * @tm_cmds: * @ctl_cmds: * @config_cmds: @@ -624,6 +626,7 @@ struct MPT2SAS_ADAPTER { u8 scsi_io_cb_idx; u8 tm_cb_idx; u8 transport_cb_idx; + u8 scsih_cb_idx; u8 ctl_cb_idx; u8 base_cb_idx; u8 config_cb_idx; @@ -631,6 +634,7 @@ struct MPT2SAS_ADAPTER { u8 tm_sas_control_cb_idx; struct _internal_cmd base_cmds; struct _internal_cmd transport_cmds; + struct _internal_cmd scsih_cmds; struct _internal_cmd tm_cmds; struct _internal_cmd ctl_cmds; struct _internal_cmd config_cmds; diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index c81e84291d2e..5916bddf3551 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -76,6 +76,7 @@ static u8 tm_cb_idx = -1; static u8 ctl_cb_idx = -1; static u8 base_cb_idx = -1; static u8 transport_cb_idx = -1; +static u8 scsih_cb_idx = -1; static u8 config_cb_idx = -1; static int mpt_ids; @@ -3793,6 +3794,40 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) return rc; } +/** + * _scsih_done - scsih callback handler. + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when sending internal generated message frames. + * The callback index passed is `ioc->scsih_cb_idx` + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +static u8 +_scsih_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); + if (ioc->scsih_cmds.status == MPT2_CMD_NOT_USED) + return 1; + if (ioc->scsih_cmds.smid != smid) + return 1; + ioc->scsih_cmds.status |= MPT2_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->scsih_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + ioc->scsih_cmds.status |= MPT2_CMD_REPLY_VALID; + } + ioc->scsih_cmds.status &= ~MPT2_CMD_PENDING; + complete(&ioc->scsih_cmds.done); + return 1; +} + /** * _scsih_expander_remove - removing expander object * @ioc: per adapter object @@ -5853,10 +5888,100 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, kfree(sas_expander); } +/** + * _scsih_ir_shutdown - IR shutdown notification + * @ioc: per adapter object + * + * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that + * the host system is shutting down. + * + * Return nothing. 
+ */ +static void +_scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc) +{ + Mpi2RaidActionRequest_t *mpi_request; + Mpi2RaidActionReply_t *mpi_reply; + u16 smid; + + /* is IR firmware build loaded ? */ + if (!ioc->ir_firmware) + return; + + /* are there any volumes ? */ + if (list_empty(&ioc->raid_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) { + printk(MPT2SAS_ERR_FMT "%s: scsih_cmd in use\n", + ioc->name, __func__); + goto out; + } + ioc->scsih_cmds.status = MPT2_CMD_PENDING; + + smid = mpt2sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->scsih_cmds.status = MPT2_CMD_NOT_USED; + goto out; + } + + mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); + + mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; + mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; + + printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name); + init_completion(&ioc->scsih_cmds.done); + mpt2sas_base_put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); + + if (!(ioc->scsih_cmds.status & MPT2_CMD_COMPLETE)) { + printk(MPT2SAS_ERR_FMT "%s: timeout\n", + ioc->name, __func__); + goto out; + } + + if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + + printk(MPT2SAS_INFO_FMT "IR shutdown (complete): " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } + + out: + ioc->scsih_cmds.status = MPT2_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + +/** + * _scsih_shutdown - routine call during system shutdown + * @pdev: PCI device struct + * + * Return nothing. + */ +static void +_scsih_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); + + _scsih_ir_shutdown(ioc); + mpt2sas_base_detach(ioc); +} + /** * _scsih_remove - detach and remove add host * @pdev: PCI device struct * + * Routine called when unloading the driver. * Return nothing. 
*/ static void __devexit @@ -5913,7 +6038,7 @@ _scsih_remove(struct pci_dev *pdev) } sas_remove_host(shost); - mpt2sas_base_detach(ioc); + _scsih_shutdown(pdev); list_del(&ioc->list); scsi_remove_host(shost); scsi_host_put(shost); @@ -6097,6 +6222,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->ctl_cb_idx = ctl_cb_idx; ioc->base_cb_idx = base_cb_idx; ioc->transport_cb_idx = transport_cb_idx; + ioc->scsih_cb_idx = scsih_cb_idx; ioc->config_cb_idx = config_cb_idx; ioc->tm_tr_cb_idx = tm_tr_cb_idx; ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; @@ -6234,6 +6360,7 @@ static struct pci_driver scsih_driver = { .id_table = scsih_pci_table, .probe = _scsih_probe, .remove = __devexit_p(_scsih_remove), + .shutdown = _scsih_shutdown, #ifdef CONFIG_PM .suspend = _scsih_suspend, .resume = _scsih_resume, @@ -6275,6 +6402,9 @@ _scsih_init(void) transport_cb_idx = mpt2sas_base_register_callback_handler( mpt2sas_transport_done); + /* scsih internal commands callback handler */ + scsih_cb_idx = mpt2sas_base_register_callback_handler(_scsih_done); + /* configuration page API internal commands callback handler */ config_cb_idx = mpt2sas_base_register_callback_handler( mpt2sas_config_done); @@ -6314,6 +6444,7 @@ _scsih_exit(void) mpt2sas_base_release_callback_handler(tm_cb_idx); mpt2sas_base_release_callback_handler(base_cb_idx); mpt2sas_base_release_callback_handler(transport_cb_idx); + mpt2sas_base_release_callback_handler(scsih_cb_idx); mpt2sas_base_release_callback_handler(config_cb_idx); mpt2sas_base_release_callback_handler(ctl_cb_idx); -- cgit v1.2.3-59-g8ed1b From e7d59c17a70e59a052d29467bbefb23ce700dcd4 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:36:52 +0530 Subject: [SCSI] mpt2sas: No link rate change, do not call update links nor unblock device (1) target resets are sending link change rate events with no link rate change -> thus said the driver was modified so when there is no link rate change, we don't need to call mpt2sas_transport_update_links nor _scsih_ublock_io_device. (2) There were changes made in _scsih_sas_topology_change_event_debug to change the debug strings so they are more clear. Also the link rate change information was added to display the new and previous link rate. for the MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST event when the ExpStatus is set to zero, display "responding" instead of "unknown status". 
Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 52 +++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 5916bddf3551..8dc682f00fd2 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2380,7 +2380,6 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc, u16 handle; u16 reason_code; u8 phy_number; - u8 link_rate; for (i = 0; i < event_data->NumEntries; i++) { handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); @@ -2391,11 +2390,6 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc, MPI2_EVENT_SAS_TOPO_RC_MASK; if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) _scsih_block_io_device(ioc, handle); - if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) { - link_rate = event_data->PHY[i].LinkRate >> 4; - if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5) - _scsih_ublock_io_device(ioc, handle); - } } } @@ -4084,7 +4078,7 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc, u16 reason_code; u8 phy_number; char *status_str = NULL; - char link_rate[25]; + u8 link_rate, prev_link_rate; switch (event_data->ExpStatus) { case MPI2_EVENT_SAS_TOPO_ES_ADDED: @@ -4094,6 +4088,7 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc, status_str = "remove"; break; case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: + case 0: status_str = "responding"; break; case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: @@ -4119,30 +4114,30 @@ _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc, MPI2_EVENT_SAS_TOPO_RC_MASK; switch (reason_code) { case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: - snprintf(link_rate, 25, ": add, link(0x%02x)", - (event_data->PHY[i].LinkRate >> 4)); - status_str = link_rate; + status_str = "target add"; break; case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: - status_str = ": remove"; + status_str = "target remove"; break; case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: - status_str = ": remove_delay"; + status_str = "delay target remove"; break; case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: - snprintf(link_rate, 25, ": link(0x%02x)", - (event_data->PHY[i].LinkRate >> 4)); - status_str = link_rate; + status_str = "link rate change"; break; case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: - status_str = ": responding"; + status_str = "target responding"; break; default: - status_str = ": unknown"; + status_str = "unknown"; break; } - printk(KERN_DEBUG "\tphy(%02d), attached_handle(0x%04x)%s\n", - phy_number, handle, status_str); + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + printk(KERN_DEBUG "\tphy(%02d), attached_handle(0x%04x): %s:" + " link rate: new(0x%02x), old(0x%02x)\n", phy_number, + handle, status_str, link_rate, prev_link_rate); + } } #endif @@ -4166,7 +4161,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, struct _sas_device *sas_device; u64 sas_address; unsigned long flags; - u8 link_rate; + u8 link_rate, prev_link_rate; Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING @@ -4226,18 +4221,25 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, if (!handle) continue; link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; switch (reason_code) { case 
MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: + + if (link_rate == prev_link_rate) + break; + + mpt2sas_transport_update_links(ioc, sas_address, + handle, phy_number, link_rate); + + if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5) + _scsih_ublock_io_device(ioc, handle); + break; case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: mpt2sas_transport_update_links(ioc, sas_address, handle, phy_number, link_rate); - if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) - break; - if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) { - _scsih_add_device(ioc, handle, phy_number, 0); - } + _scsih_add_device(ioc, handle, phy_number, 0); break; case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: -- cgit v1.2.3-59-g8ed1b From 153f251e477f41dab0314c4cd2004b9e7ebac4eb Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 23 Sep 2009 17:40:05 +0530 Subject: [SCSI] mpt2sas: Bump version 03.100.03.00 Bump version to 03.100.03.00 Signed-off-by: Kashyap Desai Signed-off-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index eb51fe9f369d..bb4f14656afa 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,8 +69,8 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation " #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "02.100.03.00" -#define MPT2SAS_MAJOR_VERSION 02 +#define MPT2SAS_DRIVER_VERSION "03.100.03.00" +#define MPT2SAS_MAJOR_VERSION 03 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 03 #define MPT2SAS_RELEASE_VERSION 00 -- cgit v1.2.3-59-g8ed1b From 8d56825321339f0ef7ad08eb58332e1836881e3b Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:12:21 -0600 Subject: [SCSI] megaraid_sas: tape drive support fix Add the Tape drive fix to the megaraid_sas driver: If the command is for the tape device, set the FW pthru timeout to the os layer timeout value. Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index a39addc3a596..6fd1e1796029 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -686,6 +686,17 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); + /* + * If the command is for the tape device, set the + * pthru timeout to the os layer timeout value. + */ + if (scp->device->type == TYPE_TAPE) { + if ((scp->request->timeout / HZ) > 0xFFFF) + pthru->timeout = 0xFFFF; + else + pthru->timeout = scp->request->timeout / HZ; + } + /* * Construct SGL */ -- cgit v1.2.3-59-g8ed1b From c35188377f12e5e0a74f18c3dfdd67baf88db514 Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:18:02 -0600 Subject: [SCSI] megaraid_sas: Add poll mechanism to megaraid sas driver Add Poll_wait mechanism to SAS-2 MegaRAID SAS Linux driver. Driver will wakeup poll after the driver get event from MegaRAID SAS FW. 
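Editorial note, not part of the patch: a minimal user-space sketch of how an application could sleep on the new poll support until the driver signals an AEN. The device node path and the prior AEN-registration step are assumptions for illustration; only the poll(2) semantics come from this change.

/* Hypothetical user-space sketch: block until the driver signals an AEN.
 * The node name below is an assumption; the real name depends on how udev
 * exposes the megaraid_sas management character device. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;

	pfd.fd = open("/dev/megaraid_sas_ioctl_node", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	/* An AEN would normally be registered first via the existing fw ioctl. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		printf("AEN pending - fetch the event detail via the fw ioctl\n");

	close(pfd.fd);
	return 0;
}

On the driver side, as the diff below shows, megasas_service_aen() sets megasas_poll_wait_aen and wakes megasas_poll_wait, which is what makes the poll() above return.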
Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 45 +++++++++++++++++++++++++++++++++++- drivers/scsi/megaraid/megaraid_sas.h | 1 + 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 6fd1e1796029..0d44fecf367d 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -89,8 +90,14 @@ static struct megasas_mgmt_info megasas_mgmt_info; static struct fasync_struct *megasas_async_queue; static DEFINE_MUTEX(megasas_async_queue_mutex); +static int megasas_poll_wait_aen; +static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); + static u32 megasas_dbg_lvl; +/* define lock for aen poll */ +spinlock_t poll_aen_lock; + static void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); @@ -1292,11 +1299,17 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, static void megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) { + unsigned long flags; /* * Don't signal app if it is just an aborted previously registered aen */ - if (!cmd->abort_aen) + if ((!cmd->abort_aen) && (instance->unload == 0)) { + spin_lock_irqsave(&poll_aen_lock, flags); + megasas_poll_wait_aen = 1; + spin_unlock_irqrestore(&poll_aen_lock, flags); + wake_up(&megasas_poll_wait); kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); + } else cmd->abort_aen = 0; @@ -1381,6 +1394,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, { int exception = 0; struct megasas_header *hdr = &cmd->frame->hdr; + unsigned long flags; if (cmd->scmd) cmd->scmd->SCp.ptr = NULL; @@ -1470,6 +1484,12 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, case MFI_CMD_SMP: case MFI_CMD_STP: case MFI_CMD_DCMD: + if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || + cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { + spin_lock_irqsave(&poll_aen_lock, flags); + megasas_poll_wait_aen = 0; + spin_unlock_irqrestore(&poll_aen_lock, flags); + } /* * See if got an event notification @@ -2583,6 +2603,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) *instance->producer = 0; *instance->consumer = 0; + megasas_poll_wait_aen = 0; instance->evt_detail = pci_alloc_consistent(pdev, sizeof(struct @@ -2607,6 +2628,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&instance->cmd_pool_lock); spin_lock_init(&instance->completion_lock); + spin_lock_init(&poll_aen_lock); mutex_init(&instance->aen_mutex); sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); @@ -2621,6 +2643,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) megasas_dbg_lvl = 0; instance->flag = 0; + instance->unload = 0; instance->last_time = 0; /* @@ -2924,6 +2947,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) struct megasas_instance *instance; instance = pci_get_drvdata(pdev); + instance->unload = 1; host = instance->host; if (poll_mode_io) @@ -3026,6 +3050,23 @@ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) return rc; } +/** + * megasas_mgmt_poll - char node "poll" entry point + * */ +static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait) +{ + unsigned int mask; + unsigned long flags; + poll_wait(file, &megasas_poll_wait, wait); + 
spin_lock_irqsave(&poll_aen_lock, flags); + if (megasas_poll_wait_aen) + mask = (POLLIN | POLLRDNORM); + else + mask = 0; + spin_unlock_irqrestore(&poll_aen_lock, flags); + return mask; +} + /** * megasas_mgmt_fw_ioctl - Issues management ioctls to FW * @instance: Adapter soft state @@ -3067,6 +3108,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, */ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); cmd->frame->hdr.context = cmd->index; + cmd->frame->hdr.pad_0 = 0; /* * The management interface between applications and the fw uses @@ -3348,6 +3390,7 @@ static const struct file_operations megasas_mgmt_fops = { .open = megasas_mgmt_open, .fasync = megasas_mgmt_fasync, .unlocked_ioctl = megasas_mgmt_ioctl, + .poll = megasas_mgmt_poll, #ifdef CONFIG_COMPAT .compat_ioctl = megasas_mgmt_compat_ioctl, #endif diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 0d033248fdf1..900359fd3fb9 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1120,6 +1120,7 @@ struct megasas_instance { struct tasklet_struct isr_tasklet; u8 flag; + u8 unload; unsigned long last_time; struct timer_list io_completion_timer; -- cgit v1.2.3-59-g8ed1b From 72c4fd36dc7f755a5245ef2495fe27d5084d776d Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:20:59 -0600 Subject: [SCSI] megaraid_sas: add sysfs for AEN polling update the sysfs parameter to tell application driver support AEN poll Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 0d44fecf367d..012141378f3b 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -92,7 +92,7 @@ static DEFINE_MUTEX(megasas_async_queue_mutex); static int megasas_poll_wait_aen; static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); - +static u32 support_poll_for_event; static u32 megasas_dbg_lvl; /* define lock for aen poll */ @@ -3431,6 +3431,15 @@ megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL); +static ssize_t +megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) +{ + return sprintf(buf, "%u\n", support_poll_for_event); +} + +static DRIVER_ATTR(support_poll_for_event, S_IRUGO, + megasas_sysfs_show_support_poll_for_event, NULL); + static ssize_t megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) { @@ -3522,6 +3531,8 @@ static int __init megasas_init(void) printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, MEGASAS_EXT_VERSION); + support_poll_for_event = 2; + memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); /* @@ -3554,6 +3565,12 @@ static int __init megasas_init(void) &driver_attr_release_date); if (rval) goto err_dcf_rel_date; + + rval = driver_create_file(&megasas_pci_driver.driver, + &driver_attr_support_poll_for_event); + if (rval) + goto err_dcf_support_poll_for_event; + rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); if (rval) @@ -3569,8 +3586,13 @@ err_dcf_poll_mode_io: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); err_dcf_dbg_lvl: + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_support_poll_for_event); + +err_dcf_support_poll_for_event: driver_remove_file(&megasas_pci_driver.driver, 
&driver_attr_release_date); + err_dcf_rel_date: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); err_dcf_attr_ver: -- cgit v1.2.3-59-g8ed1b From 879111224d0784eab623fe8130a1f4481e0e1966 Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:31:54 -0600 Subject: [SCSI] megaraid_sas: Add new megaraid SAS 2 controller support to the driver Add the new megaraid sas 2 controller to the driver. megaraid sas2 is LSI next generation SAS products. driver add the interface to support this product. Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 139 +++++++++++++++++++++++++++++++++-- drivers/scsi/megaraid/megaraid_sas.h | 4 + 2 files changed, 138 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 012141378f3b..b6e43271883c 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -76,6 +76,10 @@ static struct pci_device_id megasas_pci_table[] = { /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, /* gen2*/ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, + /* skinny*/ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, + /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, /* xscale IOP, vega */ {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, @@ -334,6 +338,99 @@ static struct megasas_instance_template megasas_instance_template_ppc = { .read_fw_status_reg = megasas_read_fw_status_reg_ppc, }; +/** + * megasas_enable_intr_skinny - Enables interrupts + * @regs: MFI register set + */ +static inline void +megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs) +{ + writel(0xFFFFFFFF, &(regs)->outbound_intr_mask); + + writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_disable_intr_skinny - Disables interrupt + * @regs: MFI register set + */ +static inline void +megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs) +{ + u32 mask = 0xFFFFFFFF; + writel(mask, ®s->outbound_intr_mask); + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_read_fw_status_reg_skinny - returns the current FW status value + * @regs: MFI register set + */ +static u32 +megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs) +{ + return readl(&(regs)->outbound_scratch_pad); +} + +/** + * megasas_clear_interrupt_skinny - Check & clear interrupt + * @regs: MFI register set + */ +static int +megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) +{ + u32 status; + /* + * Check if it is our interrupt + */ + status = readl(®s->outbound_intr_status); + + if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { + return 1; + } + + /* + * Clear the interrupt by writing back the same value + */ + writel(status, ®s->outbound_intr_status); + + /* + * dummy read to flush PCI + */ + readl(®s->outbound_intr_status); + + return 0; +} + +/** + * megasas_fire_cmd_skinny - Sends command to the FW + * @frame_phys_addr : Physical address of cmd + * @frame_count : Number of frames for the command + * @regs : MFI register set + */ +static inline void +megasas_fire_cmd_skinny(dma_addr_t frame_phys_addr, u32 frame_count, + struct megasas_register_set __iomem *regs) +{ + writel(0, &(regs)->inbound_high_queue_port); + writel((frame_phys_addr | 
(frame_count<<1))|1, + &(regs)->inbound_low_queue_port); +} + +static struct megasas_instance_template megasas_instance_template_skinny = { + + .fire_cmd = megasas_fire_cmd_skinny, + .enable_intr = megasas_enable_intr_skinny, + .disable_intr = megasas_disable_intr_skinny, + .clear_intr = megasas_clear_intr_skinny, + .read_fw_status_reg = megasas_read_fw_status_reg_skinny, +}; + + /** * The following functions are defined for gen2 (deviceid : 0x78 0x79) * controllers @@ -1587,16 +1684,34 @@ megasas_transition_to_ready(struct megasas_instance* instance) /* * Set the CLR bit in inbound doorbell */ - writel(MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, - &instance->reg_set->inbound_doorbell); + if ((instance->pdev->device == \ + PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + + writel( + MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, + &instance->reg_set->reserved_0[0]); + } else { + writel( + MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, + &instance->reg_set->inbound_doorbell); + } max_wait = 2; cur_state = MFI_STATE_WAIT_HANDSHAKE; break; case MFI_STATE_BOOT_MESSAGE_PENDING: - writel(MFI_INIT_HOTPLUG, - &instance->reg_set->inbound_doorbell); + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + writel(MFI_INIT_HOTPLUG, + &instance->reg_set->reserved_0[0]); + } else + writel(MFI_INIT_HOTPLUG, + &instance->reg_set->inbound_doorbell); max_wait = 10; cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; @@ -1607,7 +1722,15 @@ megasas_transition_to_ready(struct megasas_instance* instance) * Bring it to READY state; assuming max wait 10 secs */ instance->instancet->disable_intr(instance->reg_set); - writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell); + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + writel(MFI_RESET_FLAGS, + &instance->reg_set->reserved_0[0]); + } else + writel(MFI_RESET_FLAGS, + &instance->reg_set->inbound_doorbell); max_wait = 60; cur_state = MFI_STATE_OPERATIONAL; @@ -2112,6 +2235,8 @@ static int megasas_init_mfi(struct megasas_instance *instance) * Map the message registers */ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) { instance->base_addr = pci_resource_start(instance->pdev, 1); } else { @@ -2142,6 +2267,10 @@ static int megasas_init_mfi(struct megasas_instance *instance) case PCI_DEVICE_ID_LSI_SAS0079GEN2: instance->instancet = &megasas_instance_template_gen2; break; + case PCI_DEVICE_ID_LSI_SAS0073SKINNY: + case PCI_DEVICE_ID_LSI_SAS0071SKINNY: + instance->instancet = &megasas_instance_template_skinny; + break; case PCI_DEVICE_ID_LSI_SAS1064R: case PCI_DEVICE_ID_DELL_PERC5: default: diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 900359fd3fb9..365a96172070 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -30,6 +30,8 @@ #define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 #define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078 #define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079 +#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073 +#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071 /* * ===================================== @@ -584,6 +586,8 @@ struct megasas_ctrl_info { #define 
MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 #define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004) +#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000 +#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001) /* * register set for both 1068 and 1078 controllers -- cgit v1.2.3-59-g8ed1b From 81e403ce3c6a34cd705bf54d4cdeefdeb7068a8d Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:27:54 -0600 Subject: [SCSI] megaraid_sas: infrastructure to get PDs from FW Add system PDs to OS. Driver implemented the get_pd_list function to get the system PD from FW. Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 96 ++++++++++++++++++++++++++++++++++++ drivers/scsi/megaraid/megaraid_sas.h | 88 ++++++++++++++++++++++++++++++++- 2 files changed, 182 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index b6e43271883c..48c3658d73a7 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -2036,6 +2036,98 @@ static int megasas_alloc_cmds(struct megasas_instance *instance) return 0; } +/* + * megasas_get_pd_list_info - Returns FW's pd_list structure + * @instance: Adapter soft state + * @pd_list: pd_list structure + * + * Issues an internal command (DCMD) to get the FW's controller PD + * list structure. This information is mainly used to find out SYSTEM + * supported by the FW. + */ +static int +megasas_get_pd_list(struct megasas_instance *instance) +{ + int ret = 0, pd_index = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_PD_LIST *ci; + struct MR_PD_ADDRESS *pd_addr; + dma_addr_t ci_h = 0; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n"); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + ci = pci_alloc_consistent(instance->pdev, + MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h); + + if (!ci) { + printk(KERN_DEBUG "Failed to alloc mem for pd_list\n"); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; + dcmd->mbox.b[1] = 0; + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); + dcmd->opcode = MR_DCMD_PD_LIST_QUERY; + dcmd->sgl.sge32[0].phys_addr = ci_h; + dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); + + if (!megasas_issue_polled(instance, cmd)) { + ret = 0; + } else { + ret = -1; + } + + /* + * the following function will get the instance PD LIST. 
+ */ + + pd_addr = ci->addr; + + if ( ret == 0 && + (ci->count < + (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { + + memset(instance->pd_list, 0, + MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); + + for (pd_index = 0; pd_index < ci->count; pd_index++) { + + instance->pd_list[pd_addr->deviceId].tid = + pd_addr->deviceId; + instance->pd_list[pd_addr->deviceId].driveType = + pd_addr->scsiDevType; + instance->pd_list[pd_addr->deviceId].driveState = + MR_PD_STATE_SYSTEM; + pd_addr++; + } + } + + pci_free_consistent(instance->pdev, + MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), + ci, ci_h); + megasas_return_cmd(instance, cmd); + + return ret; +} + + /** * megasas_get_controller_info - Returns FW's controller structure * @instance: Adapter soft state @@ -2326,6 +2418,10 @@ static int megasas_init_mfi(struct megasas_instance *instance) if (megasas_issue_init_mfi(instance)) goto fail_fw_init; + memset(instance->pd_list, 0 , + (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); + megasas_get_pd_list(instance); + ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); /* diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 365a96172070..8ac6b2659c12 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -133,6 +133,7 @@ #define MR_DCMD_CLUSTER 0x08000000 #define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 #define MR_DCMD_CLUSTER_RESET_LD 0x08010200 +#define MR_DCMD_PD_LIST_QUERY 0x02010100 /* * MFI command completion codes @@ -253,9 +254,89 @@ enum MR_EVT_ARGS { MR_EVT_ARGS_STR, MR_EVT_ARGS_TIME, MR_EVT_ARGS_ECC, + MR_EVT_ARGS_LD_PROP, + MR_EVT_ARGS_PD_SPARE, + MR_EVT_ARGS_PD_INDEX, + MR_EVT_ARGS_DIAG_PASS, + MR_EVT_ARGS_DIAG_FAIL, + MR_EVT_ARGS_PD_LBA_LBA, + MR_EVT_ARGS_PORT_PHY, + MR_EVT_ARGS_PD_MISSING, + MR_EVT_ARGS_PD_ADDRESS, + MR_EVT_ARGS_BITMAP, + MR_EVT_ARGS_CONNECTOR, + MR_EVT_ARGS_PD_PD, + MR_EVT_ARGS_PD_FRU, + MR_EVT_ARGS_PD_PATHINFO, + MR_EVT_ARGS_PD_POWER_STATE, + MR_EVT_ARGS_GENERIC, +}; +/* + * define constants for device list query options + */ +enum MR_PD_QUERY_TYPE { + MR_PD_QUERY_TYPE_ALL = 0, + MR_PD_QUERY_TYPE_STATE = 1, + MR_PD_QUERY_TYPE_POWER_STATE = 2, + MR_PD_QUERY_TYPE_MEDIA_TYPE = 3, + MR_PD_QUERY_TYPE_SPEED = 4, + MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, }; +enum MR_PD_STATE { + MR_PD_STATE_UNCONFIGURED_GOOD = 0x00, + MR_PD_STATE_UNCONFIGURED_BAD = 0x01, + MR_PD_STATE_HOT_SPARE = 0x02, + MR_PD_STATE_OFFLINE = 0x10, + MR_PD_STATE_FAILED = 0x11, + MR_PD_STATE_REBUILD = 0x14, + MR_PD_STATE_ONLINE = 0x18, + MR_PD_STATE_COPYBACK = 0x20, + MR_PD_STATE_SYSTEM = 0x40, + }; + + + /* + * defines the physical drive address structure + */ +struct MR_PD_ADDRESS { + u16 deviceId; + u16 enclDeviceId; + + union { + struct { + u8 enclIndex; + u8 slotNumber; + } mrPdAddress; + struct { + u8 enclPosition; + u8 enclConnectorIndex; + } mrEnclAddress; + }; + u8 scsiDevType; + union { + u8 connectedPortBitmap; + u8 connectedPortNumbers; + }; + u64 sasAddr[2]; +} __packed; + +/* + * defines the physical drive list structure + */ +struct MR_PD_LIST { + u32 size; + u32 count; + struct MR_PD_ADDRESS addr[1]; +} __packed; + +struct megasas_pd_list { + u16 tid; + u8 driveType; + u8 driveState; +} __packed; + /* * SAS controller properties */ @@ -284,7 +365,7 @@ struct megasas_ctrl_prop { u8 expose_encl_devices; u8 reserved[38]; -} __attribute__ ((packed)); +} __packed; /* * SAS controller information @@ -527,7 +608,7 @@ struct megasas_ctrl_info { u8 pad[0x800 - 0x6a0]; -} __attribute__ ((packed)); +} 
__packed; /* * =============================== @@ -542,6 +623,8 @@ struct megasas_ctrl_info { #define MEGASAS_DEFAULT_INIT_ID -1 #define MEGASAS_MAX_LUN 8 #define MEGASAS_MAX_LD 64 +#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ + MEGASAS_MAX_DEV_PER_CHANNEL) #define MEGASAS_DBG_LVL 1 @@ -1089,6 +1172,7 @@ struct megasas_instance { unsigned long base_addr; struct megasas_register_set __iomem *reg_set; + struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; s8 init_id; u16 max_num_sge; -- cgit v1.2.3-59-g8ed1b From 044833b572b96afe91506a0edec42efd84ba4939 Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:33:06 -0600 Subject: [SCSI] megaraid_sas: report system PDs to OS When OS issue inquiry, it will check driver's internal pd_list. Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 91 +++++++++++++++++++++++++----------- 1 file changed, 65 insertions(+), 26 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 48c3658d73a7..9967ee72a967 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -1115,24 +1115,76 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) return 0; } +static struct megasas_instance *megasas_lookup_instance(u16 host_no) +{ + int i; + + for (i = 0; i < megasas_mgmt_info.max_index; i++) { + + if ((megasas_mgmt_info.instance[i]) && + (megasas_mgmt_info.instance[i]->host->host_no == host_no)) + return megasas_mgmt_info.instance[i]; + } + + return NULL; +} + static int megasas_slave_configure(struct scsi_device *sdev) { + u16 pd_index = 0; + struct megasas_instance *instance ; + + instance = megasas_lookup_instance(sdev->host->host_no); + /* - * Don't export physical disk devices to the disk driver. - * - * FIXME: Currently we don't export them to the midlayer at all. - * That will be fixed once LSI engineers have audited the - * firmware for possible issues. - */ - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && sdev->type == TYPE_DISK) + * Don't export physical disk devices to the disk driver. + * + * FIXME: Currently we don't export them to the midlayer at all. + * That will be fixed once LSI engineers have audited the + * firmware for possible issues. + */ + if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && + sdev->type == TYPE_DISK) { + pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + if (instance->pd_list[pd_index].driveState == + MR_PD_STATE_SYSTEM) { + blk_queue_rq_timeout(sdev->request_queue, + MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); + return 0; + } return -ENXIO; + } /* - * The RAID firmware may require extended timeouts. - */ - if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS) - blk_queue_rq_timeout(sdev->request_queue, - MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); + * The RAID firmware may require extended timeouts. 
+ */ + blk_queue_rq_timeout(sdev->request_queue, + MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); + return 0; +} + +static int megasas_slave_alloc(struct scsi_device *sdev) +{ + u16 pd_index = 0; + struct megasas_instance *instance ; + instance = megasas_lookup_instance(sdev->host->host_no); + if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) && + (sdev->type == TYPE_DISK)) { + /* + * Open the OS scan to the SYSTEM PD + */ + pd_index = + (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + if ((instance->pd_list[pd_index].driveState == + MR_PD_STATE_SYSTEM) && + (instance->pd_list[pd_index].driveType == + TYPE_DISK)) { + return 0; + } + return -ENXIO; + } return 0; } @@ -1423,6 +1475,7 @@ static struct scsi_host_template megasas_template = { .name = "LSI SAS based MegaRAID driver", .proc_name = "megaraid_sas", .slave_configure = megasas_slave_configure, + .slave_alloc = megasas_slave_alloc, .queuecommand = megasas_queue_command, .eh_device_reset_handler = megasas_reset_device, .eh_bus_reset_handler = megasas_reset_bus_host, @@ -3455,20 +3508,6 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, return error; } -static struct megasas_instance *megasas_lookup_instance(u16 host_no) -{ - int i; - - for (i = 0; i < megasas_mgmt_info.max_index; i++) { - - if ((megasas_mgmt_info.instance[i]) && - (megasas_mgmt_info.instance[i]->host->host_no == host_no)) - return megasas_mgmt_info.instance[i]; - } - - return NULL; -} - static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) { struct megasas_iocpacket __user *user_ioc = -- cgit v1.2.3-59-g8ed1b From 7bebf5c79cb62766c76c6c1b9c77b86496fd363e Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:40:58 -0600 Subject: [SCSI] megaraid_sas: allocate the application cmds to sas2 controller MegaRAID SAS2 controller ioctl can't use 32 cmd for applications. Driver need to divide different number of cmds to IO and application. 
Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 24 +++++++++++++++++++++--- drivers/scsi/megaraid/megaraid_sas.h | 1 + 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 9967ee72a967..5afd65121725 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -1239,7 +1239,14 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) spin_lock_irqsave(instance->host->host_lock, flags); instance->flag &= ~MEGASAS_FW_BUSY; - instance->host->can_queue = + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + instance->host->can_queue = + instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; + } else + instance->host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS; spin_unlock_irqrestore(instance->host->host_lock, flags); @@ -2774,7 +2781,13 @@ static int megasas_io_attach(struct megasas_instance *instance) */ host->irq = instance->pdev->irq; host->unique_id = instance->unique_id; - host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS; + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + host->can_queue = + instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; + } else + host->can_queue = + instance->max_fw_cmds - MEGASAS_INT_CMDS; host->this_id = instance->init_id; host->sg_tablesize = instance->max_num_sge; host->max_sectors = instance->max_sectors_per_req; @@ -2909,7 +2922,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&poll_aen_lock); mutex_init(&instance->aen_mutex); - sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); /* * Initialize PCI related and misc parameters @@ -2919,6 +2931,12 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) instance->unique_id = pdev->bus->number << 8 | pdev->devfn; instance->init_id = MEGASAS_DEFAULT_INIT_ID; + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); + } else + sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); + megasas_dbg_lvl = 0; instance->flag = 0; instance->unload = 0; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 8ac6b2659c12..4c78cd32e757 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -655,6 +655,7 @@ struct megasas_ctrl_info { * is shown below */ #define MEGASAS_INT_CMDS 32 +#define MEGASAS_SKINNY_INT_CMDS 5 /* * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit -- cgit v1.2.3-59-g8ed1b From f4c9a1317d32bb0af7546ef0c1dcc3be52dc8d0a Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:43:28 -0600 Subject: [SCSI] megaraid_sas: add the IEEE SGE support to SAS2 controller To increase the performance, megaraid sas driver added the IEEE SGE support to support SAS2 controller. 
Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 80 ++++++++++++++++++++++++++++++++---- drivers/scsi/megaraid/megaraid_sas.h | 9 ++++ 2 files changed, 82 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 5afd65121725..4c04a68bad6c 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -696,6 +696,35 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, return sge_count; } +/** + * megasas_make_sgl_skinny - Prepares IEEE SGL + * @instance: Adapter soft state + * @scp: SCSI command from the mid-layer + * @mfi_sgl: SGL to be filled in + * + * If successful, this function returns the number of SG elements. Otherwise, + * it returnes -1. + */ +static int +megasas_make_sgl_skinny(struct megasas_instance *instance, + struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) +{ + int i; + int sge_count; + struct scatterlist *os_sgl; + + sge_count = scsi_dma_map(scp); + + if (sge_count) { + scsi_for_each_sg(scp, os_sgl, sge_count, i) { + mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); + mfi_sgl->sge_skinny[i].phys_addr = + sg_dma_address(os_sgl); + } + } + return sge_count; +} + /** * megasas_get_frame_count - Computes the number of frames * @frame_type : type of frame- io or pthru frame @@ -704,7 +733,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, * Returns the number of frames required for numnber of sge's (sge_count) */ -static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type) +static u32 megasas_get_frame_count(struct megasas_instance *instance, + u8 sge_count, u8 frame_type) { int num_cnt; int sge_bytes; @@ -714,6 +744,10 @@ static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type) sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : sizeof(struct megasas_sge32); + if (instance->flag_ieee) { + sge_sz = sizeof(struct megasas_sge_skinny); + } + /* * Main frame can contain 2 SGEs for 64-bit SGLs and * 3 SGEs for 32-bit SGLs for ldio & @@ -721,12 +755,16 @@ static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type) * 2 SGEs for 32-bit SGLs for pthru frame */ if (unlikely(frame_type == PTHRU_FRAME)) { - if (IS_DMA64) + if (instance->flag_ieee == 1) { + num_cnt = sge_count - 1; + } else if (IS_DMA64) num_cnt = sge_count - 1; else num_cnt = sge_count - 2; } else { - if (IS_DMA64) + if (instance->flag_ieee == 1) { + num_cnt = sge_count - 1; + } else if (IS_DMA64) num_cnt = sge_count - 2; else num_cnt = sge_count - 3; @@ -775,6 +813,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, else if (scp->sc_data_direction == PCI_DMA_NONE) flags = MFI_FRAME_DIR_NONE; + if (instance->flag_ieee == 1) { + flags |= MFI_FRAME_IEEE; + } + /* * Prepare the DCDB frame */ @@ -804,7 +846,11 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, /* * Construct SGL */ - if (IS_DMA64) { + if (instance->flag_ieee == 1) { + pthru->flags |= MFI_FRAME_SGL64; + pthru->sge_count = megasas_make_sgl_skinny(instance, scp, + &pthru->sgl); + } else if (IS_DMA64) { pthru->flags |= MFI_FRAME_SGL64; pthru->sge_count = megasas_make_sgl64(instance, scp, &pthru->sgl); @@ -823,7 +869,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, * Compute the total number of frames this command consumes. FW uses * this number to pull sufficient number of frames from host memory. 
*/ - cmd->frame_count = megasas_get_frame_count(pthru->sge_count, + cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, PTHRU_FRAME); return cmd->frame_count; @@ -854,6 +900,10 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) flags = MFI_FRAME_DIR_READ; + if (instance->flag_ieee == 1) { + flags |= MFI_FRAME_IEEE; + } + /* * Prepare the Logical IO frame: 2nd bit is zero for all read cmds */ @@ -924,7 +974,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, /* * Construct SGL */ - if (IS_DMA64) { + if (instance->flag_ieee) { + ldio->flags |= MFI_FRAME_SGL64; + ldio->sge_count = megasas_make_sgl_skinny(instance, scp, + &ldio->sgl); + } else if (IS_DMA64) { ldio->flags |= MFI_FRAME_SGL64; ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); } else @@ -941,7 +995,8 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, * Compute the total number of frames this command consumes. FW uses * this number to pull sufficient number of frames from host memory. */ - cmd->frame_count = megasas_get_frame_count(ldio->sge_count, IO_FRAME); + cmd->frame_count = megasas_get_frame_count(instance, + ldio->sge_count, IO_FRAME); return cmd->frame_count; } @@ -1929,6 +1984,10 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : sizeof(struct megasas_sge32); + if (instance->flag_ieee) { + sge_sz = sizeof(struct megasas_sge_skinny); + } + /* * Calculated the number of 64byte frames required for SGL */ @@ -2725,6 +2784,11 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); + if (instance->aen_cmd != NULL) { + megasas_return_cmd(instance, cmd); + return 0; + } + /* * Store reference to the cmd used to register for AEN. 
When an * application wants us to register for AEN, we have to abort this @@ -2895,6 +2959,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) *instance->producer = 0; *instance->consumer = 0; megasas_poll_wait_aen = 0; + instance->flag_ieee = 0; instance->evt_detail = pci_alloc_consistent(pdev, sizeof(struct @@ -2933,6 +2998,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + instance->flag_ieee = 1; sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); } else sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 4c78cd32e757..a1fd44bc1817 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -96,6 +96,7 @@ #define MFI_FRAME_DIR_WRITE 0x0008 #define MFI_FRAME_DIR_READ 0x0010 #define MFI_FRAME_DIR_BOTH 0x0018 +#define MFI_FRAME_IEEE 0x0020 /* * Definition for cmd_status @@ -732,10 +733,17 @@ struct megasas_sge64 { } __attribute__ ((packed)); +struct megasas_sge_skinny { + u64 phys_addr; + u32 length; + u32 flag; +} __packed; + union megasas_sgl { struct megasas_sge32 sge32[1]; struct megasas_sge64 sge64[1]; + struct megasas_sge_skinny sge_skinny[1]; } __attribute__ ((packed)); @@ -1210,6 +1218,7 @@ struct megasas_instance { u8 flag; u8 unload; + u8 flag_ieee; unsigned long last_time; struct timer_list io_completion_timer; -- cgit v1.2.3-59-g8ed1b From 0c79e681eef10810a5ed41a2eb1dce244ab1c37d Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:47:35 -0600 Subject: [SCSI] megaraid_sas: Fix the fix for fw hang caused by megaraid sas application Add a lock to the skinny firmware initialisation sequence to prevent the two stage write being non atomic if multiple instances use it. Add a flag to the driver shutdown sequence to prevent aen ioctls being called after shutdown begins. 
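Editorial note, not part of the patch: a generic sketch, with hypothetical names, of the pattern being applied here. When a command is posted through two MMIO registers (high word, then low word), the pair of writel() calls from different CPUs can interleave, and the firmware may latch one command's high half with another command's low half; a per-adapter spinlock around both writes keeps each post atomic.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define HI_QUEUE_PORT	0x00c4	/* hypothetical register offsets */
#define LO_QUEUE_PORT	0x00c0

struct my_adapter {		/* hypothetical, for illustration only */
	void __iomem	*regs;
	spinlock_t	post_lock;
};

static void post_frame(struct my_adapter *a, u32 hi, u32 lo)
{
	unsigned long flags;

	/* Both writes must appear to the firmware as one atomic post. */
	spin_lock_irqsave(&a->post_lock, flags);
	writel(hi, a->regs + HI_QUEUE_PORT);	/* stage 1 */
	writel(lo, a->regs + LO_QUEUE_PORT);	/* stage 2 */
	spin_unlock_irqrestore(&a->post_lock, flags);
}

In the patch below the same idea appears as instance->fire_lock taken inside megasas_fire_cmd_skinny().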
Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 75 ++++++++++++++++++++++++++++++------ drivers/scsi/megaraid/megaraid_sas.h | 25 ++++++------ 2 files changed, 77 insertions(+), 23 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 4c04a68bad6c..6d998e050338 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -226,7 +226,10 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) * @regs : MFI register set */ static inline void -megasas_fire_cmd_xscale(dma_addr_t frame_phys_addr,u32 frame_count, struct megasas_register_set __iomem *regs) +megasas_fire_cmd_xscale(struct megasas_instance *instance, + dma_addr_t frame_phys_addr, + u32 frame_count, + struct megasas_register_set __iomem *regs) { writel((frame_phys_addr >> 3)|(frame_count), &(regs)->inbound_queue_port); @@ -323,7 +326,10 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) * @regs : MFI register set */ static inline void -megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) +megasas_fire_cmd_ppc(struct megasas_instance *instance, + dma_addr_t frame_phys_addr, + u32 frame_count, + struct megasas_register_set __iomem *regs) { writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_queue_port); @@ -413,12 +419,17 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) * @regs : MFI register set */ static inline void -megasas_fire_cmd_skinny(dma_addr_t frame_phys_addr, u32 frame_count, +megasas_fire_cmd_skinny(struct megasas_instance *instance, + dma_addr_t frame_phys_addr, + u32 frame_count, struct megasas_register_set __iomem *regs) { + unsigned long flags; + spin_lock_irqsave(&instance->fire_lock, flags); writel(0, &(regs)->inbound_high_queue_port); writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_low_queue_port); + spin_unlock_irqrestore(&instance->fire_lock, flags); } static struct megasas_instance_template megasas_instance_template_skinny = { @@ -508,7 +519,9 @@ megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) * @regs : MFI register set */ static inline void -megasas_fire_cmd_gen2(dma_addr_t frame_phys_addr, u32 frame_count, +megasas_fire_cmd_gen2(struct megasas_instance *instance, + dma_addr_t frame_phys_addr, + u32 frame_count, struct megasas_register_set __iomem *regs) { writel((frame_phys_addr | (frame_count<<1))|1, @@ -550,7 +563,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) /* * Issue the frame using inbound queue port */ - instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); + instance->instancet->fire_cmd(instance, + cmd->frame_phys_addr, 0, instance->reg_set); /* * Wait for cmd_status to change @@ -581,7 +595,8 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, { cmd->cmd_status = ENODATA; - instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); + instance->instancet->fire_cmd(instance, + cmd->frame_phys_addr, 0, instance->reg_set); wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA), MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ); @@ -626,7 +641,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, cmd->sync_cmd = 1; cmd->cmd_status = 0xFF; - instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); + instance->instancet->fire_cmd(instance, + cmd->frame_phys_addr, 0, instance->reg_set); /* * 
Wait for this cmd to complete @@ -1153,7 +1169,8 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) */ atomic_inc(&instance->fw_outstanding); - instance->instancet->fire_cmd(cmd->frame_phys_addr ,cmd->frame_count-1,instance->reg_set); + instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, + cmd->frame_count-1, instance->reg_set); /* * Check if we have pend cmds to be completed */ @@ -1346,8 +1363,16 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) * Send signal to FW to stop processing any pending cmds. * The controller will be taken offline by the OS now. */ - writel(MFI_STOP_ADP, + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + writel(MFI_STOP_ADP, + &instance->reg_set->reserved_0[0]); + } else { + writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell); + } megasas_dump_pending_frames(instance); instance->hw_crit_error = 1; return FAILED; @@ -1799,7 +1824,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) /* * Set the CLR bit in inbound doorbell */ - if ((instance->pdev->device == \ + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { @@ -2799,7 +2824,8 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, /* * Issue the aen registration frame */ - instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set); + instance->instancet->fire_cmd(instance, + cmd->frame_phys_addr, 0, instance->reg_set); return 0; } @@ -2983,6 +3009,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) init_waitqueue_head(&instance->abort_cmd_wait_q); spin_lock_init(&instance->cmd_pool_lock); + spin_lock_init(&instance->fire_lock); spin_lock_init(&instance->completion_lock); spin_lock_init(&poll_aen_lock); @@ -3005,7 +3032,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) megasas_dbg_lvl = 0; instance->flag = 0; - instance->unload = 0; + instance->unload = 1; instance->last_time = 0; /* @@ -3051,6 +3078,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (megasas_io_attach(instance)) goto fail_io_attach; + instance->unload = 0; return 0; fail_start_aen: @@ -3174,6 +3202,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) instance = pci_get_drvdata(pdev); host = instance->host; + instance->unload = 1; if (poll_mode_io) del_timer_sync(&instance->io_completion_timer); @@ -3269,6 +3298,8 @@ megasas_resume(struct pci_dev *pdev) megasas_start_timer(instance, &instance->io_completion_timer, megasas_io_completion_timer, MEGASAS_COMPLETION_TIMER_INTERVAL); + instance->unload = 0; + return 0; fail_irq: @@ -3366,6 +3397,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) static void megasas_shutdown(struct pci_dev *pdev) { struct megasas_instance *instance = pci_get_drvdata(pdev); + instance->unload = 1; megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); } @@ -3615,6 +3647,17 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) goto out_kfree_ioc; } + if (instance->hw_crit_error == 1) { + printk(KERN_DEBUG "Controller in Crit ERROR\n"); + error = -ENODEV; + goto out_kfree_ioc; + } + + if (instance->unload == 1) { + error = -ENODEV; + goto out_kfree_ioc; + } + /* * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds */ @@ -3650,6 +3693,14 @@ static int 
megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) if (!instance) return -ENODEV; + if (instance->hw_crit_error == 1) { + error = -ENODEV; + } + + if (instance->unload == 1) { + return -ENODEV; + } + mutex_lock(&instance->aen_mutex); error = megasas_register_aen(instance, aen.seq_num, aen.class_locale_word); diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index a1fd44bc1817..13ac37e80075 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1157,17 +1157,6 @@ struct megasas_evt_detail { } __attribute__ ((packed)); - struct megasas_instance_template { - void (*fire_cmd)(dma_addr_t ,u32 ,struct megasas_register_set __iomem *); - - void (*enable_intr)(struct megasas_register_set __iomem *) ; - void (*disable_intr)(struct megasas_register_set __iomem *); - - int (*clear_intr)(struct megasas_register_set __iomem *); - - u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *); - }; - struct megasas_instance { u32 *producer; @@ -1193,6 +1182,8 @@ struct megasas_instance { spinlock_t cmd_pool_lock; /* used to synch producer, consumer ptrs in dpc */ spinlock_t completion_lock; + /* used to sync fire the cmd to fw */ + spinlock_t fire_lock; struct dma_pool *frame_dma_pool; struct dma_pool *sense_dma_pool; @@ -1224,6 +1215,18 @@ struct megasas_instance { struct timer_list io_completion_timer; }; +struct megasas_instance_template { + void (*fire_cmd)(struct megasas_instance *, dma_addr_t, \ + u32, struct megasas_register_set __iomem *); + + void (*enable_intr)(struct megasas_register_set __iomem *) ; + void (*disable_intr)(struct megasas_register_set __iomem *); + + int (*clear_intr)(struct megasas_register_set __iomem *); + + u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *); +}; + #define MEGASAS_IS_LOGICAL(scp) \ (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 -- cgit v1.2.3-59-g8ed1b From 7e8a75f4dfbff173977b2f58799c3eceb7b09afd Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:50:17 -0600 Subject: [SCSI] megaraid_sas: Add the support for updating the OS after adding/removing the devices from FW Driver will update the OS devices after adding and deleting the device from FW. When driver receive add or delete AEN from FW, driver will send the DCMD cmd to get the System PD list from FW. Then driver will check if this device already in the OS: If add event and OS don't have the device (but it is in the list), driver add the device to OS, otherwise driver will not add. If remove event, driver will check the list, if is not in the list, but OS have the device, driver will remove the device. 
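In short: the AEN completion path allocates a megasas_aen_event, schedules it as delayed work, and the worker reconciles the firmware's physical-disk list with what the midlayer already exposes. A sketch of that reconciliation, with the loop condensed from megasas_aen_polling() in the hunks below (the helper name itself is made up):

/* Runs in process context, deferred from megasas_service_aen(). */
static void megasas_pd_rescan(struct megasas_instance *instance)
{
        struct Scsi_Host *host = instance->host;
        struct scsi_device *sdev;
        int ch, id, pd_index;

        megasas_get_pd_list(instance);          /* refresh the FW view */
        for (ch = 0; ch < MEGASAS_MAX_PD_CHANNELS; ch++) {
                for (id = 0; id < MEGASAS_MAX_DEV_PER_CHANNEL; id++) {
                        pd_index = ch * MEGASAS_MAX_DEV_PER_CHANNEL + id;
                        sdev = scsi_device_lookup(host, ch, id, 0);
                        if (instance->pd_list[pd_index].driveState ==
                            MR_PD_STATE_SYSTEM) {
                                /* FW exposes it: add only if the OS lacks it. */
                                if (!sdev)
                                        scsi_add_device(host, ch, id, 0);
                                else
                                        scsi_device_put(sdev);
                        } else if (sdev) {
                                /* OS still has it, FW no longer does. */
                                scsi_remove_device(sdev);
                                scsi_device_put(sdev);
                        }
                }
        }
}

The work item is cancelled (and scheduled work flushed) in suspend and detach so the rescan cannot race with teardown.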
Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 125 ++++++++++++++++++++++++++++++++++- drivers/scsi/megaraid/megaraid_sas.h | 17 +++++ 2 files changed, 141 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 6d998e050338..b0d6991cb6f3 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -1520,6 +1520,8 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, return 0; } +static void megasas_aen_polling(struct work_struct *work); + /** * megasas_service_aen - Processes an event notification * @instance: Adapter soft state @@ -1551,6 +1553,20 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) instance->aen_cmd = NULL; megasas_return_cmd(instance, cmd); + + if (instance->unload == 0) { + struct megasas_aen_event *ev; + ev = kzalloc(sizeof(*ev), GFP_ATOMIC); + if (!ev) { + printk(KERN_ERR "megasas_service_aen: out of memory\n"); + } else { + ev->instance = instance; + instance->ev = ev; + INIT_WORK(&ev->hotplug_work, megasas_aen_polling); + schedule_delayed_work( + (struct delayed_work *)&ev->hotplug_work, 0); + } + } } /* @@ -2075,6 +2091,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) } cmd->frame->io.context = cmd->index; + cmd->frame->io.pad_0 = 0; } return 0; @@ -2271,7 +2288,6 @@ megasas_get_pd_list(struct megasas_instance *instance) return ret; } - /** * megasas_get_controller_info - Returns FW's controller structure * @instance: Adapter soft state @@ -2986,6 +3002,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) *instance->consumer = 0; megasas_poll_wait_aen = 0; instance->flag_ieee = 0; + instance->ev = NULL; instance->evt_detail = pci_alloc_consistent(pdev, sizeof(struct @@ -3209,6 +3226,16 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); + + /* cancel the delayed work if this work still in queue */ + if (instance->ev != NULL) { + struct megasas_aen_event *ev = instance->ev; + cancel_delayed_work( + (struct delayed_work *)&ev->hotplug_work); + flush_scheduled_work(); + instance->ev = NULL; + } + tasklet_kill(&instance->isr_tasklet); pci_set_drvdata(instance->pdev, instance); @@ -3349,6 +3376,16 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) scsi_remove_host(instance->host); megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); + + /* cancel the delayed work if this work still in queue*/ + if (instance->ev != NULL) { + struct megasas_aen_event *ev = instance->ev; + cancel_delayed_work( + (struct delayed_work *)&ev->hotplug_work); + flush_scheduled_work(); + instance->ev = NULL; + } + tasklet_kill(&instance->isr_tasklet); /* @@ -3913,6 +3950,92 @@ out: return retval; } +static void +megasas_aen_polling(struct work_struct *work) +{ + struct megasas_aen_event *ev = + container_of(work, struct megasas_aen_event, hotplug_work); + struct megasas_instance *instance = ev->instance; + union megasas_evt_class_locale class_locale; + struct Scsi_Host *host; + struct scsi_device *sdev1; + u16 pd_index = 0; + int i, j, doscan = 0; + u32 seq_num; + int error; + + if (!instance) { + printk(KERN_ERR "invalid instance!\n"); + kfree(ev); + return; + } + instance->ev = NULL; + host = instance->host; + if (instance->evt_detail) { + + switch (instance->evt_detail->code) { + 
case MR_EVT_PD_INSERTED: + case MR_EVT_PD_REMOVED: + case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: + doscan = 1; + break; + default: + doscan = 0; + break; + } + } else { + printk(KERN_ERR "invalid evt_detail!\n"); + kfree(ev); + return; + } + + if (doscan) { + printk(KERN_INFO "scanning ...\n"); + megasas_get_pd_list(instance); + for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { + pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; + sdev1 = scsi_device_lookup(host, i, j, 0); + if (instance->pd_list[pd_index].driveState == + MR_PD_STATE_SYSTEM) { + if (!sdev1) { + scsi_add_device(host, i, j, 0); + } + if (sdev1) + scsi_device_put(sdev1); + } else { + if (sdev1) { + scsi_remove_device(sdev1); + scsi_device_put(sdev1); + } + } + } + } + } + + if ( instance->aen_cmd != NULL ) { + kfree(ev); + return ; + } + + seq_num = instance->evt_detail->seq_num + 1; + + /* Register AEN with FW for latest sequence number plus 1 */ + class_locale.members.reserved = 0; + class_locale.members.locale = MR_EVT_LOCALE_ALL; + class_locale.members.class = MR_EVT_CLASS_DEBUG; + mutex_lock(&instance->aen_mutex); + error = megasas_register_aen(instance, seq_num, + class_locale.word); + mutex_unlock(&instance->aen_mutex); + + if (error) + printk(KERN_ERR "register aen failed error %x\n", error); + + kfree(ev); +} + + static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO, megasas_sysfs_show_poll_mode_io, megasas_sysfs_set_poll_mode_io); diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 13ac37e80075..cd1c008f9ab8 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -285,6 +285,17 @@ enum MR_PD_QUERY_TYPE { MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, }; +#define MR_EVT_CFG_CLEARED 0x0004 +#define MR_EVT_LD_STATE_CHANGE 0x0051 +#define MR_EVT_PD_INSERTED 0x005b +#define MR_EVT_PD_REMOVED 0x0070 +#define MR_EVT_LD_CREATED 0x008a +#define MR_EVT_LD_DELETED 0x008b +#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db +#define MR_EVT_LD_OFFLINE 0x00fc +#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 +#define MAX_LOGICAL_DRIVES 64 + enum MR_PD_STATE { MR_PD_STATE_UNCONFIGURED_GOOD = 0x00, MR_PD_STATE_UNCONFIGURED_BAD = 0x01, @@ -1157,6 +1168,11 @@ struct megasas_evt_detail { } __attribute__ ((packed)); +struct megasas_aen_event { + struct work_struct hotplug_work; + struct megasas_instance *instance; +}; + struct megasas_instance { u32 *producer; @@ -1176,6 +1192,7 @@ struct megasas_instance { u16 max_num_sge; u16 max_fw_cmds; u32 max_sectors_per_req; + struct megasas_aen_event *ev; struct megasas_cmd **cmd_list; struct list_head cmd_pool; -- cgit v1.2.3-59-g8ed1b From 7b2519afa1abd1b9f63aa1e90879307842422dae Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:52:20 -0600 Subject: [SCSI] megaraid_sas: fix 64 bit sense pointer truncation The current sense pointer is cast to a u32 pointer, which can truncate on 64 bits. Fix by using unsigned long instead. 
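A small user-space illustration of the failure mode (the buffer, offset and address below are hypothetical, not driver code): writing a 64-bit handle through a 32-bit pointer silently drops the upper half, which is what happened to the sense-buffer bus address on 64-bit kernels.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t frame[8] = { 0 };              /* stand-in for the MFI frame */
        unsigned char *base = (unsigned char *)frame;
        unsigned int sense_off = 16;            /* hypothetical offset */
        uint64_t sense_handle = 0x000000127fff0000ULL;  /* hypothetical bus address */

        /* Old code: the slot is addressed through a u32 *, so only the
         * low 32 bits of the handle survive the store. */
        uint32_t *sense_ptr32 = (uint32_t *)(base + sense_off);
        *sense_ptr32 = sense_handle;
        printf("via u32 *          : 0x%016llx\n",
               (unsigned long long)*(uint64_t *)(base + sense_off));

        /* Fix: use a pointer as wide as the handle (unsigned long is
         * 64 bits on 64-bit kernels). */
        uint64_t *sense_ptr64 = (uint64_t *)(base + sense_off);
        *sense_ptr64 = sense_handle;
        printf("via unsigned long *: 0x%016llx\n",
               (unsigned long long)*(uint64_t *)(base + sense_off));
        return 0;
}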
Signed-off-by Bo Yang Cc: stable@kernel.org Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index b0d6991cb6f3..23056721a8c9 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -3515,7 +3515,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, int error = 0, i; void *sense = NULL; dma_addr_t sense_handle; - u32 *sense_ptr; + unsigned long *sense_ptr; memset(kbuff_arr, 0, sizeof(kbuff_arr)); @@ -3593,7 +3593,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, } sense_ptr = - (u32 *) ((unsigned long)cmd->frame + ioc->sense_off); + (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); *sense_ptr = sense_handle; } @@ -3624,8 +3624,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * sense_ptr points to the location that has the user * sense buffer address */ - sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw + - ioc->sense_off); + sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + + ioc->sense_off); if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), sense, ioc->sense_len)) { -- cgit v1.2.3-59-g8ed1b From 7218df69e3609d1fcf4d83cf8f3fc89dbfbf82a8 Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:52:20 -0600 Subject: [SCSI] megaraid_sas: use the firmware boot timeout when waiting for commands use the constant MEGASAS_RESET_WAIT_TIME when waiting for firmware commands to complete (currently 3 minutes). Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 23056721a8c9..efd41c1f946c 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -1820,6 +1820,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) u8 max_wait; u32 fw_state; u32 cur_state; + u32 abs_state, curr_abs_state; fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; @@ -1829,6 +1830,9 @@ megasas_transition_to_ready(struct megasas_instance* instance) while (fw_state != MFI_STATE_READY) { + abs_state = + instance->instancet->read_fw_status_reg(instance->reg_set); + switch (fw_state) { case MFI_STATE_FAULT: @@ -1854,7 +1858,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) &instance->reg_set->inbound_doorbell); } - max_wait = 2; + max_wait = MEGASAS_RESET_WAIT_TIME; cur_state = MFI_STATE_WAIT_HANDSHAKE; break; @@ -1869,7 +1873,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) writel(MFI_INIT_HOTPLUG, &instance->reg_set->inbound_doorbell); - max_wait = 10; + max_wait = MEGASAS_RESET_WAIT_TIME; cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; break; @@ -1888,7 +1892,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell); - max_wait = 60; + max_wait = MEGASAS_RESET_WAIT_TIME; cur_state = MFI_STATE_OPERATIONAL; break; @@ -1896,32 +1900,32 @@ megasas_transition_to_ready(struct megasas_instance* instance) /* * This state should not last for more than 2 seconds */ - max_wait = 2; + max_wait = MEGASAS_RESET_WAIT_TIME; cur_state = MFI_STATE_UNDEFINED; break; case MFI_STATE_BB_INIT: - max_wait = 2; + max_wait = MEGASAS_RESET_WAIT_TIME; cur_state 
= MFI_STATE_BB_INIT; break; case MFI_STATE_FW_INIT: - max_wait = 20; + max_wait = MEGASAS_RESET_WAIT_TIME; cur_state = MFI_STATE_FW_INIT; break; case MFI_STATE_FW_INIT_2: - max_wait = 20; + max_wait = MEGASAS_RESET_WAIT_TIME; cur_state = MFI_STATE_FW_INIT_2; break; case MFI_STATE_DEVICE_SCAN: - max_wait = 20; + max_wait = MEGASAS_RESET_WAIT_TIME; cur_state = MFI_STATE_DEVICE_SCAN; break; case MFI_STATE_FLUSH_CACHE: - max_wait = 20; + max_wait = MEGASAS_RESET_WAIT_TIME; cur_state = MFI_STATE_FLUSH_CACHE; break; @@ -1937,8 +1941,10 @@ megasas_transition_to_ready(struct megasas_instance* instance) for (i = 0; i < (max_wait * 1000); i++) { fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK ; + curr_abs_state = + instance->instancet->read_fw_status_reg(instance->reg_set); - if (fw_state == cur_state) { + if (abs_state == curr_abs_state) { msleep(1); } else break; @@ -1947,7 +1953,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) /* * Return error if fw_state hasn't changed after max_wait */ - if (fw_state == cur_state) { + if (curr_abs_state == abs_state) { printk(KERN_DEBUG "FW state [%d] hasn't changed " "in %d secs\n", fw_state, max_wait); return -ENODEV; -- cgit v1.2.3-59-g8ed1b From a0b7736828f7615823a7dec680632656d9a9edde Mon Sep 17 00:00:00 2001 From: "Yang, Bo" Date: Tue, 6 Oct 2009 14:55:09 -0600 Subject: [SCSI] megaraid_sas: Update version number and documentation Signed-off-by Bo Yang Signed-off-by: James Bottomley --- Documentation/scsi/ChangeLog.megaraid_sas | 62 +++++++++++++++++++++++++++++++ drivers/scsi/megaraid/megaraid_sas.c | 2 +- drivers/scsi/megaraid/megaraid_sas.h | 6 +-- 3 files changed, 66 insertions(+), 4 deletions(-) diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas index c851ef497795..151a7b718b8c 100644 --- a/Documentation/scsi/ChangeLog.megaraid_sas +++ b/Documentation/scsi/ChangeLog.megaraid_sas @@ -1,3 +1,65 @@ +1 Release Date : Tues. July 28, 2009 10:12:45 PST 2009 - + (emaild-id:megaraidlinux@lsi.com) + Bo Yang + +2 Current Version : 00.00.04.12 +3 Older Version : 00.00.04.10 + +1. Change the AEN sys PD update from scsi_scan to + scsi_add_device and scsi_remove_device. +2. Takeoff the debug print-out in aen_polling routine. + +1 Release Date : Thur. July 02, 2009 10:12:45 PST 2009 - + (emaild-id:megaraidlinux@lsi.com) + Bo Yang + +2 Current Version : 00.00.04.10 +3 Older Version : 00.00.04.08 + +1. Add the 3 mins timeout during the controller initialize. +2. Add the fix for 64bit sense date errors. + +1 Release Date : Tues. May 05, 2009 10:12:45 PST 2009 - + (emaild-id:megaraidlinux@lsi.com) + Bo Yang + +2 Current Version : 00.00.04.08 +3 Older Version : 00.00.04.06 + +1. Add the fix of pending in FW after deleted the logic drives. +2. Add the fix of deallocating memory after get pdlist. + +1 Release Date : Tues. March 26, 2009 10:12:45 PST 2009 - + (emaild-id:megaraidlinux@lsi.com) + Bo Yang + +2 Current Version : 00.00.04.06 +3 Older Version : 00.00.04.04 + +1. Add the fix of the driver cmd empty fix of the driver cmd empty. +2. Add the fix of the driver MSM AEN CMD cause the system slow. + +1 Release Date : Tues. March 03, 2009 10:12:45 PST 2009 - + (emaild-id:megaraidlinux@lsi.com) + Bo Yang + +2 Current Version : 00.00.04.04 +3 Older Version : 00.00.04.01 + +1. Add the Tape drive fix to the driver: If the command is for + the tape device, set the pthru timeout to the os layer timeout value. + +2. Add Poll_wait mechanism to Gen-2 Linux driv. 
+ In the aen handler, driver needs to wakeup poll handler similar to + the way it raises SIGIO. + +3. Add new controller new SAS2 support to the driver. + +4. Report the unconfigured PD (system PD) to OS. + +5. Add the IEEE SGL support to the driver + +6. Reasign the Application cmds to SAS2 controller 1 Release Date : Thur.July. 24 11:41:51 PST 2008 - (emaild-id:megaraidlinux@lsi.com) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index efd41c1f946c..134c63ef6d38 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. * * FILE : megaraid_sas.c - * Version : v00.00.04.01-rc1 + * Version : v00.00.04.12-rc1 * * Authors: * (email-id : megaraidlinux@lsi.com) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index cd1c008f9ab8..72b28e436e32 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -18,9 +18,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "00.00.04.01" -#define MEGASAS_RELDATE "July 24, 2008" -#define MEGASAS_EXT_VERSION "Thu July 24 11:41:51 PST 2008" +#define MEGASAS_VERSION "00.00.04.12-rc1" +#define MEGASAS_RELDATE "Sep. 17, 2009" +#define MEGASAS_EXT_VERSION "Thu Sep. 17 11:41:51 PST 2009" /* * Device IDs -- cgit v1.2.3-59-g8ed1b From e39e145dfb78d4e20d89139d2576306b4279c126 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 7 Oct 2009 11:26:54 +0530 Subject: [SCSI] mptctl : Remove printk which floods unnecessary messages to var/log/message Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptctl.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 9b2e2198aee9..352acd05c46b 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -621,11 +621,8 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) */ iocnumX = khdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || - (iocp == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnumX); + (iocp == NULL)) return -ENODEV; - } if (!iocp->active) { printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n", -- cgit v1.2.3-59-g8ed1b From 9b53b39243cf23a0b68eaa16c37ce16eada69a46 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 7 Oct 2009 11:27:40 +0530 Subject: [SCSI] mptspi: Fix for incorrect data underrun errata Errata: Certain conditions on the scsi bus may casue the 53C1030 to incorrectly signal a SCSI_DATA_UNDERRUN to the host. Workaround 1: For an Errata on LSI53C1030 When the length of request data and transfer data are different with result of command (READ or VERIFY), DID_SOFT_ERROR is set. Workaround 2: For potential trouble on LSI53C1030. It is checked whether the length of request data is equal to the length of transfer and residual. MEDIUM_ERROR is set by incorrect data. 
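A condensed sketch of the two checks (in the real hunks below they sit in different completion branches of mptscsih_io_done(); they are folded into one helper here only to show the arithmetic, and the SPI bus type is assumed):

static void lsi53c1030_underrun_check(struct scsi_cmnd *sc, u32 xfer_cnt)
{
        u8 *sense = sc->sense_buffer;
        u32 difftransfer;

        /* Workaround 1: a READ/VERIFY that completed but moved the wrong
         * number of bytes is forced to retry via DID_SOFT_ERROR. */
        switch (sc->cmnd[0]) {
        case READ_6: case READ_10: case READ_12: case READ_16:
        case VERIFY: case VERIFY_16:
                if (scsi_bufflen(sc) != xfer_cnt)
                        sc->result = DID_SOFT_ERROR << 16;
                break;
        }

        /* Workaround 2: autosense with the ILI bit set; compare the residue
         * from the sense INFORMATION field against what was actually moved
         * and report MEDIUM_ERROR (the driver also stamps ASC/ASCQ = 0xff)
         * when the lengths cannot be reconciled. */
        if (sense[2] & 0x20) {
                difftransfer = sense[3] << 24 | sense[4] << 16 |
                               sense[5] << 8 | sense[6];
                if (((sense[3] & 0x80) && scsi_bufflen(sc) != xfer_cnt) ||
                    (!(sense[3] & 0x80) &&
                     scsi_bufflen(sc) != xfer_cnt + difftransfer))
                        sense[2] = MEDIUM_ERROR;
        }
}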
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptscsih.c | 86 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 81 insertions(+), 5 deletions(-) diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index c29578614504..f68ec48a881e 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -792,11 +792,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) * precedence! */ sc->result = (DID_OK << 16) | scsi_status; - if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) { - /* Have already saved the status and sense data + if (!(scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)) { + + /* + * For an Errata on LSI53C1030 + * When the length of request data + * and transfer data are different + * with result of command (READ or VERIFY), + * DID_SOFT_ERROR is set. */ - ; - } else { + if (ioc->bus_type == SPI) { + if (pScsiReq->CDB[0] == READ_6 || + pScsiReq->CDB[0] == READ_10 || + pScsiReq->CDB[0] == READ_12 || + pScsiReq->CDB[0] == READ_16 || + pScsiReq->CDB[0] == VERIFY || + pScsiReq->CDB[0] == VERIFY_16) { + if (scsi_bufflen(sc) != + xfer_cnt) { + sc->result = + DID_SOFT_ERROR << 16; + printk(KERN_WARNING "Errata" + "on LSI53C1030 occurred." + "sc->req_bufflen=0x%02x," + "xfer_cnt=0x%02x\n", + scsi_bufflen(sc), + xfer_cnt); + } + } + } + if (xfer_cnt < sc->underflow) { if (scsi_status == SAM_STAT_BUSY) sc->result = SAM_STAT_BUSY; @@ -835,7 +860,58 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) sc->result = (DID_OK << 16) | scsi_status; if (scsi_state == 0) { ; - } else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) { + } else if (scsi_state & + MPI_SCSI_STATE_AUTOSENSE_VALID) { + + /* + * For potential trouble on LSI53C1030. + * (date:2007.xx.) + * It is checked whether the length of + * request data is equal to + * the length of transfer and residual. + * MEDIUM_ERROR is set by incorrect data. + */ + if ((ioc->bus_type == SPI) && + (sc->sense_buffer[2] & 0x20)) { + u32 difftransfer; + difftransfer = + sc->sense_buffer[3] << 24 | + sc->sense_buffer[4] << 16 | + sc->sense_buffer[5] << 8 | + sc->sense_buffer[6]; + if (((sc->sense_buffer[3] & 0x80) == + 0x80) && (scsi_bufflen(sc) + != xfer_cnt)) { + sc->sense_buffer[2] = + MEDIUM_ERROR; + sc->sense_buffer[12] = 0xff; + sc->sense_buffer[13] = 0xff; + printk(KERN_WARNING"Errata" + "on LSI53C1030 occurred." + "sc->req_bufflen=0x%02x," + "xfer_cnt=0x%02x\n" , + scsi_bufflen(sc), + xfer_cnt); + } + if (((sc->sense_buffer[3] & 0x80) + != 0x80) && + (scsi_bufflen(sc) != + xfer_cnt + difftransfer)) { + sc->sense_buffer[2] = + MEDIUM_ERROR; + sc->sense_buffer[12] = 0xff; + sc->sense_buffer[13] = 0xff; + printk(KERN_WARNING + "Errata on LSI53C1030 occurred" + "sc->req_bufflen=0x%02x," + " xfer_cnt=0x%02x," + "difftransfer=0x%02x\n", + scsi_bufflen(sc), + xfer_cnt, + difftransfer); + } + } + /* * If running against circa 200003dd 909 MPT f/w, * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL -- cgit v1.2.3-59-g8ed1b From 080bb708ad8f21ea743d1a9233fbc62af0feb10b Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 7 Oct 2009 11:28:50 +0530 Subject: [SCSI] mptfusion: Bump version to 3.04.13 Bump version 3.04.13. 
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 8dd4d219e433..b4948671eb92 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.12" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.12" +#define MPT_LINUX_VERSION_COMMON "3.04.13" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.13" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ -- cgit v1.2.3-59-g8ed1b From cbacfb5fd9a4689b55157753b8ba4455415fb85c Mon Sep 17 00:00:00 2001 From: Ed Lin Date: Mon, 28 Sep 2009 22:58:17 -0800 Subject: [SCSI] stex: add small dma buffer support The controllers of st_seq and st_vsc type can work if only small dma buffer is available, with a reduced firmware feature set. Add support for this case. Signed-off-by: Ed Lin Signed-off-by: James Bottomley --- drivers/scsi/stex.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 09fa8861fc58..af5bafcccf1f 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -160,6 +160,7 @@ enum { INQUIRY_EVPD = 0x01, ST_ADDITIONAL_MEM = 0x200000, + ST_ADDITIONAL_MEM_MIN = 0x80000, }; struct st_sgitem { @@ -1001,7 +1002,7 @@ static int stex_common_handshake(struct st_hba *hba) h->partner_type = HMU_PARTNER_TYPE; if (hba->extra_offset) { h->extra_offset = cpu_to_le32(hba->extra_offset); - h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM); + h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset); } else h->extra_offset = h->extra_size = 0; @@ -1528,10 +1529,24 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) hba->dma_mem = dma_alloc_coherent(&pdev->dev, hba->dma_size, &hba->dma_handle, GFP_KERNEL); if (!hba->dma_mem) { - err = -ENOMEM; - printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", - pci_name(pdev)); - goto out_iounmap; + /* Retry minimum coherent mapping for st_seq and st_vsc */ + if (hba->cardtype == st_seq || + (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { + printk(KERN_WARNING DRV_NAME + "(%s): allocating min buffer for controller\n", + pci_name(pdev)); + hba->dma_size = hba->extra_offset + + ST_ADDITIONAL_MEM_MIN; + hba->dma_mem = dma_alloc_coherent(&pdev->dev, + hba->dma_size, &hba->dma_handle, GFP_KERNEL); + } + + if (!hba->dma_mem) { + err = -ENOMEM; + printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", + pci_name(pdev)); + goto out_iounmap; + } } hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); -- cgit v1.2.3-59-g8ed1b From 9eb46d2a08de537e14e92216bf18e7cb541d2f67 Mon Sep 17 00:00:00 2001 From: Ed Lin Date: Mon, 28 Sep 2009 22:58:33 -0800 Subject: [SCSI] stex: add support for reset request from firmware Add support for reset request from firmware for controllers of st_shasta and st_yel type. Code adjustments necessary for this change are also included. 
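Structurally the change has three parts: probe() creates a single-threaded workqueue per host, the interrupt handlers merely notice the firmware's reset-request bit and queue a work item, and the reset itself (failing outstanding ccbs back with DID_RESET and re-doing the handshake) runs in process context. A rough sketch with the names used in the hunks below (fragments, not a complete function):

        /* probe: one dedicated queue and one work item per host */
        snprintf(hba->work_q_name, sizeof(hba->work_q_name),
                 "stex_wq_%d", host->host_no);
        hba->work_q = create_singlethread_workqueue(hba->work_q_name);
        INIT_WORK(&hba->reset_work, stex_reset_work);
        init_waitqueue_head(&hba->reset_waitq);

        /* interrupt handler: never reset in IRQ context, only defer
         * (st_yel path shown; st_shasta tests MU_OUTBOUND_DOORBELL_REQUEST_RESET) */
        if (unlikely(data & SS_I2H_REQUEST_RESET))
                queue_work(hba->work_q, &hba->reset_work);

        /* process context: shared with the midlayer error-handler path via stex_reset() */
static void stex_reset_work(struct work_struct *work)
{
        struct st_hba *hba = container_of(work, struct st_hba, reset_work);

        stex_do_reset(hba);
}

stex_do_reset() serializes concurrent callers through mu_status and reset_waitq, so a firmware-requested reset and an error-handler reset cannot run at the same time.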
Signed-off-by: Ed Lin Signed-off-by: James Bottomley --- drivers/scsi/stex.c | 249 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 166 insertions(+), 83 deletions(-) diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index af5bafcccf1f..79216ee8112b 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -64,24 +64,24 @@ enum { YH2I_REQ_HI = 0xc4, /* MU register value */ - MU_INBOUND_DOORBELL_HANDSHAKE = 1, - MU_INBOUND_DOORBELL_REQHEADCHANGED = 2, - MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4, - MU_INBOUND_DOORBELL_HMUSTOPPED = 8, - MU_INBOUND_DOORBELL_RESET = 16, - - MU_OUTBOUND_DOORBELL_HANDSHAKE = 1, - MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2, - MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4, - MU_OUTBOUND_DOORBELL_BUSCHANGE = 8, - MU_OUTBOUND_DOORBELL_HASEVENT = 16, + MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0), + MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1), + MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2), + MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3), + MU_INBOUND_DOORBELL_RESET = (1 << 4), + + MU_OUTBOUND_DOORBELL_HANDSHAKE = (1 << 0), + MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1), + MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2), + MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3), + MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4), + MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27), /* MU status code */ MU_STATE_STARTING = 1, - MU_STATE_FMU_READY_FOR_HANDSHAKE = 2, - MU_STATE_SEND_HANDSHAKE_FRAME = 3, - MU_STATE_STARTED = 4, - MU_STATE_RESETTING = 5, + MU_STATE_STARTED = 2, + MU_STATE_RESETTING = 3, + MU_STATE_FAILED = 4, MU_MAX_DELAY = 120, MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, @@ -111,6 +111,8 @@ enum { SS_H2I_INT_RESET = 0x100, + SS_I2H_REQUEST_RESET = 0x2000, + SS_MU_OPERATIONAL = 0x80000000, STEX_CDB_LENGTH = 16, @@ -312,6 +314,10 @@ struct st_hba { struct st_ccb *wait_ccb; __le32 *scratch; + char work_q_name[20]; + struct workqueue_struct *work_q; + struct work_struct reset_work; + wait_queue_head_t reset_waitq; unsigned int mu_status; unsigned int cardtype; int msi_enabled; @@ -578,6 +584,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) lun = cmd->device->lun; hba = (struct st_hba *) &host->hostdata[0]; + if (unlikely(hba->mu_status == MU_STATE_RESETTING)) + return SCSI_MLQUEUE_HOST_BUSY; + switch (cmd->cmnd[0]) { case MODE_SENSE_10: { @@ -842,7 +851,6 @@ static irqreturn_t stex_intr(int irq, void *__hba) void __iomem *base = hba->mmio_base; u32 data; unsigned long flags; - int handled = 0; spin_lock_irqsave(hba->host->host_lock, flags); @@ -853,12 +861,16 @@ static irqreturn_t stex_intr(int irq, void *__hba) writel(data, base + ODBL); readl(base + ODBL); /* flush */ stex_mu_intr(hba, data); - handled = 1; + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET && + hba->cardtype == st_shasta)) + queue_work(hba->work_q, &hba->reset_work); + return IRQ_HANDLED; } spin_unlock_irqrestore(hba->host->host_lock, flags); - return IRQ_RETVAL(handled); + return IRQ_NONE; } static void stex_ss_mu_intr(struct st_hba *hba) @@ -940,7 +952,6 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba) void __iomem *base = hba->mmio_base; u32 data; unsigned long flags; - int handled = 0; spin_lock_irqsave(hba->host->host_lock, flags); @@ -949,12 +960,15 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba) /* clear the interrupt */ writel(data, base + YI2H_INT_C); stex_ss_mu_intr(hba); - handled = 1; + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (unlikely(data & 
SS_I2H_REQUEST_RESET)) + queue_work(hba->work_q, &hba->reset_work); + return IRQ_HANDLED; } spin_unlock_irqrestore(hba->host->host_lock, flags); - return IRQ_RETVAL(handled); + return IRQ_NONE; } static int stex_common_handshake(struct st_hba *hba) @@ -1047,7 +1061,7 @@ static int stex_ss_handshake(struct st_hba *hba) struct st_msg_header *msg_h; struct handshake_frame *h; __le32 *scratch; - u32 data; + u32 data, scratch_size; unsigned long before; int ret = 0; @@ -1075,13 +1089,16 @@ static int stex_ss_handshake(struct st_hba *hba) stex_gettime(&h->hosttime); h->partner_type = HMU_PARTNER_TYPE; h->extra_offset = h->extra_size = 0; - h->scratch_size = cpu_to_le32((hba->sts_count+1)*sizeof(u32)); + scratch_size = (hba->sts_count+1)*sizeof(u32); + h->scratch_size = cpu_to_le32(scratch_size); data = readl(base + YINT_EN); data &= ~4; writel(data, base + YINT_EN); writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); + readl(base + YH2I_REQ_HI); writel(hba->dma_handle, base + YH2I_REQ); + readl(base + YH2I_REQ); /* flush */ scratch = hba->scratch; before = jiffies; @@ -1097,7 +1114,7 @@ static int stex_ss_handshake(struct st_hba *hba) msleep(1); } - *scratch = 0; + memset(scratch, 0, scratch_size); msg_h->flag = 0; return ret; } @@ -1106,19 +1123,24 @@ static int stex_handshake(struct st_hba *hba) { int err; unsigned long flags; + unsigned int mu_status; err = (hba->cardtype == st_yel) ? stex_ss_handshake(hba) : stex_common_handshake(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + mu_status = hba->mu_status; if (err == 0) { - spin_lock_irqsave(hba->host->host_lock, flags); hba->req_head = 0; hba->req_tail = 0; hba->status_head = 0; hba->status_tail = 0; hba->out_req_cnt = 0; hba->mu_status = MU_STATE_STARTED; - spin_unlock_irqrestore(hba->host->host_lock, flags); - } + } else + hba->mu_status = MU_STATE_FAILED; + if (mu_status == MU_STATE_RESETTING) + wake_up_all(&hba->reset_waitq); + spin_unlock_irqrestore(hba->host->host_lock, flags); return err; } @@ -1138,17 +1160,11 @@ static int stex_abort(struct scsi_cmnd *cmd) base = hba->mmio_base; spin_lock_irqsave(host->host_lock, flags); - if (tag < host->can_queue && hba->ccb[tag].cmd == cmd) + if (tag < host->can_queue && + hba->ccb[tag].req && hba->ccb[tag].cmd == cmd) hba->wait_ccb = &hba->ccb[tag]; - else { - for (tag = 0; tag < host->can_queue; tag++) - if (hba->ccb[tag].cmd == cmd) { - hba->wait_ccb = &hba->ccb[tag]; - break; - } - if (tag >= host->can_queue) - goto out; - } + else + goto out; if (hba->cardtype == st_yel) { data = readl(base + YI2H_INT); @@ -1222,6 +1238,37 @@ static void stex_hard_reset(struct st_hba *hba) hba->pdev->saved_config_space[i]); } +static int stex_yos_reset(struct st_hba *hba) +{ + void __iomem *base; + unsigned long flags, before; + int ret = 0; + + base = hba->mmio_base; + writel(MU_INBOUND_DOORBELL_RESET, base + IDBL); + readl(base + IDBL); /* flush */ + before = jiffies; + while (hba->out_req_cnt > 0) { + if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { + printk(KERN_WARNING DRV_NAME + "(%s): reset timeout\n", pci_name(hba->pdev)); + ret = -1; + break; + } + msleep(1); + } + + spin_lock_irqsave(hba->host->host_lock, flags); + if (ret == -1) + hba->mu_status = MU_STATE_FAILED; + else + hba->mu_status = MU_STATE_STARTED; + wake_up_all(&hba->reset_waitq); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return ret; +} + static void stex_ss_reset(struct st_hba *hba) { writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); @@ -1229,66 +1276,86 @@ static void 
stex_ss_reset(struct st_hba *hba) ssleep(5); } -static int stex_reset(struct scsi_cmnd *cmd) +static int stex_do_reset(struct st_hba *hba) { - struct st_hba *hba; - void __iomem *base; - unsigned long flags, before; + struct st_ccb *ccb; + unsigned long flags; + unsigned int mu_status = MU_STATE_RESETTING; + u16 tag; - hba = (struct st_hba *) &cmd->device->host->hostdata[0]; + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->mu_status == MU_STATE_STARTING) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + printk(KERN_INFO DRV_NAME "(%s): request reset during init\n", + pci_name(hba->pdev)); + return 0; + } + while (hba->mu_status == MU_STATE_RESETTING) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + wait_event_timeout(hba->reset_waitq, + hba->mu_status != MU_STATE_RESETTING, + MU_MAX_DELAY * HZ); + spin_lock_irqsave(hba->host->host_lock, flags); + mu_status = hba->mu_status; + } - printk(KERN_INFO DRV_NAME - "(%s): resetting host\n", pci_name(hba->pdev)); - scsi_print_command(cmd); + if (mu_status != MU_STATE_RESETTING) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + return (mu_status == MU_STATE_STARTED) ? 0 : -1; + } hba->mu_status = MU_STATE_RESETTING; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (hba->cardtype == st_yosemite) + return stex_yos_reset(hba); if (hba->cardtype == st_shasta) stex_hard_reset(hba); else if (hba->cardtype == st_yel) stex_ss_reset(hba); - if (hba->cardtype != st_yosemite) { - if (stex_handshake(hba)) { - printk(KERN_WARNING DRV_NAME - "(%s): resetting: handshake failed\n", - pci_name(hba->pdev)); - return FAILED; + spin_lock_irqsave(hba->host->host_lock, flags); + for (tag = 0; tag < hba->host->can_queue; tag++) { + ccb = &hba->ccb[tag]; + if (ccb->req == NULL) + continue; + ccb->req = NULL; + if (ccb->cmd) { + scsi_dma_unmap(ccb->cmd); + ccb->cmd->result = DID_RESET << 16; + ccb->cmd->scsi_done(ccb->cmd); + ccb->cmd = NULL; } - return SUCCESS; } + spin_unlock_irqrestore(hba->host->host_lock, flags); - /* st_yosemite */ - writel(MU_INBOUND_DOORBELL_RESET, hba->mmio_base + IDBL); - readl(hba->mmio_base + IDBL); /* flush */ - before = jiffies; - while (hba->out_req_cnt > 0) { - if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { - printk(KERN_WARNING DRV_NAME - "(%s): reset timeout\n", pci_name(hba->pdev)); - return FAILED; - } - msleep(1); - } + if (stex_handshake(hba) == 0) + return 0; - base = hba->mmio_base; - writel(0, base + IMR0); - readl(base + IMR0); - writel(0, base + OMR0); - readl(base + OMR0); - writel(0, base + IMR1); - readl(base + IMR1); - writel(0, base + OMR1); - readl(base + OMR1); /* flush */ - spin_lock_irqsave(hba->host->host_lock, flags); - hba->req_head = 0; - hba->req_tail = 0; - hba->status_head = 0; - hba->status_tail = 0; - hba->out_req_cnt = 0; - hba->mu_status = MU_STATE_STARTED; - spin_unlock_irqrestore(hba->host->host_lock, flags); - return SUCCESS; + printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n", + pci_name(hba->pdev)); + return -1; +} + +static int stex_reset(struct scsi_cmnd *cmd) +{ + struct st_hba *hba; + + hba = (struct st_hba *) &cmd->device->host->hostdata[0]; + + printk(KERN_INFO DRV_NAME + "(%s): resetting host\n", pci_name(hba->pdev)); + scsi_print_command(cmd); + + return stex_do_reset(hba) ? 
FAILED : SUCCESS; +} + +static void stex_reset_work(struct work_struct *work) +{ + struct st_hba *hba = container_of(work, struct st_hba, reset_work); + + stex_do_reset(hba); } static int stex_biosparam(struct scsi_device *sdev, @@ -1583,12 +1650,24 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) hba->host = host; hba->pdev = pdev; + init_waitqueue_head(&hba->reset_waitq); + + snprintf(hba->work_q_name, sizeof(hba->work_q_name), + "stex_wq_%d", host->host_no); + hba->work_q = create_singlethread_workqueue(hba->work_q_name); + if (!hba->work_q) { + printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto out_ccb_free; + } + INIT_WORK(&hba->reset_work, stex_reset_work); err = stex_request_irq(hba); if (err) { printk(KERN_ERR DRV_NAME "(%s): request irq failed\n", pci_name(pdev)); - goto out_ccb_free; + goto out_free_wq; } err = stex_handshake(hba); @@ -1617,6 +1696,8 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) out_free_irq: stex_free_irq(hba); +out_free_wq: + destroy_workqueue(hba->work_q); out_ccb_free: kfree(hba->ccb); out_pci_free: @@ -1684,6 +1765,8 @@ static void stex_hba_free(struct st_hba *hba) { stex_free_irq(hba); + destroy_workqueue(hba->work_q); + iounmap(hba->mmio_base); pci_release_regions(hba->pdev); -- cgit v1.2.3-59-g8ed1b From cce9c8aed7d3ac0a14815e99b4602ae6c854a0ba Mon Sep 17 00:00:00 2001 From: Ed Lin Date: Mon, 28 Sep 2009 22:58:36 -0800 Subject: [SCSI] stex: update version to 4.6.0000.4 Update version to 4.6.0000.4. Signed-off-by: Ed Lin Signed-off-by: James Bottomley --- drivers/scsi/stex.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 79216ee8112b..3058bb1aff95 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -36,11 +36,11 @@ #include #define DRV_NAME "stex" -#define ST_DRIVER_VERSION "4.6.0000.3" +#define ST_DRIVER_VERSION "4.6.0000.4" #define ST_VER_MAJOR 4 #define ST_VER_MINOR 6 #define ST_OEM 0 -#define ST_BUILD_VER 3 +#define ST_BUILD_VER 4 enum { /* MU register offset */ @@ -1488,8 +1488,8 @@ static int stex_set_dma_mask(struct pci_dev * pdev) { int ret; - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) - && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) + && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) return 0; ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!ret) -- cgit v1.2.3-59-g8ed1b From ad63082626f99651d261ccd8698ce4e997362f7e Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Mon, 28 Sep 2009 15:50:52 -0500 Subject: [SCSI] fix propogation of integrity errors When the Integrity check is done in scsi_io_completion it will set error to -EILSEQ. However, at this point error is no longer used, and blk_end_request_err has -EIO hardcoded. It looks like there was just porting mistake with this patch http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=3e695f89c5debb735e4ff051e9e58d8fb4e95110 and we meant to send error upwards, so this patch changes the hard coded EIO to the error variable. I have only boot tested this patch. Signed-off-by: Mike Christie Acked-by: Martin K. 
Petersen Signed-off-by: James Bottomley --- drivers/scsi/scsi_lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 5987da857103..108655230b59 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -898,7 +898,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) scsi_print_sense("", cmd); scsi_print_command(cmd); } - if (blk_end_request_err(req, -EIO)) + if (blk_end_request_err(req, error)) scsi_requeue_command(q, cmd); else scsi_next_command(cmd); -- cgit v1.2.3-59-g8ed1b From 8798a694da59486e4a3ff0abeec183202fb34c20 Mon Sep 17 00:00:00 2001 From: Michael Reed Date: Fri, 9 Oct 2009 14:15:59 -0500 Subject: [SCSI] scsi_transport_fc: remove invalid BUG_ON I was doing some large lun count testing with 2.6.31 and hit a BUG_ON() in fc_timeout_deleted_rport(), and it seems like it should have been just a matter of time before someone did. It seems invalid to set port_state under lock, then expect it to remain set after releasing the lock. Another thread called fc_remote_port_add() when the lock was released, changing the port_state. This patch removes the BUG_ON and moves the test of the port_state to inside the host_lock. It's been running for several weeks now with no ill effect. Signed-off-by: Michael Reed Acked-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_fc.c | 68 +++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 26 deletions(-) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index a67fed10598a..f436e033adaf 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2384,6 +2384,7 @@ fc_rport_final_delete(struct work_struct *work) struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); unsigned long flags; + int do_callback = 0; /* * if a scan is pending, flush the SCSI Host work_q so that @@ -2422,8 +2423,15 @@ fc_rport_final_delete(struct work_struct *work) * Avoid this call if we already called it when we preserved the * rport for the binding. 
*/ + spin_lock_irqsave(shost->host_lock, flags); if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) && - (i->f->dev_loss_tmo_callbk)) + (i->f->dev_loss_tmo_callbk)) { + rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; + do_callback = 1; + } + spin_unlock_irqrestore(shost->host_lock, flags); + + if (do_callback) i->f->dev_loss_tmo_callbk(rport); fc_bsg_remove(rport->rqst_q); @@ -2970,6 +2978,7 @@ fc_timeout_deleted_rport(struct work_struct *work) struct fc_internal *i = to_fc_internal(shost->transportt); struct fc_host_attrs *fc_host = shost_to_fc_host(shost); unsigned long flags; + int do_callback = 0; spin_lock_irqsave(shost->host_lock, flags); @@ -3035,7 +3044,6 @@ fc_timeout_deleted_rport(struct work_struct *work) rport->roles = FC_PORT_ROLE_UNKNOWN; rport->port_state = FC_PORTSTATE_NOTPRESENT; rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; - rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; /* * Pre-emptively kill I/O rather than waiting for the work queue @@ -3045,32 +3053,40 @@ fc_timeout_deleted_rport(struct work_struct *work) spin_unlock_irqrestore(shost->host_lock, flags); fc_terminate_rport_io(rport); - BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT); + spin_lock_irqsave(shost->host_lock, flags); - /* remove the identifiers that aren't used in the consisting binding */ - switch (fc_host->tgtid_bind_type) { - case FC_TGTID_BIND_BY_WWPN: - rport->node_name = -1; - rport->port_id = -1; - break; - case FC_TGTID_BIND_BY_WWNN: - rport->port_name = -1; - rport->port_id = -1; - break; - case FC_TGTID_BIND_BY_ID: - rport->node_name = -1; - rport->port_name = -1; - break; - case FC_TGTID_BIND_NONE: /* to keep compiler happy */ - break; + if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */ + + /* remove the identifiers that aren't used in the consisting binding */ + switch (fc_host->tgtid_bind_type) { + case FC_TGTID_BIND_BY_WWPN: + rport->node_name = -1; + rport->port_id = -1; + break; + case FC_TGTID_BIND_BY_WWNN: + rport->port_name = -1; + rport->port_id = -1; + break; + case FC_TGTID_BIND_BY_ID: + rport->node_name = -1; + rport->port_name = -1; + break; + case FC_TGTID_BIND_NONE: /* to keep compiler happy */ + break; + } + + /* + * As this only occurs if the remote port (scsi target) + * went away and didn't come back - we'll remove + * all attached scsi devices. + */ + rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; + fc_queue_work(shost, &rport->stgt_delete_work); + + do_callback = 1; } - /* - * As this only occurs if the remote port (scsi target) - * went away and didn't come back - we'll remove - * all attached scsi devices. - */ - fc_queue_work(shost, &rport->stgt_delete_work); + spin_unlock_irqrestore(shost->host_lock, flags); /* * Notify the driver that the rport is now dead. The LLDD will @@ -3078,7 +3094,7 @@ fc_timeout_deleted_rport(struct work_struct *work) * * Note: we set the CALLBK_DONE flag above to correspond */ - if (i->f->dev_loss_tmo_callbk) + if (do_callback && i->f->dev_loss_tmo_callbk) i->f->dev_loss_tmo_callbk(rport); } -- cgit v1.2.3-59-g8ed1b From f8ceafde6f5bf6b4b7087c7f5e9da1b2a5284a2e Mon Sep 17 00:00:00 2001 From: Jing Huang Date: Fri, 25 Sep 2009 12:29:54 -0700 Subject: [SCSI] bfa: fixed checkpatch errors for bfad files This patch fixes checkpatch errors/warnings in bfad files. 
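Most of the churn is mechanical cleanup of the kind checkpatch.pl flags, chiefly superfluous parentheses around return values and unparenthesized macro bodies (messages roughly of the form "return is not a function, parentheses are not required"). A hypothetical before/after pair, not taken from the bfa sources:

        /* before: checkpatch complains about both constructs */
        #define demo_is_online_old(_p)  (_p)->is_online
        static int demo_get_old(int v) { return (v); }

        /* after: macro body parenthesized, no parentheses around the return value */
        #define demo_is_online_new(_p)  ((_p)->is_online)
        static int demo_get_new(int v) { return v; }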
Signed-off-by: Jing Huang Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_cb_ioim_macros.h | 10 +- drivers/scsi/bfa/bfa_cee.c | 2 +- drivers/scsi/bfa/bfa_csdebug.c | 4 +- drivers/scsi/bfa/bfa_fcpim.c | 4 +- drivers/scsi/bfa/bfa_fcpim_priv.h | 4 +- drivers/scsi/bfa/bfa_fcport.c | 59 ++++--- drivers/scsi/bfa/bfa_fcs_lport.c | 11 +- drivers/scsi/bfa/bfa_fcxp.c | 8 +- drivers/scsi/bfa/bfa_intr.c | 2 +- drivers/scsi/bfa/bfa_intr_priv.h | 18 +- drivers/scsi/bfa/bfa_ioc.c | 10 +- drivers/scsi/bfa/bfa_ioc.h | 12 +- drivers/scsi/bfa/bfa_iocfc.c | 8 +- drivers/scsi/bfa/bfa_iocfc.h | 8 +- drivers/scsi/bfa/bfa_ioim.c | 4 +- drivers/scsi/bfa/bfa_itnim.c | 6 +- drivers/scsi/bfa/bfa_log.c | 4 +- drivers/scsi/bfa/bfa_port_priv.h | 4 +- drivers/scsi/bfa/bfa_rport.c | 6 +- drivers/scsi/bfa/bfa_tskim.c | 9 +- drivers/scsi/bfa/bfa_uf.c | 2 +- drivers/scsi/bfa/bfad.c | 6 +- drivers/scsi/bfa/bfad_fwimg.c | 8 +- drivers/scsi/bfa/bfad_im.c | 2 +- drivers/scsi/bfa/bfad_im_compat.h | 2 +- drivers/scsi/bfa/bfad_intr.c | 3 +- drivers/scsi/bfa/fabric.c | 18 +- drivers/scsi/bfa/fcbuild.c | 190 ++++++++++----------- drivers/scsi/bfa/fcbuild.h | 6 +- drivers/scsi/bfa/fcpim.c | 7 +- drivers/scsi/bfa/fcs.h | 2 +- drivers/scsi/bfa/fdmi.c | 8 +- drivers/scsi/bfa/include/aen/bfa_aen.h | 2 +- drivers/scsi/bfa/include/bfa.h | 10 +- drivers/scsi/bfa/include/bfa_svc.h | 8 +- drivers/scsi/bfa/include/bfi/bfi.h | 12 +- drivers/scsi/bfa/include/bfi/bfi_ioc.h | 2 +- drivers/scsi/bfa/include/bfi/bfi_lps.h | 4 +- drivers/scsi/bfa/include/bfi/bfi_rport.h | 8 +- drivers/scsi/bfa/include/cs/bfa_checksum.h | 6 +- drivers/scsi/bfa/include/cs/bfa_sm.h | 6 +- drivers/scsi/bfa/include/cs/bfa_trc.h | 2 +- drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h | 2 +- drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h | 17 +- drivers/scsi/bfa/include/protocol/ct.h | 10 +- drivers/scsi/bfa/include/protocol/fc.h | 22 +-- drivers/scsi/bfa/loop.c | 233 ++------------------------ drivers/scsi/bfa/lport_api.c | 15 +- drivers/scsi/bfa/ns.c | 5 +- drivers/scsi/bfa/plog.c | 2 +- drivers/scsi/bfa/rport_ftrs.c | 28 ++-- drivers/scsi/bfa/vfapi.c | 2 +- drivers/scsi/bfa/vport.c | 20 +-- 53 files changed, 324 insertions(+), 539 deletions(-) diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h index 0050c838c358..961fe439daad 100644 --- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h +++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h @@ -51,7 +51,7 @@ bfad_int_to_lun(u32 luno) lun.bfa_lun = 0; lun.scsi_lun[0] = bfa_os_htons(luno); - return (lun.bfa_lun); + return lun.bfa_lun; } /** @@ -68,7 +68,7 @@ bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; - return ((u8 *) cmnd->cmnd); + return (u8 *) cmnd->cmnd; } /** @@ -97,7 +97,7 @@ bfa_cb_ioim_get_size(struct bfad_ioim_s *dio) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; - return (scsi_bufflen(cmnd)); + return scsi_bufflen(cmnd); } /** @@ -129,7 +129,7 @@ bfa_cb_ioim_get_sgaddr(struct bfad_ioim_s *dio, int sgeid) sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid; addr = (u64) sg_dma_address(sge); - return (*(union bfi_addr_u *) &addr); + return *((union bfi_addr_u *) &addr); } static inline u32 @@ -197,7 +197,7 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; - return (cmnd->cmd_len); + return cmnd->cmd_len; } diff --git a/drivers/scsi/bfa/bfa_cee.c b/drivers/scsi/bfa/bfa_cee.c index 7a959c34e789..2b917792c6bc 100644 --- a/drivers/scsi/bfa/bfa_cee.c +++ b/drivers/scsi/bfa/bfa_cee.c @@ 
-228,7 +228,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status) u32 bfa_cee_meminfo(void) { - return (bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo()); + return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo(); } /** diff --git a/drivers/scsi/bfa/bfa_csdebug.c b/drivers/scsi/bfa/bfa_csdebug.c index 1b71d349451a..caeb1143a4e6 100644 --- a/drivers/scsi/bfa/bfa_csdebug.c +++ b/drivers/scsi/bfa/bfa_csdebug.c @@ -47,12 +47,12 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) tqe = bfa_q_next(q); while (tqe != q) { if (tqe == qe) - return (1); + return 1; tqe = bfa_q_next(tqe); if (tqe == NULL) break; } - return (0); + return 0; } diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 401babe3494e..790c945aeae6 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -131,7 +131,7 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa) { struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); - return (fcpim->path_tov / 1000); + return fcpim->path_tov / 1000; } bfa_status_t @@ -169,7 +169,7 @@ bfa_fcpim_qdepth_get(struct bfa_s *bfa) { struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); - return (fcpim->q_depth); + return fcpim->q_depth; } diff --git a/drivers/scsi/bfa/bfa_fcpim_priv.h b/drivers/scsi/bfa/bfa_fcpim_priv.h index 153206cfb37a..5cf418460f75 100644 --- a/drivers/scsi/bfa/bfa_fcpim_priv.h +++ b/drivers/scsi/bfa/bfa_fcpim_priv.h @@ -35,7 +35,7 @@ #define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */ #define bfa_fcpim_stats(__fcpim, __stats) \ - (__fcpim)->stats.__stats ++ + ((__fcpim)->stats.__stats++) struct bfa_fcpim_mod_s { struct bfa_s *bfa; @@ -143,7 +143,7 @@ struct bfa_itnim_s { struct bfa_itnim_hal_stats_s stats; }; -#define bfa_itnim_is_online(_itnim) (_itnim)->is_online +#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) #define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) #define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ (&fcpim->ioim_arr[_iotag]) diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c index 992435987deb..aef648b55dfc 100644 --- a/drivers/scsi/bfa/bfa_fcport.c +++ b/drivers/scsi/bfa/bfa_fcport.c @@ -388,32 +388,29 @@ bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); - if (BFA_PORT_IS_DISABLED(pport->bfa)) { + if (BFA_PORT_IS_DISABLED(pport->bfa)) bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); - } else { + else bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); - } break; case BFA_PPORT_SM_STOP: bfa_sm_set_state(pport, bfa_pport_sm_stopped); bfa_pport_reset_linkinfo(pport); - if (BFA_PORT_IS_DISABLED(pport->bfa)) { + if (BFA_PORT_IS_DISABLED(pport->bfa)) bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); - } else { + else bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); - } break; case BFA_PPORT_SM_HWFAIL: bfa_sm_set_state(pport, bfa_pport_sm_iocdown); bfa_pport_reset_linkinfo(pport); bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); - if (BFA_PORT_IS_DISABLED(pport->bfa)) { + if (BFA_PORT_IS_DISABLED(pport->bfa)) bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); - } else { + else bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); - } break; default: @@ -999,10 +996,10 @@ bfa_pport_enable(struct bfa_s *bfa) struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); if (pport->diag_busy) - return (BFA_STATUS_DIAG_BUSY); + return BFA_STATUS_DIAG_BUSY; else if (bfa_sm_cmp_state (BFA_PORT_MOD(bfa), 
bfa_pport_sm_disabling_qwait)) - return (BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE); return BFA_STATUS_OK; @@ -1032,7 +1029,7 @@ bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) pport->cfg.speed = speed; - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } /** @@ -1068,7 +1065,7 @@ bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) } pport->cfg.topology = topology; - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } /** @@ -1094,7 +1091,7 @@ bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) pport->cfg.cfg_hardalpa = BFA_TRUE; pport->cfg.hardalpa = alpa; - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } bfa_status_t @@ -1106,7 +1103,7 @@ bfa_pport_clr_hardalpa(struct bfa_s *bfa) bfa_trc(bfa, pport->cfg.hardalpa); pport->cfg.cfg_hardalpa = BFA_FALSE; - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } bfa_boolean_t @@ -1138,16 +1135,16 @@ bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) * with in range */ if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ)) - return (BFA_STATUS_INVLD_DFSZ); + return BFA_STATUS_INVLD_DFSZ; /* * power of 2, if not the max frame size of 2112 */ if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) - return (BFA_STATUS_INVLD_DFSZ); + return BFA_STATUS_INVLD_DFSZ; pport->cfg.maxfrsize = maxfrsize; - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } u16 @@ -1415,7 +1412,7 @@ bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, if (port->stats_busy) { bfa_trc(bfa, port->stats_busy); - return (BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; } port->stats_busy = BFA_TRUE; @@ -1427,7 +1424,7 @@ bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port, BFA_PORT_STATS_TOV); - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } bfa_status_t @@ -1437,7 +1434,7 @@ bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) if (port->stats_busy) { bfa_trc(bfa, port->stats_busy); - return (BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; } port->stats_busy = BFA_TRUE; @@ -1448,7 +1445,7 @@ bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, BFA_PORT_STATS_TOV); - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } bfa_status_t @@ -1515,7 +1512,7 @@ bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, /* * QoS stats is embedded in port stats */ - return (bfa_pport_get_stats(bfa, stats, cbfn, cbarg)); + return bfa_pport_get_stats(bfa, stats, cbfn, cbarg); } bfa_status_t @@ -1525,7 +1522,7 @@ bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) if (port->stats_busy) { bfa_trc(bfa, port->stats_busy); - return (BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; } port->stats_busy = BFA_TRUE; @@ -1536,7 +1533,7 @@ bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, BFA_PORT_STATS_TOV); - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } /** @@ -1545,7 +1542,7 @@ bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) bfa_status_t bfa_pport_trunk_disable(struct bfa_s *bfa) { - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } bfa_boolean_t @@ -1562,8 +1559,8 @@ bfa_pport_is_disabled(struct bfa_s *bfa) { struct bfa_pport_s *port = BFA_PORT_MOD(bfa); - return 
(bfa_sm_to_state(hal_pport_sm_table, port->sm) == - BFA_PPORT_ST_DISABLED); + return bfa_sm_to_state(hal_pport_sm_table, port->sm) == + BFA_PPORT_ST_DISABLED; } @@ -1572,7 +1569,7 @@ bfa_pport_is_ratelim(struct bfa_s *bfa) { struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); -return (pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE); + return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE; } @@ -1620,7 +1617,7 @@ bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) pport->cfg.trl_def_speed = speed; - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } /** @@ -1632,7 +1629,7 @@ bfa_pport_get_ratelim_speed(struct bfa_s *bfa) struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); bfa_trc(bfa, pport->cfg.trl_def_speed); - return (pport->cfg.trl_def_speed); + return pport->cfg.trl_def_speed; } diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 8975ed041dc0..c7ab257f10a7 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -568,11 +568,10 @@ bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port) __port_action[port->fabric->fab_type].offline(port); - if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) { + if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT); - } else { + else bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE); - } bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles, port->fabric->vf_drv, (port->vport == NULL) ? NULL : port->vport->vport_drv); @@ -777,7 +776,7 @@ bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn) } bfa_trc(port->fcs, pwwn); - return (NULL); + return NULL; } /** @@ -796,7 +795,7 @@ bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn) } bfa_trc(port->fcs, nwwn); - return (NULL); + return NULL; } /** @@ -870,7 +869,7 @@ bfa_fcs_port_lip(struct bfa_fcs_port_s *port) bfa_boolean_t bfa_fcs_port_is_online(struct bfa_fcs_port_s *port) { - return (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online)); + return bfa_sm_cmp_state(port, bfa_fcs_port_sm_online); } /** diff --git a/drivers/scsi/bfa/bfa_fcxp.c b/drivers/scsi/bfa/bfa_fcxp.c index 4754a0e9006a..cf0ad6782686 100644 --- a/drivers/scsi/bfa/bfa_fcxp.c +++ b/drivers/scsi/bfa/bfa_fcxp.c @@ -199,7 +199,7 @@ bfa_fcxp_get(struct bfa_fcxp_mod_s *fm) if (fcxp) list_add_tail(&fcxp->qe, &fm->fcxp_active_q); - return (fcxp); + return fcxp; } static void @@ -503,7 +503,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); if (fcxp == NULL) - return (NULL); + return NULL; bfa_trc(bfa, fcxp->fcxp_tag); @@ -568,7 +568,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, } } - return (fcxp); + return fcxp; } /** @@ -709,7 +709,7 @@ bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) { bfa_assert(0); - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } void diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c index 0ca125712a04..b36540e4ed76 100644 --- a/drivers/scsi/bfa/bfa_intr.c +++ b/drivers/scsi/bfa/bfa_intr.c @@ -59,7 +59,7 @@ bfa_intx(struct bfa_s *bfa) qintr = intr & __HFN_INT_RME_MASK; bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); - for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue ++) { + for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { if (intr & (__HFN_INT_RME_Q0 << queue)) bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); } diff --git a/drivers/scsi/bfa/bfa_intr_priv.h b/drivers/scsi/bfa/bfa_intr_priv.h index 8ce6e6b105c8..5fc301cf4d1b 100644 
--- a/drivers/scsi/bfa/bfa_intr_priv.h +++ b/drivers/scsi/bfa/bfa_intr_priv.h @@ -26,9 +26,9 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); -#define bfa_reqq_pi(__bfa, __reqq) (__bfa)->iocfc.req_cq_pi[__reqq] +#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq]) #define bfa_reqq_ci(__bfa, __reqq) \ - *(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva) + (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva)) #define bfa_reqq_full(__bfa, __reqq) \ (((bfa_reqq_pi(__bfa, __reqq) + 1) & \ @@ -50,14 +50,16 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); } while (0) #define bfa_rspq_pi(__bfa, __rspq) \ - *(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva) + (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva)) -#define bfa_rspq_ci(__bfa, __rspq) (__bfa)->iocfc.rsp_cq_ci[__rspq] +#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq]) #define bfa_rspq_elem(__bfa, __rspq, __ci) \ - &((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci] + (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci]) -#define CQ_INCR(__index, __size) \ - (__index)++; (__index) &= ((__size) - 1) +#define CQ_INCR(__index, __size) do { \ + (__index)++; \ + (__index) &= ((__size) - 1); \ +} while (0) /** * Queue element to wait for room in request queue. FIFO order is @@ -94,7 +96,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), wqe->cbarg = cbarg; } -#define bfa_reqq(__bfa, __reqq) &(__bfa)->reqq_waitq[__reqq] +#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq]) /** * static inline void diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 149348934ce3..397d7e9eade5 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -51,7 +51,7 @@ BFA_TRC_FILE(HAL, IOC); (sizeof(struct bfa_trc_mod_s) - \ BFA_TRC_MAX * sizeof(struct bfa_trc_s))) #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) -#define bfa_ioc_stats(_ioc, _stats) (_ioc)->stats._stats ++ +#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) #define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) #define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) @@ -1953,8 +1953,8 @@ bfa_ioc_error_isr(struct bfa_ioc_s *ioc) bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) { - return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) - || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)); + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) + || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); } /** @@ -1963,9 +1963,9 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) { - return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) - || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch)); + || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch); } #define bfa_ioc_state_disabled(__sm) \ diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 58efd4b13143..7c30f05ab137 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -179,16 +179,16 @@ struct bfa_ioc_s { struct bfa_ioc_mbox_mod_s mbox_mod; }; -#define bfa_ioc_pcifn(__ioc) (__ioc)->pcidev.pci_func -#define bfa_ioc_devid(__ioc) (__ioc)->pcidev.device_id -#define bfa_ioc_bar0(__ioc) (__ioc)->pcidev.pci_bar_kva +#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) +#define 
bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id) +#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) #define bfa_ioc_portid(__ioc) ((__ioc)->port_id) #define bfa_ioc_fetch_stats(__ioc, __stats) \ - ((__stats)->drv_stats) = (__ioc)->stats + (((__stats)->drv_stats) = (__ioc)->stats) #define bfa_ioc_clr_stats(__ioc) \ bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) -#define bfa_ioc_maxfrsize(__ioc) (__ioc)->attr->maxfrsize -#define bfa_ioc_rx_bbcredit(__ioc) (__ioc)->attr->rx_bbcredit +#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) +#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) #define bfa_ioc_speed_sup(__ioc) \ BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c index 12350b022d63..d7ab792a9e54 100644 --- a/drivers/scsi/bfa/bfa_iocfc.c +++ b/drivers/scsi/bfa/bfa_iocfc.c @@ -794,7 +794,7 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats, if (iocfc->stats_busy) { bfa_trc(bfa, iocfc->stats_busy); - return (BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; } iocfc->stats_busy = BFA_TRUE; @@ -804,7 +804,7 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats, bfa_iocfc_stats_query(bfa); - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } bfa_status_t @@ -814,7 +814,7 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg) if (iocfc->stats_busy) { bfa_trc(bfa, iocfc->stats_busy); - return (BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; } iocfc->stats_busy = BFA_TRUE; @@ -822,7 +822,7 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg) iocfc->stats_cbarg = cbarg; bfa_iocfc_stats_clear(bfa); - return (BFA_STATUS_OK); + return BFA_STATUS_OK; } /** diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h index 7ad177ed4cfc..ce9a830a4207 100644 --- a/drivers/scsi/bfa/bfa_iocfc.h +++ b/drivers/scsi/bfa/bfa_iocfc.h @@ -107,13 +107,13 @@ struct bfa_iocfc_s { #define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc) #define bfa_msix_init(__bfa, __nvecs) \ - (__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs) + ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs)) #define bfa_msix_install(__bfa) \ - (__bfa)->iocfc.hwif.hw_msix_install(__bfa) + ((__bfa)->iocfc.hwif.hw_msix_install(__bfa)) #define bfa_msix_uninstall(__bfa) \ - (__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa) + ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)) #define bfa_isr_mode_set(__bfa, __msix) \ - (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix) + ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix)) #define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \ (__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c index 7ae2552e1e14..f81d359b7089 100644 --- a/drivers/scsi/bfa/bfa_ioim.c +++ b/drivers/scsi/bfa/bfa_ioim.c @@ -105,13 +105,13 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); list_del(&ioim->qe); list_add_tail(&ioim->qe, - &ioim->fcpim->ioim_comp_q); + &ioim->fcpim->ioim_comp_q); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_pathtov, ioim); } else { list_del(&ioim->qe); list_add_tail(&ioim->qe, - &ioim->itnim->pending_q); + &ioim->itnim->pending_q); } break; } diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c index 4d5c61a4f85c..eabf7d38bd09 100644 --- a/drivers/scsi/bfa/bfa_itnim.c +++ b/drivers/scsi/bfa/bfa_itnim.c @@ -1029,7 
+1029,7 @@ bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) bfa_stats(itnim, creates); bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE); - return (itnim); + return itnim; } void @@ -1061,7 +1061,7 @@ bfa_itnim_offline(struct bfa_itnim_s *itnim) bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim) { - return ( + return itnim->fcpim->path_tov && itnim->iotov_active && (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) || @@ -1069,7 +1069,7 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim) bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)) -); + ; } void diff --git a/drivers/scsi/bfa/bfa_log.c b/drivers/scsi/bfa/bfa_log.c index c2735e55cf03..e7514016c9c6 100644 --- a/drivers/scsi/bfa/bfa_log.c +++ b/drivers/scsi/bfa/bfa_log.c @@ -231,9 +231,9 @@ bfa_log_get_level(struct bfa_log_mod_s *log_mod, int mod_id) return BFA_LOG_INVALID; if (log_mod) - return (log_mod->log_level[mod_id]); + return log_mod->log_level[mod_id]; else - return (bfa_log_info[mod_id].level); + return bfa_log_info[mod_id].level; } enum bfa_log_severity diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h index 4b97e2759908..51f698a06b6d 100644 --- a/drivers/scsi/bfa/bfa_port_priv.h +++ b/drivers/scsi/bfa/bfa_port_priv.h @@ -59,8 +59,8 @@ struct bfa_pport_s { u8 *stats_kva; u64 stats_pa; union bfa_pport_stats_u *stats; /* pport stats */ - u32 mypid : 24; - u32 rsvd_b : 8; + u32 mypid:24; + u32 rsvd_b:8; struct bfa_timer_s timer; /* timer */ union bfa_pport_stats_u *stats_ret; /* driver stats location */ diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c index 16da77a8db28..3e1990a74258 100644 --- a/drivers/scsi/bfa/bfa_rport.c +++ b/drivers/scsi/bfa/bfa_rport.c @@ -677,7 +677,7 @@ bfa_rport_alloc(struct bfa_rport_mod_s *mod) if (rport) list_add_tail(&rport->qe, &mod->rp_active_q); - return (rport); + return rport; } static void @@ -834,7 +834,7 @@ bfa_rport_create(struct bfa_s *bfa, void *rport_drv) rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa)); if (rp == NULL) - return (NULL); + return NULL; rp->bfa = bfa; rp->rport_drv = rport_drv; @@ -843,7 +843,7 @@ bfa_rport_create(struct bfa_s *bfa, void *rport_drv) bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); - return (rp); + return rp; } void diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c index 010d40d1e5d3..ff7a4dc0bf3c 100644 --- a/drivers/scsi/bfa/bfa_tskim.c +++ b/drivers/scsi/bfa/bfa_tskim.c @@ -23,13 +23,14 @@ BFA_TRC_FILE(HAL, TSKIM); /** * task management completion handling */ -#define bfa_tskim_qcomp(__tskim, __cbfn) do { \ - bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim)); \ +#define bfa_tskim_qcomp(__tskim, __cbfn) do { \ + bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, \ + __cbfn, (__tskim)); \ bfa_tskim_notify_comp(__tskim); \ } while (0) -#define bfa_tskim_notify_comp(__tskim) do { \ - if ((__tskim)->notify) \ +#define bfa_tskim_notify_comp(__tskim) do { \ + if ((__tskim)->notify) \ bfa_itnim_tskdone((__tskim)->itnim); \ } while (0) diff --git a/drivers/scsi/bfa/bfa_uf.c b/drivers/scsi/bfa/bfa_uf.c index ff5f9deb1b22..4b3c2417d180 100644 --- a/drivers/scsi/bfa/bfa_uf.c +++ b/drivers/scsi/bfa/bfa_uf.c @@ -185,7 +185,7 @@ bfa_uf_get(struct bfa_uf_mod_s *uf_mod) struct bfa_uf_s *uf; bfa_q_deq(&uf_mod->uf_free_q, &uf); - return (uf); + return uf; } static void diff 
--git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 6f2be5abf561..b52b773d49d9 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -188,8 +188,8 @@ static struct bfad_port_s * bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) { - return ((vp_drv) ? (&(vp_drv)->drv_port) - : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport))); + return (vp_drv) ? (&(vp_drv)->drv_port) + : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport)); } struct bfad_port_s * @@ -716,7 +716,7 @@ bfad_drv_init(struct bfad_s *bfad) if ((bfad->bfad_flags & BFAD_MSIX_ON) && bfad_install_msix_handler(bfad)) { printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", - __FUNCTION__, bfad->inst_no); + __func__, bfad->inst_no); } bfad_init_timer(bfad); diff --git a/drivers/scsi/bfa/bfad_fwimg.c b/drivers/scsi/bfa/bfad_fwimg.c index b2f6949bc8d3..2c2082d6ce45 100644 --- a/drivers/scsi/bfa/bfad_fwimg.c +++ b/drivers/scsi/bfa/bfad_fwimg.c @@ -63,10 +63,10 @@ bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, memcpy(*bfi_image, fw->data, fw->size); *bfi_image_size = fw->size/sizeof(u32); - return(*bfi_image); + return *bfi_image; error: - return(NULL); + return NULL; } u32 * @@ -76,12 +76,12 @@ bfad_get_firmware_buf(struct pci_dev *pdev) if (bfi_image_ct_size == 0) bfad_read_firmware(pdev, &bfi_image_ct, &bfi_image_ct_size, BFAD_FW_FILE_CT); - return(bfi_image_ct); + return bfi_image_ct; } else { if (bfi_image_cb_size == 0) bfad_read_firmware(pdev, &bfi_image_cb, &bfi_image_cb_size, BFAD_FW_FILE_CB); - return(bfi_image_cb); + return bfi_image_cb; } } diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 158c99243c08..ae3a0689a66f 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -1050,7 +1050,7 @@ bfad_im_itnim_work_handler(struct work_struct *work) } else { printk(KERN_WARNING "%s: itnim %llx is already in online state\n", - __FUNCTION__, + __func__, bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); } diff --git a/drivers/scsi/bfa/bfad_im_compat.h b/drivers/scsi/bfa/bfad_im_compat.h index 1d3e74ec338c..b36be15044a4 100644 --- a/drivers/scsi/bfa/bfad_im_compat.h +++ b/drivers/scsi/bfa/bfad_im_compat.h @@ -31,7 +31,7 @@ u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, static inline u32 * bfad_load_fwimg(struct pci_dev *pdev) { - return(bfad_get_firmware_buf(pdev)); + return bfad_get_firmware_buf(pdev); } static inline void diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c index f104e029cac9..7de8832f6fee 100644 --- a/drivers/scsi/bfa/bfad_intr.c +++ b/drivers/scsi/bfa/bfad_intr.c @@ -23,13 +23,12 @@ BFA_TRC_FILE(LDRV, INTR); /** * bfa_isr BFA driver interrupt functions */ -irqreturn_t bfad_intx(int irq, void *dev_id); static int msix_disable; module_param(msix_disable, int, S_IRUGO | S_IWUSR); /** * Line based interrupt handler. 
*/ -irqreturn_t +static irqreturn_t bfad_intx(int irq, void *dev_id) { struct bfad_s *bfad = dev_id; diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c index a8b14c47b009..a4b5dd449573 100644 --- a/drivers/scsi/bfa/fabric.c +++ b/drivers/scsi/bfa/fabric.c @@ -36,12 +36,12 @@ BFA_TRC_FILE(FCS, FABRIC); #define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */ #define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ -#define bfa_fcs_fabric_set_opertype(__fabric) do { \ - if (bfa_pport_get_topology((__fabric)->fcs->bfa) \ - == BFA_PPORT_TOPOLOGY_P2P) \ - (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \ - else \ - (__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT; \ +#define bfa_fcs_fabric_set_opertype(__fabric) do { \ + if (bfa_pport_get_topology((__fabric)->fcs->bfa) \ + == BFA_PPORT_TOPOLOGY_P2P) \ + (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \ + else \ + (__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT; \ } while (0) /* @@ -887,7 +887,7 @@ bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs) bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric) { - return (bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback)); + return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback); } enum bfa_pport_type @@ -974,7 +974,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric) int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric) { - return (bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online)); + return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); } @@ -1015,7 +1015,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric) { - return (fabric->num_vports); + return fabric->num_vports; } /** diff --git a/drivers/scsi/bfa/fcbuild.c b/drivers/scsi/bfa/fcbuild.c index d174706b9caa..fee5456451cb 100644 --- a/drivers/scsi/bfa/fcbuild.c +++ b/drivers/scsi/bfa/fcbuild.c @@ -188,14 +188,14 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len) switch (els_cmd->els_code) { case FC_ELS_LS_RJT: if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) - return (FC_PARSE_BUSY); + return FC_PARSE_BUSY; else - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; case FC_ELS_ACC: - return (FC_PARSE_OK); + return FC_PARSE_OK; } - return (FC_PARSE_OK); + return FC_PARSE_OK; } static void @@ -228,7 +228,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); - return (sizeof(struct fc_logi_s)); + return sizeof(struct fc_logi_s); } u16 @@ -267,7 +267,7 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, flogi->csp.npiv_supp = 1; /* @todo. 
field name is not correct */ vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD); - return (sizeof(struct fc_logi_s)); + return sizeof(struct fc_logi_s); } u16 @@ -287,7 +287,7 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, flogi->csp.bbcred = bfa_os_htons(local_bb_credits); - return (sizeof(struct fc_logi_s)); + return sizeof(struct fc_logi_s); } u16 @@ -306,7 +306,7 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, flogi->port_name = port_name; flogi->node_name = node_name; - return (sizeof(struct fc_logi_s)); + return sizeof(struct fc_logi_s); } u16 @@ -338,26 +338,26 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) case FC_ELS_LS_RJT: ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1); if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) - return (FC_PARSE_BUSY); + return FC_PARSE_BUSY; else - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; case FC_ELS_ACC: plogi = (struct fc_logi_s *) (fchs + 1); if (len < sizeof(struct fc_logi_s)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (!wwn_is_equal(plogi->port_name, port_name)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (!plogi->class3.class_valid) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; - return (FC_PARSE_OK); + return FC_PARSE_OK; default: - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; } } @@ -372,7 +372,7 @@ fc_plogi_parse(struct fchs_s *fchs) if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ) || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ) || (plogi->class3.rxsz == 0)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; return FC_PARSE_OK; } @@ -393,7 +393,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, prli->parampage.servparams.task_retry_id = 0; prli->parampage.servparams.confirm = 1; - return (sizeof(struct fc_prli_s)); + return sizeof(struct fc_prli_s); } u16 @@ -414,41 +414,41 @@ fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, prli->parampage.rspcode = FC_PRLI_ACC_XQTD; - return (sizeof(struct fc_prli_s)); + return sizeof(struct fc_prli_s); } enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len) { if (len < sizeof(struct fc_prli_s)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (prli->command != FC_ELS_ACC) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD) && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (prli->parampage.servparams.target != 1) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; - return (FC_PARSE_OK); + return FC_PARSE_OK; } enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli) { if (prli->parampage.type != FC_TYPE_FCP) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (!prli->parampage.imagepair) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (!prli->parampage.servparams.initiator) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; - return (FC_PARSE_OK); + return FC_PARSE_OK; } u16 @@ -462,7 +462,7 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, logo->nport_id = (s_id); logo->orig_port_name = port_name; - return (sizeof(struct fc_logo_s)); + return sizeof(struct fc_logo_s); } static u16 @@ -484,7 +484,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, adisc->orig_node_name = node_name; 
adisc->nport_id = (s_id); - return (sizeof(struct fc_adisc_s)); + return sizeof(struct fc_adisc_s); } u16 @@ -511,15 +511,15 @@ fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name, { if (len < sizeof(struct fc_adisc_s)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (adisc->els_cmd.els_code != FC_ELS_ACC) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (!wwn_is_equal(adisc->orig_port_name, port_name)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; - return (FC_PARSE_OK); + return FC_PARSE_OK; } enum fc_parse_status @@ -529,14 +529,14 @@ fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; if (adisc->els_cmd.els_code != FC_ELS_ACC) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if ((adisc->nport_id == (host_dap)) && wwn_is_equal(adisc->orig_port_name, port_name) && wwn_is_equal(adisc->orig_node_name, node_name)) - return (FC_PARSE_OK); + return FC_PARSE_OK; - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; } enum fc_parse_status @@ -550,13 +550,13 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) if ((bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ - sizeof(struct fchs_s))) || (pdisc->class3.rxsz == 0)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (!wwn_is_equal(pdisc->port_name, port_name)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; if (!wwn_is_equal(pdisc->node_name, node_name)) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; return FC_PARSE_OK; } @@ -570,7 +570,7 @@ fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) fchs->s_id = (s_id); fchs->ox_id = bfa_os_htons(ox_id); - return (sizeof(struct fchs_s)); + return sizeof(struct fchs_s); } enum fc_parse_status @@ -578,9 +578,9 @@ fc_abts_rsp_parse(struct fchs_s *fchs, int len) { if ((fchs->cat_info == FC_CAT_BA_ACC) || (fchs->cat_info == FC_CAT_BA_RJT)) - return (FC_PARSE_OK); + return FC_PARSE_OK; - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; } u16 @@ -597,7 +597,7 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, rrq->ox_id = bfa_os_htons(rrq_oxid); rrq->rx_id = FC_RXID_ANY; - return (sizeof(struct fc_rrq_s)); + return sizeof(struct fc_rrq_s); } u16 @@ -611,7 +611,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, memset(acc, 0, sizeof(struct fc_els_cmd_s)); acc->els_code = FC_ELS_ACC; - return (sizeof(struct fc_els_cmd_s)); + return sizeof(struct fc_els_cmd_s); } u16 @@ -627,7 +627,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, ls_rjt->reason_code_expl = reason_code_expl; ls_rjt->vendor_unique = 0x00; - return (sizeof(struct fc_ls_rjt_s)); + return sizeof(struct fc_ls_rjt_s); } u16 @@ -643,7 +643,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, ba_acc->ox_id = fchs->ox_id; ba_acc->rx_id = fchs->rx_id; - return (sizeof(struct fc_ba_acc_s)); + return sizeof(struct fc_ba_acc_s); } u16 @@ -654,7 +654,7 @@ fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); els_cmd->els_code = FC_ELS_ACC; - return (sizeof(struct fc_els_cmd_s)); + return sizeof(struct fc_els_cmd_s); } int @@ -696,7 +696,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; } - return (bfa_os_ntohs(tprlo_acc->payload_len)); + return 
bfa_os_ntohs(tprlo_acc->payload_len); } u16 @@ -721,7 +721,7 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, prlo_acc->prlo_acc_params[page].resp_process_assc = 0; } - return (bfa_os_ntohs(prlo_acc->payload_len)); + return bfa_os_ntohs(prlo_acc->payload_len); } u16 @@ -735,7 +735,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, rnid->els_cmd.els_code = FC_ELS_RNID; rnid->node_id_data_format = data_format; - return (sizeof(struct fc_rnid_cmd_s)); + return sizeof(struct fc_rnid_cmd_s); } u16 @@ -759,10 +759,10 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, rnid_acc->specific_id_data_length = sizeof(struct fc_rnid_general_topology_data_s); bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data); - return (sizeof(struct fc_rnid_acc_s)); + return sizeof(struct fc_rnid_acc_s); } else { - return (sizeof(struct fc_rnid_acc_s) - - sizeof(struct fc_rnid_general_topology_data_s)); + return sizeof(struct fc_rnid_acc_s) - + sizeof(struct fc_rnid_general_topology_data_s); } } @@ -776,7 +776,7 @@ fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); rpsc->els_cmd.els_code = FC_ELS_RPSC; - return (sizeof(struct fc_rpsc_cmd_s)); + return sizeof(struct fc_rpsc_cmd_s); } u16 @@ -797,8 +797,8 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, for (i = 0; i < npids; i++) rpsc2->pid_list[i].pid = pid_list[i]; - return (sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * - (sizeof(u32)))); + return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * + (sizeof(u32))); } u16 @@ -819,7 +819,7 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, rpsc_acc->speed_info[0].port_op_speed = bfa_os_htons(oper_speed->port_op_speed); - return (sizeof(struct fc_rpsc_acc_s)); + return sizeof(struct fc_rpsc_acc_s); } @@ -856,7 +856,7 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, pdisc->port_name = port_name; pdisc->node_name = node_name; - return (sizeof(struct fc_logi_s)); + return sizeof(struct fc_logi_s); } u16 @@ -865,21 +865,21 @@ fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); if (len < sizeof(struct fc_logi_s)) - return (FC_PARSE_LEN_INVAL); + return FC_PARSE_LEN_INVAL; if (pdisc->els_cmd.els_code != FC_ELS_ACC) - return (FC_PARSE_ACC_INVAL); + return FC_PARSE_ACC_INVAL; if (!wwn_is_equal(pdisc->port_name, port_name)) - return (FC_PARSE_PWWN_NOT_EQUAL); + return FC_PARSE_PWWN_NOT_EQUAL; if (!pdisc->class3.class_valid) - return (FC_PARSE_NWWN_NOT_EQUAL); + return FC_PARSE_NWWN_NOT_EQUAL; if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) - return (FC_PARSE_RXSZ_INVAL); + return FC_PARSE_RXSZ_INVAL; - return (FC_PARSE_OK); + return FC_PARSE_OK; } u16 @@ -903,7 +903,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, prlo->prlo_params[page].resp_process_assc = 0; } - return (bfa_os_ntohs(prlo->payload_len)); + return bfa_os_ntohs(prlo->payload_len); } u16 @@ -916,7 +916,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len) len = len; if (prlo->command != FC_ELS_ACC) - return (FC_PARSE_FAILURE); + return FC_PARSE_FAILURE; num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16; @@ -936,7 +936,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len) if (prlo->prlo_acc_params[page].resp_process_assc != 0) return FC_PARSE_FAILURE; } - return (FC_PARSE_OK); + return FC_PARSE_OK; } @@ -968,7 +968,7 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, 
u32 s_id, } } - return (bfa_os_ntohs(tprlo->payload_len)); + return bfa_os_ntohs(tprlo->payload_len); } u16 @@ -981,23 +981,23 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len) len = len; if (tprlo->command != FC_ELS_ACC) - return (FC_PARSE_ACC_INVAL); + return FC_PARSE_ACC_INVAL; num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; for (page = 0; page < num_pages; page++) { if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP) - return (FC_PARSE_NOT_FCP); + return FC_PARSE_NOT_FCP; if (tprlo->tprlo_acc_params[page].opa_valid != 0) - return (FC_PARSE_OPAFLAG_INVAL); + return FC_PARSE_OPAFLAG_INVAL; if (tprlo->tprlo_acc_params[page].rpa_valid != 0) - return (FC_PARSE_RPAFLAG_INVAL); + return FC_PARSE_RPAFLAG_INVAL; if (tprlo->tprlo_acc_params[page].orig_process_assc != 0) - return (FC_PARSE_OPA_INVAL); + return FC_PARSE_OPA_INVAL; if (tprlo->tprlo_acc_params[page].resp_process_assc != 0) - return (FC_PARSE_RPA_INVAL); + return FC_PARSE_RPA_INVAL; } - return (FC_PARSE_OK); + return FC_PARSE_OK; } enum fc_parse_status @@ -1024,7 +1024,7 @@ fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, fchs->cat_info = FC_CAT_BA_RJT; ba_rjt->reason_code = reason_code; ba_rjt->reason_expl = reason_expl; - return (sizeof(struct fc_ba_rjt_s)); + return sizeof(struct fc_ba_rjt_s); } static void @@ -1073,7 +1073,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); gidpn->port_name = port_name; - return (sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s)); + return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s); } u16 @@ -1090,7 +1090,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); gpnid->dap = port_id; - return (sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s)); + return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s); } u16 @@ -1107,7 +1107,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); gnnid->dap = port_id; - return (sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s)); + return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); } u16 @@ -1137,7 +1137,7 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg, if (set_br_reg) scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE; - return (sizeof(struct fc_scr_s)); + return sizeof(struct fc_scr_s); } u16 @@ -1157,7 +1157,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id, rscn->event[0].format = FC_RSCN_FORMAT_PORTID; rscn->event[0].portid = s_id; - return (sizeof(struct fc_rscn_pl_s)); + return sizeof(struct fc_rscn_pl_s); } u16 @@ -1188,7 +1188,7 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, rftid->fc4_type[index] |= bfa_os_htonl(type_value); } - return (sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s)); + return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); } u16 @@ -1210,7 +1210,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, (bitmap_size < 32 ? 
bitmap_size : 32)); - return (sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s)); + return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); } u16 @@ -1231,7 +1231,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, rffid->fc4ftr_bits = fc4_ftrs; rffid->fc4_type = fc4_type; - return (sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s)); + return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s); } u16 @@ -1253,7 +1253,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, rspnid->spn_len = (u8) strlen((char *)name); strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len); - return (sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s)); + return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s); } u16 @@ -1275,7 +1275,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, gidft->domain_id = 0; gidft->area_id = 0; - return (sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s)); + return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s); } u16 @@ -1294,7 +1294,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, rpnid->port_id = port_id; rpnid->port_name = port_name; - return (sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s)); + return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s); } u16 @@ -1313,7 +1313,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, rnnid->port_id = port_id; rnnid->node_name = node_name; - return (sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s)); + return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s); } u16 @@ -1332,7 +1332,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, rcsid->port_id = port_id; rcsid->cos = cos; - return (sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s)); + return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s); } u16 @@ -1351,7 +1351,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, rptid->port_id = port_id; rptid->port_type = port_type; - return (sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s)); + return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s); } u16 @@ -1368,7 +1368,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); ganxt->port_id = port_id; - return (sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s)); + return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); } /* @@ -1385,7 +1385,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); - return (sizeof(struct ct_hdr_s)); + return sizeof(struct ct_hdr_s); } /* @@ -1425,7 +1425,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t)); gmal->wwn = wwn; - return (sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t)); + return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t); } /* @@ -1445,5 +1445,5 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t)); gfn->wwn = wwn; - return (sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t)); + return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t); } diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h index 4d248424f7b3..8fa7f270ef7b 100644 --- a/drivers/scsi/bfa/fcbuild.h +++ b/drivers/scsi/bfa/fcbuild.h @@ -32,8 +32,8 @@ 
* Utility Macros/functions */ -#define fcif_sof_set(_ifhdr, _sof) (_ifhdr)->sof = FC_ ## _sof -#define fcif_eof_set(_ifhdr, _eof) (_ifhdr)->eof = FC_ ## _eof +#define fcif_sof_set(_ifhdr, _sof) ((_ifhdr)->sof = FC_ ## _sof) +#define fcif_eof_set(_ifhdr, _eof) ((_ifhdr)->eof = FC_ ## _eof) #define wwn_is_equal(_wwn1, _wwn2) \ (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0) @@ -49,7 +49,7 @@ static inline u32 fc_get_ctresp_pyld_len(u32 resp_len) { - return (resp_len - sizeof(struct ct_hdr_s)); + return resp_len - sizeof(struct ct_hdr_s); } /* diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c index 8ce5d8934677..1f3c06efaa9e 100644 --- a/drivers/scsi/bfa/fcpim.c +++ b/drivers/scsi/bfa/fcpim.c @@ -286,11 +286,10 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); bfa_fcb_itnim_offline(itnim->itnim_drv); bfa_itnim_offline(itnim->bfa_itnim); - if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE) { + if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE) bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT); - } else { + else bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE); - } break; case BFA_FCS_ITNIM_SM_DELETE: @@ -732,7 +731,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn) return NULL; bfa_assert(rport->itnim != NULL); - return (rport->itnim); + return rport->itnim; } bfa_status_t diff --git a/drivers/scsi/bfa/fcs.h b/drivers/scsi/bfa/fcs.h index deee685e8478..8d08230e6295 100644 --- a/drivers/scsi/bfa/fcs.h +++ b/drivers/scsi/bfa/fcs.h @@ -23,7 +23,7 @@ #ifndef __FCS_H__ #define __FCS_H__ -#define __fcs_min_cfg(__fcs) (__fcs)->min_cfg +#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg) void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs); diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c index b845eb272c78..df2a1e54e16b 100644 --- a/drivers/scsi/bfa/fdmi.c +++ b/drivers/scsi/bfa/fdmi.c @@ -72,9 +72,9 @@ static u16 bfa_fcs_port_fdmi_build_rpa_pyld( struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld); static u16 bfa_fcs_port_fdmi_build_portattr_block( struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld); -void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, +static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, struct bfa_fcs_fdmi_hba_attr_s *hba_attr); -void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi, +static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi, struct bfa_fcs_fdmi_port_attr_s *port_attr); /** * fcs_fdmi_sm FCS FDMI state machine @@ -1091,7 +1091,7 @@ bfa_fcs_port_fdmi_timeout(void *arg) bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT); } -void +static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, struct bfa_fcs_fdmi_hba_attr_s *hba_attr) { @@ -1145,7 +1145,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, } -void +static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi, struct bfa_fcs_fdmi_port_attr_s *port_attr) { diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h index da8cac093d3d..d9cbc2a783d4 100644 --- a/drivers/scsi/bfa/include/aen/bfa_aen.h +++ b/drivers/scsi/bfa/include/aen/bfa_aen.h @@ -54,7 +54,7 @@ bfa_aen_get_max_cfg_entry(void) static inline s32 bfa_aen_get_meminfo(void) { - return (sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry()); + return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry(); } static inline s32 diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h index 
64c1412c5703..d4bc0d9fa42c 100644 --- a/drivers/scsi/bfa/include/bfa.h +++ b/drivers/scsi/bfa/include/bfa.h @@ -76,11 +76,11 @@ struct bfa_meminfo_s { struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX]; }; #define bfa_meminfo_kva(_m) \ - (_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp + ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp) #define bfa_meminfo_dma_virt(_m) \ - (_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp + ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp) #define bfa_meminfo_dma_phys(_m) \ - (_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp + ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp) /** * Generic Scatter Gather Element used by driver @@ -100,7 +100,7 @@ struct bfa_sge_s { /* * bfa stats interfaces */ -#define bfa_stats(_mod, _stats) (_mod)->stats._stats ++ +#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++) #define bfa_ioc_get_stats(__bfa, __ioc_stats) \ bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats) @@ -136,7 +136,7 @@ void bfa_isr_enable(struct bfa_s *bfa); void bfa_isr_disable(struct bfa_s *bfa); void bfa_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, u32 *num_vecs, u32 *max_vec_bit); -#define bfa_msix(__bfa, __vec) (__bfa)->msix.handler[__vec](__bfa, __vec) +#define bfa_msix(__bfa, __vec) ((__bfa)->msix.handler[__vec](__bfa, __vec)) void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q); void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q); diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h index 0c80b74f72ef..268d956bad89 100644 --- a/drivers/scsi/bfa/include/bfa_svc.h +++ b/drivers/scsi/bfa/include/bfa_svc.h @@ -34,10 +34,10 @@ struct bfa_fcxp_s; */ struct bfa_rport_info_s { u16 max_frmsz; /* max rcv pdu size */ - u32 pid : 24, /* remote port ID */ - lp_tag : 8; - u32 local_pid : 24, /* local port ID */ - cisc : 8; /* CIRO supported */ + u32 pid:24, /* remote port ID */ + lp_tag:8; + u32 local_pid:24, /* local port ID */ + cisc:8; /* CIRO supported */ u8 fc_class; /* supported FC classes. 
enum fc_cos */ u8 vf_en; /* virtual fabric enable */ u16 vf_id; /* virtual fabric ID */ diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h index 6cadfe0d4ba1..7042c18e542d 100644 --- a/drivers/scsi/bfa/include/bfi/bfi.h +++ b/drivers/scsi/bfa/include/bfi/bfi.h @@ -93,13 +93,13 @@ union bfi_addr_u { */ struct bfi_sge_s { #ifdef __BIGENDIAN - u32 flags : 2, - rsvd : 2, - sg_len : 28; + u32 flags:2, + rsvd:2, + sg_len:28; #else - u32 sg_len : 28, - rsvd : 2, - flags : 2; + u32 sg_len:28, + rsvd:2, + flags:2; #endif union bfi_addr_u sga; }; diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h index 026e9c06ae97..96ef05670659 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h +++ b/drivers/scsi/bfa/include/bfi/bfi_ioc.h @@ -142,7 +142,7 @@ enum { BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */ }; -#define BFI_ADAPTER_GETP(__prop,__adap_prop) \ +#define BFI_ADAPTER_GETP(__prop, __adap_prop) \ (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \ BFI_ADAPTER_ ## __prop ## _SH) #define BFI_ADAPTER_SETP(__prop, __val) \ diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h index 414b0e30f6ef..c59d47badb4b 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_lps.h +++ b/drivers/scsi/bfa/include/bfi/bfi_lps.h @@ -55,8 +55,8 @@ struct bfi_lps_login_rsp_s { u16 bb_credit; u8 f_port; u8 npiv_en; - u32 lp_pid : 24; - u32 auth_req : 8; + u32 lp_pid:24; + u32 auth_req:8; mac_t lp_mac; mac_t fcf_mac; u8 ext_status; diff --git a/drivers/scsi/bfa/include/bfi/bfi_rport.h b/drivers/scsi/bfa/include/bfi/bfi_rport.h index 3520f55f09d7..e1cd83b56ec6 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_rport.h +++ b/drivers/scsi/bfa/include/bfi/bfi_rport.h @@ -38,10 +38,10 @@ struct bfi_rport_create_req_s { struct bfi_mhdr_s mh; /* common msg header */ u16 bfa_handle; /* host rport handle */ u16 max_frmsz; /* max rcv pdu size */ - u32 pid : 24, /* remote port ID */ - lp_tag : 8; /* local port tag */ - u32 local_pid : 24, /* local port ID */ - cisc : 8; + u32 pid:24, /* remote port ID */ + lp_tag:8; /* local port tag */ + u32 local_pid:24, /* local port ID */ + cisc:8; u8 fc_class; /* supported FC classes */ u8 vf_en; /* virtual fabric enable */ u16 vf_id; /* virtual fabric ID */ diff --git a/drivers/scsi/bfa/include/cs/bfa_checksum.h b/drivers/scsi/bfa/include/cs/bfa_checksum.h index af8c1d533ba8..650f8d0aaff9 100644 --- a/drivers/scsi/bfa/include/cs/bfa_checksum.h +++ b/drivers/scsi/bfa/include/cs/bfa_checksum.h @@ -31,7 +31,7 @@ bfa_checksum_u32(u32 *buf, int sz) for (i = 0; i < m; i++) sum ^= buf[i]; - return (sum); + return sum; } static inline u16 @@ -43,7 +43,7 @@ bfa_checksum_u16(u16 *buf, int sz) for (i = 0; i < m; i++) sum ^= buf[i]; - return (sum); + return sum; } static inline u8 @@ -55,6 +55,6 @@ bfa_checksum_u8(u8 *buf, int sz) for (i = 0; i < sz; i++) sum ^= buf[i]; - return (sum); + return sum; } #endif diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h index 9877066680a6..b0a92baf6657 100644 --- a/drivers/scsi/bfa/include/cs/bfa_sm.h +++ b/drivers/scsi/bfa/include/cs/bfa_sm.h @@ -24,8 +24,8 @@ typedef void (*bfa_sm_t)(void *sm, int event); -#define bfa_sm_set_state(_sm, _state) (_sm)->sm = (bfa_sm_t)(_state) -#define bfa_sm_send_event(_sm, _event) (_sm)->sm((_sm), (_event)) +#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) +#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event))) #define bfa_sm_get_state(_sm) ((_sm)->sm) 
#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) @@ -62,7 +62,7 @@ typedef void (*bfa_fsm_t)(void *fsm, int event); } while (0) #define bfa_fsm_send_event(_fsm, _event) \ - (_fsm)->fsm((_fsm), (_event)) + ((_fsm)->fsm((_fsm), (_event))) #define bfa_fsm_cmp_state(_fsm, _state) \ ((_fsm)->fsm == (bfa_fsm_t)(_state)) diff --git a/drivers/scsi/bfa/include/cs/bfa_trc.h b/drivers/scsi/bfa/include/cs/bfa_trc.h index 3e743928c74c..310771c888e7 100644 --- a/drivers/scsi/bfa/include/cs/bfa_trc.h +++ b/drivers/scsi/bfa/include/cs/bfa_trc.h @@ -24,7 +24,7 @@ #endif #ifndef BFA_TRC_TS -#define BFA_TRC_TS(_trcm) ((_trcm)->ticks ++) +#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++) #endif struct bfa_trc_s { diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h index 4ffd2242d3de..08b79d5e46f3 100644 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h +++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h @@ -75,7 +75,7 @@ struct bfa_fcs_fabric_s { */ }; -#define bfa_fcs_fabric_npiv_capable(__f) (__f)->is_npiv +#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv) #define bfa_fcs_fabric_is_switched(__f) \ ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED) diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h index b85cba884b96..967ceb0eb074 100644 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h +++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h @@ -125,12 +125,12 @@ union bfa_fcs_port_topo_u { struct bfa_fcs_port_s { struct list_head qe; /* used by port/vport */ bfa_sm_t sm; /* state machine */ - struct bfa_fcs_fabric_s *fabric; /* parent fabric */ - struct bfa_port_cfg_s port_cfg; /* port configuration */ + struct bfa_fcs_fabric_s *fabric;/* parent fabric */ + struct bfa_port_cfg_s port_cfg;/* port configuration */ struct bfa_timer_s link_timer; /* timer for link offline */ - u32 pid : 24; /* FC address */ - u8 lp_tag; /* lport tag */ - u16 num_rports; /* Num of r-ports */ + u32 pid:24; /* FC address */ + u8 lp_tag; /* lport tag */ + u16 num_rports; /* Num of r-ports */ struct list_head rport_q; /* queue of discovered r-ports */ struct bfa_fcs_s *fcs; /* FCS instance */ union bfa_fcs_port_topo_u port_topo; /* fabric/loop/n2n details */ @@ -188,13 +188,14 @@ bfa_fcs_port_get_drvport(struct bfa_fcs_port_s *port) } -#define bfa_fcs_port_get_opertype(_lport) (_lport)->fabric->oper_type +#define bfa_fcs_port_get_opertype(_lport) ((_lport)->fabric->oper_type) -#define bfa_fcs_port_get_fabric_name(_lport) (_lport)->fabric->fabric_name +#define bfa_fcs_port_get_fabric_name(_lport) ((_lport)->fabric->fabric_name) -#define bfa_fcs_port_get_fabric_ipaddr(_lport) (_lport)->fabric->fabric_ip_addr +#define bfa_fcs_port_get_fabric_ipaddr(_lport) \ + ((_lport)->fabric->fabric_ip_addr) /** * bfa fcs port public functions diff --git a/drivers/scsi/bfa/include/protocol/ct.h b/drivers/scsi/bfa/include/protocol/ct.h index c59d6630b070..c8648bcba41a 100644 --- a/drivers/scsi/bfa/include/protocol/ct.h +++ b/drivers/scsi/bfa/include/protocol/ct.h @@ -193,11 +193,11 @@ struct fcgs_rftid_req_s { #define FC_GS_FCP_FC4_FEATURE_TARGET 0x01 struct fcgs_rffid_req_s{ - u32 rsvd :8; - u32 dap :24; /* port identifier */ - u32 rsvd1 :16; - u32 fc4ftr_bits :8; /* fc4 feature bits */ - u32 fc4_type :8; /* corresponding FC4 Type */ + u32 rsvd:8; + u32 dap:24; /* port identifier */ + u32 rsvd1:16; + u32 fc4ftr_bits:8; /* fc4 feature bits */ + u32 fc4_type:8; /* corresponding FC4 Type */ }; /** diff --git 
a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h index 3e39ba58cfb5..14969eecf6a9 100644 --- a/drivers/scsi/bfa/include/protocol/fc.h +++ b/drivers/scsi/bfa/include/protocol/fc.h @@ -486,14 +486,14 @@ struct fc_rsi_s { * see FC-PH-X table 113 & 115 for explanation also FCP table 8 */ struct fc_prli_params_s{ - u32 reserved: 16; + u32 reserved:16; #ifdef __BIGENDIAN - u32 reserved1: 5; - u32 rec_support : 1; - u32 task_retry_id : 1; - u32 retry : 1; + u32 reserved1:5; + u32 rec_support:1; + u32 task_retry_id:1; + u32 retry:1; - u32 confirm : 1; + u32 confirm:1; u32 doverlay:1; u32 initiator:1; u32 target:1; @@ -502,10 +502,10 @@ struct fc_prli_params_s{ u32 rxrdisab:1; u32 wxrdisab:1; #else - u32 retry : 1; - u32 task_retry_id : 1; - u32 rec_support : 1; - u32 reserved1: 5; + u32 retry:1; + u32 task_retry_id:1; + u32 rec_support:1; + u32 reserved1:5; u32 wxrdisab:1; u32 rxrdisab:1; @@ -514,7 +514,7 @@ struct fc_prli_params_s{ u32 target:1; u32 initiator:1; u32 doverlay:1; - u32 confirm : 1; + u32 confirm:1; #endif }; diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c index a418dedebe9e..f7c7f4f3c640 100644 --- a/drivers/scsi/bfa/loop.c +++ b/drivers/scsi/bfa/loop.c @@ -58,49 +58,16 @@ static const u8 port_loop_alpa_map[] = { /* * Local Functions */ -bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, - u8 alpa); - -void bfa_fcs_port_loop_plogi_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); - -bfa_status_t bfa_fcs_port_loop_send_adisc(struct bfa_fcs_port_s *port, - u8 alpa); - -void bfa_fcs_port_loop_adisc_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); - -bfa_status_t bfa_fcs_port_loop_send_plogi_acc(struct bfa_fcs_port_s *port, - u8 alpa); - -void bfa_fcs_port_loop_plogi_acc_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); - -bfa_status_t bfa_fcs_port_loop_send_adisc_acc(struct bfa_fcs_port_s *port, - u8 alpa); - -void bfa_fcs_port_loop_adisc_acc_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); +static bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, + u8 alpa); + +static void bfa_fcs_port_loop_plogi_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); /** * Called by port to initializar in provate LOOP topology. */ @@ -179,7 +146,7 @@ bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port) /** * Local Functions. 
*/ -bfa_status_t +static bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa) { struct fchs_s fchs; @@ -208,7 +175,7 @@ bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa) /** * Called by fcxp to notify the Plogi response */ -void +static void bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, @@ -244,179 +211,3 @@ bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, bfa_assert(0); } } - -bfa_status_t -bfa_fcs_port_loop_send_plogi_acc(struct bfa_fcs_port_s *port, u8 alpa) -{ - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - int len; - - bfa_trc(port->fcs, alpa); - - fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL, - NULL); - bfa_assert(fcxp); - - len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, - bfa_fcs_port_get_fcid(port), 0, - port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_pport_get_maxfrsize(port->fcs->bfa)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, - bfa_fcs_port_loop_plogi_acc_response, - (void *)port, FC_MAX_PDUSZ, 0); /* No response - * expected - */ - - return BFA_STATUS_OK; -} - -/* - * Plogi Acc Response - * We donot do any processing here. - */ -void -bfa_fcs_port_loop_plogi_acc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - - struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg; - - bfa_trc(port->fcs, port->pid); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - return; - } -} - -bfa_status_t -bfa_fcs_port_loop_send_adisc(struct bfa_fcs_port_s *port, u8 alpa) -{ - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - int len; - - bfa_trc(port->fcs, alpa); - - fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL, - NULL); - bfa_assert(fcxp); - - len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, - bfa_fcs_port_get_fcid(port), 0, - port->port_cfg.pwwn, port->port_cfg.nwwn); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, - bfa_fcs_port_loop_adisc_response, (void *)port, - FC_MAX_PDUSZ, FC_RA_TOV); - - return BFA_STATUS_OK; -} - -/** - * Called by fcxp to notify the ADISC response - */ -void -bfa_fcs_port_loop_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg; - struct bfa_fcs_rport_s *rport; - struct fc_adisc_s *adisc_resp; - struct fc_els_cmd_s *els_cmd; - u32 pid = rsp_fchs->s_id; - - bfa_trc(port->fcs, req_status); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - /* - * TBD : we may need to retry certain requests - */ - bfa_fcxp_free(fcxp); - return; - } - - els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); - adisc_resp = (struct fc_adisc_s *) els_cmd; - - if (els_cmd->els_code == FC_ELS_ACC) { - } else { - bfa_trc(port->fcs, adisc_resp->els_cmd.els_code); - - /* - * TBD: we may need to check for reject codes and retry - */ - rport = bfa_fcs_port_get_rport_by_pid(port, pid); - if (rport) { - list_del(&rport->qe); - bfa_fcs_rport_delete(rport); - } - - } - return; -} - -bfa_status_t -bfa_fcs_port_loop_send_adisc_acc(struct bfa_fcs_port_s *port, u8 alpa) -{ - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - int 
len; - - bfa_trc(port->fcs, alpa); - - fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL, - NULL); - bfa_assert(fcxp); - - len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, - bfa_fcs_port_get_fcid(port), 0, - port->port_cfg.pwwn, port->port_cfg.nwwn); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, - bfa_fcs_port_loop_adisc_acc_response, - (void *)port, FC_MAX_PDUSZ, 0); /* no reponse - * expected - */ - - return BFA_STATUS_OK; -} - -/* - * Adisc Acc Response - * We donot do any processing here. - */ -void -bfa_fcs_port_loop_adisc_acc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - - struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg; - - bfa_trc(port->fcs, port->pid); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - return; - } -} diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c index 8f51a83f1834..1e06792cd4c2 100644 --- a/drivers/scsi/bfa/lport_api.c +++ b/drivers/scsi/bfa/lport_api.c @@ -43,7 +43,7 @@ bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg) struct bfa_fcs_port_s * bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) { - return (&fcs->fabric.bport); + return &fcs->fabric.bport; } wwn_t @@ -88,11 +88,10 @@ bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn, int index, } bfa_trc(fcs, i); - if (rport) { + if (rport) return rport->pwwn; - } else { + else return (wwn_t) 0; - } } void @@ -198,17 +197,17 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) vf = bfa_fcs_vf_lookup(fcs, vf_id); if (vf == NULL) { bfa_trc(fcs, vf_id); - return (NULL); + return NULL; } if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn)) - return (&vf->bport); + return &vf->bport; vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn); if (vport) - return (&vport->lport); + return &vport->lport; - return (NULL); + return NULL; } /* diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c index 59fea99d67a4..2f8b880060bb 100644 --- a/drivers/scsi/bfa/ns.c +++ b/drivers/scsi/bfa/ns.c @@ -932,11 +932,10 @@ bfa_fcs_port_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) } ns->fcxp = fcxp; - if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { + if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR; - } else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) { + else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) fc4_ftrs = FC_GS_FCP_FC4_FEATURE_TARGET; - } len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_port_get_fcid(port), 0, FC_TYPE_FCP, diff --git a/drivers/scsi/bfa/plog.c b/drivers/scsi/bfa/plog.c index 86af818d17bb..fcb8864d3276 100644 --- a/drivers/scsi/bfa/plog.c +++ b/drivers/scsi/bfa/plog.c @@ -180,5 +180,5 @@ bfa_plog_disable(struct bfa_plog_s *plog) bfa_boolean_t bfa_plog_get_setting(struct bfa_plog_s *plog) { - return((bfa_boolean_t)plog->plog_enabled); + return (bfa_boolean_t)plog->plog_enabled; } diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c index 8a1f59d596c1..e1932c885ac2 100644 --- a/drivers/scsi/bfa/rport_ftrs.c +++ b/drivers/scsi/bfa/rport_ftrs.c @@ -79,7 +79,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_trc(rport->fcs, event); switch (event) { - case RPFSM_EVENT_RPORT_ONLINE : + case RPFSM_EVENT_RPORT_ONLINE: if (!BFA_FCS_PID_IS_WKA(rport->pid)) { bfa_sm_set_state(rpf, 
bfa_fcs_rpf_sm_rpsc_sending); rpf->rpsc_retries = 0; @@ -87,7 +87,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) break; }; - case RPFSM_EVENT_RPORT_OFFLINE : + case RPFSM_EVENT_RPORT_OFFLINE: break; default: @@ -107,7 +107,7 @@ bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc); break; - case RPFSM_EVENT_RPORT_OFFLINE : + case RPFSM_EVENT_RPORT_OFFLINE: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe); rpf->rpsc_retries = 0; @@ -130,11 +130,10 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) case RPFSM_EVENT_RPSC_COMP: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); /* Update speed info in f/w via BFA */ - if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN) { + if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN) bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed); - } else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN) { + else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN) bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed); - } break; case RPFSM_EVENT_RPSC_FAIL: @@ -154,7 +153,7 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) } break; - case RPFSM_EVENT_RPORT_OFFLINE : + case RPFSM_EVENT_RPORT_OFFLINE: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); bfa_fcxp_discard(rpf->fcxp); rpf->rpsc_retries = 0; @@ -174,13 +173,13 @@ bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_trc(rport->fcs, event); switch (event) { - case RPFSM_EVENT_TIMEOUT : + case RPFSM_EVENT_TIMEOUT: /* re-send the RPSC */ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); bfa_fcs_rpf_send_rpsc2(rpf, NULL); break; - case RPFSM_EVENT_RPORT_OFFLINE : + case RPFSM_EVENT_RPORT_OFFLINE: bfa_timer_stop(&rpf->timer); bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); rpf->rpsc_retries = 0; @@ -201,7 +200,7 @@ bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_trc(rport->fcs, event); switch (event) { - case RPFSM_EVENT_RPORT_OFFLINE : + case RPFSM_EVENT_RPORT_OFFLINE: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); rpf->rpsc_retries = 0; break; @@ -221,12 +220,12 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_trc(rport->fcs, event); switch (event) { - case RPFSM_EVENT_RPORT_ONLINE : + case RPFSM_EVENT_RPORT_ONLINE: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); bfa_fcs_rpf_send_rpsc2(rpf, NULL); break; - case RPFSM_EVENT_RPORT_OFFLINE : + case RPFSM_EVENT_RPORT_OFFLINE: break; default: @@ -366,10 +365,9 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_trc(rport->fcs, ls_rjt->reason_code); bfa_trc(rport->fcs, ls_rjt->reason_code_expl); rport->stats.rpsc_rejects++; - if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) { + if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL); - } else { + else bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); - } } } diff --git a/drivers/scsi/bfa/vfapi.c b/drivers/scsi/bfa/vfapi.c index 31d81fe2fc48..391a4790bebd 100644 --- a/drivers/scsi/bfa/vfapi.c +++ b/drivers/scsi/bfa/vfapi.c @@ -189,7 +189,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) { bfa_trc(fcs, vf_id); if (vf_id == FC_VF_ID_NULL) - return (&fcs->fabric); + return &fcs->fabric; /** * @todo vf support diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c index c10af06c5714..e90f1e38c32d 100644 --- a/drivers/scsi/bfa/vport.c +++ 
b/drivers/scsi/bfa/vport.c @@ -31,13 +31,13 @@ BFA_TRC_FILE(FCS, VPORT); -#define __vport_fcs(__vp) (__vp)->lport.fcs -#define __vport_pwwn(__vp) (__vp)->lport.port_cfg.pwwn -#define __vport_nwwn(__vp) (__vp)->lport.port_cfg.nwwn -#define __vport_bfa(__vp) (__vp)->lport.fcs->bfa -#define __vport_fcid(__vp) (__vp)->lport.pid -#define __vport_fabric(__vp) (__vp)->lport.fabric -#define __vport_vfid(__vp) (__vp)->lport.fabric->vf_id +#define __vport_fcs(__vp) ((__vp)->lport.fcs) +#define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn) +#define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn) +#define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa) +#define __vport_fcid(__vp) ((__vp)->lport.pid) +#define __vport_fabric(__vp) ((__vp)->lport.fabric) +#define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id) #define BFA_FCS_VPORT_MAX_RETRIES 5 /* @@ -641,9 +641,9 @@ bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs) bfa_get_attr(fcs->bfa, &ioc_attr); if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT) - return (BFA_FCS_MAX_VPORTS_SUPP_CT); + return BFA_FCS_MAX_VPORTS_SUPP_CT; else - return (BFA_FCS_MAX_VPORTS_SUPP_CB); + return BFA_FCS_MAX_VPORTS_SUPP_CB; } @@ -675,7 +675,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, struct bfad_vport_s *vport_drv) { if (vport_cfg->pwwn == 0) - return (BFA_STATUS_INVALID_WWN); + return BFA_STATUS_INVALID_WWN; if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn) return BFA_STATUS_VPORT_WWN_BP; -- cgit v1.2.3-59-g8ed1b From 3420d36cac2f1d28fc99290de12dd66dfaf65d8e Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Tue, 13 Oct 2009 15:16:45 -0700 Subject: [SCSI] qla2xxx: Add firmware-dump kobject uevent notification. Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- Documentation/ABI/stable/sysfs-driver-qla2xxx | 8 +++ drivers/scsi/qla2xxx/qla_dbg.c | 78 ++++++++------------------- drivers/scsi/qla2xxx/qla_def.h | 5 ++ drivers/scsi/qla2xxx/qla_gbl.h | 1 + drivers/scsi/qla2xxx/qla_os.c | 35 ++++++++++++ 5 files changed, 72 insertions(+), 55 deletions(-) create mode 100644 Documentation/ABI/stable/sysfs-driver-qla2xxx diff --git a/Documentation/ABI/stable/sysfs-driver-qla2xxx b/Documentation/ABI/stable/sysfs-driver-qla2xxx new file mode 100644 index 000000000000..9a59d84497ed --- /dev/null +++ b/Documentation/ABI/stable/sysfs-driver-qla2xxx @@ -0,0 +1,8 @@ +What: /sys/bus/pci/drivers/qla2xxx/.../devices/* +Date: September 2009 +Contact: QLogic Linux Driver +Description: qla2xxx-udev.sh currently looks for uevent CHANGE events to + signal a firmware-dump has been generated by the driver and is + ready for retrieval. +Users: qla2xxx-udev.sh. 
Proposed changes should be mailed to + linux-driver@qlogic.com diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index cca8e4ab0372..cb2eca4c26d8 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -377,6 +377,24 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) return ptr + sizeof(struct qla2xxx_mq_chain); } +static void +qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) +{ + struct qla_hw_data *ha = vha->hw; + + if (rval != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Failed to dump firmware (%x)!!!\n", rval); + ha->fw_dumped = 0; + } else { + qla_printk(KERN_INFO, ha, + "Firmware dump saved to temp buffer (%ld/%p).\n", + vha->host_no, ha->fw_dump); + ha->fw_dumped = 1; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + } +} + /** * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. * @ha: HA context @@ -530,17 +548,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) if (rval == QLA_SUCCESS) qla2xxx_copy_queues(ha, nxt); - if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Failed to dump firmware (%x)!!!\n", rval); - ha->fw_dumped = 0; - - } else { - qla_printk(KERN_INFO, ha, - "Firmware dump saved to temp buffer (%ld/%p).\n", - base_vha->host_no, ha->fw_dump); - ha->fw_dumped = 1; - } + qla2xxx_dump_post_process(base_vha, rval); qla2300_fw_dump_failed: if (!hardware_locked) @@ -737,17 +745,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) if (rval == QLA_SUCCESS) qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); - if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Failed to dump firmware (%x)!!!\n", rval); - ha->fw_dumped = 0; - - } else { - qla_printk(KERN_INFO, ha, - "Firmware dump saved to temp buffer (%ld/%p).\n", - base_vha->host_no, ha->fw_dump); - ha->fw_dumped = 1; - } + qla2xxx_dump_post_process(base_vha, rval); qla2100_fw_dump_failed: if (!hardware_locked) @@ -984,17 +982,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) qla24xx_copy_eft(ha, nxt); qla24xx_fw_dump_failed_0: - if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Failed to dump firmware (%x)!!!\n", rval); - ha->fw_dumped = 0; - - } else { - qla_printk(KERN_INFO, ha, - "Firmware dump saved to temp buffer (%ld/%p).\n", - base_vha->host_no, ha->fw_dump); - ha->fw_dumped = 1; - } + qla2xxx_dump_post_process(base_vha, rval); qla24xx_fw_dump_failed: if (!hardware_locked) @@ -1305,17 +1293,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) } qla25xx_fw_dump_failed_0: - if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Failed to dump firmware (%x)!!!\n", rval); - ha->fw_dumped = 0; - - } else { - qla_printk(KERN_INFO, ha, - "Firmware dump saved to temp buffer (%ld/%p).\n", - base_vha->host_no, ha->fw_dump); - ha->fw_dumped = 1; - } + qla2xxx_dump_post_process(base_vha, rval); qla25xx_fw_dump_failed: if (!hardware_locked) @@ -1628,17 +1606,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) } qla81xx_fw_dump_failed_0: - if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Failed to dump firmware (%x)!!!\n", rval); - ha->fw_dumped = 0; - - } else { - qla_printk(KERN_INFO, ha, - "Firmware dump saved to temp buffer (%ld/%p).\n", - base_vha->host_no, ha->fw_dump); - ha->fw_dumped = 1; - } + qla2xxx_dump_post_process(base_vha, rval); qla81xx_fw_dump_failed: if (!hardware_locked) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 215061861794..d8ce31040b51 100644 --- 
a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2123,6 +2123,7 @@ enum qla_work_type { QLA_EVT_ASYNC_LOGIN_DONE, QLA_EVT_ASYNC_LOGOUT, QLA_EVT_ASYNC_LOGOUT_DONE, + QLA_EVT_UEVENT, }; @@ -2146,6 +2147,10 @@ struct qla_work_evt { #define QLA_LOGIO_LOGIN_RETRIED BIT_0 u16 data[2]; } logio; + struct { + u32 code; +#define QLA_UEVENT_CODE_FW_DUMP 0 + } uevent; } u; }; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f3d1d1afa95b..14e0562851cb 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -92,6 +92,7 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); +extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index b79fca7d461b..ecf2a40d70be 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -2653,6 +2654,37 @@ qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE); qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); +int +qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.uevent.code = code; + return qla2x00_post_work(vha, e); +} + +static void +qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) +{ + char event_string[40]; + char *envp[] = { event_string, NULL }; + + switch (code) { + case QLA_UEVENT_CODE_FW_DUMP: + snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", + vha->host_no); + break; + default: + /* do nothing */ + break; + } + kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); +} + void qla2x00_do_work(struct scsi_qla_host *vha) { @@ -2690,6 +2722,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) qla2x00_async_logout_done(vha, e->u.logio.fcport, e->u.logio.data); break; + case QLA_EVT_UEVENT: + qla2x00_uevent_emit(vha, e->u.uevent.code); + break; } if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); -- cgit v1.2.3-59-g8ed1b From bdab23da71c36904693d276c8b28308dc94642aa Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Tue, 13 Oct 2009 15:16:46 -0700 Subject: [SCSI] qla2xxx: Display additional mailbox registers during AEN handling. The mailbox register values may assist in debugging efforts. 
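For illustration, a small self-contained C sketch of the logging idea described above: only adapters that implement the extra register contribute it to the message, everything else reports zero. The helper names and the has_mailbox7 flag are simplified stand-ins for the driver's register accessors and ISP-type checks, not its actual code.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Stand-in for reading the hardware mailbox register. */
uint16_t read_extra_mailbox(void)
{
	return 0xdead;	/* sample value */
}

void log_system_error(bool has_mailbox7, uint16_t mb1, uint16_t mb2, uint16_t mb3)
{
	/* Older ISPs do not latch the extra register; report it as 0. */
	uint16_t mbx = has_mailbox7 ? read_extra_mailbox() : 0;

	printf("ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx7=%xh.\n",
	       mb1, mb2, mb3, mbx);
}

int main(void)
{
	log_system_error(true, 0x1, 0x2, 0x3);
	return 0;
}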
Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_isr.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index b20a7169aac2..4d758d29523c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -313,10 +313,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; char *link_speed; uint16_t handle_cnt; - uint16_t cnt; + uint16_t cnt, mbx; uint32_t handles[5]; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; uint32_t rscn_entry, host_pid; uint8_t rscn_queue_index; unsigned long flags; @@ -395,9 +396,10 @@ skip_rio: break; case MBA_SYSTEM_ERR: /* System Error */ + mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox7) : 0; qla_printk(KERN_INFO, ha, - "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", - mb[1], mb[2], mb[3]); + "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " + "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); ha->isp_ops->fw_dump(vha, 1); @@ -419,9 +421,10 @@ skip_rio: break; case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ - DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", - vha->host_no)); - qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); + DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n", + vha->host_no, mb[1])); + qla_printk(KERN_WARNING, ha, + "ISP Request Transfer Error (%x).\n", mb[1]); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; @@ -485,10 +488,13 @@ skip_rio: break; case MBA_LOOP_DOWN: /* Loop Down Event */ + mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox4) : 0; DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " - "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3])); - qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", - mb[1], mb[2], mb[3]); + "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], + mbx)); + qla_printk(KERN_INFO, ha, + "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3], + mbx); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); -- cgit v1.2.3-59-g8ed1b From 29c5397fc1d28f9b75057644ce8e546671d2a9a9 Mon Sep 17 00:00:00 2001 From: Lalit Chandivade Date: Tue, 13 Oct 2009 15:16:47 -0700 Subject: [SCSI] qla2xxx: Reread firmware versions information after an ISP abort. In some case, the MPI and PHY versions when retrieved after the Execute-FW mailbox-command are incorrect (255.255.255.255). Instead, query the information after the check for firmware ready is done in the abort ISP path. 
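A minimal sketch of the sequencing described above, assuming a query_fw_versions() placeholder for the real version mailbox command: the versions are re-read only after the abort path has confirmed the firmware is ready again, so stale 255.255.255.255 values get replaced.

#include <stdint.h>
#include <stdbool.h>

struct fw_versions {
	uint8_t mpi[4];
	uint8_t phy[4];
};

/* Placeholder for the mailbox command that reports version information. */
void query_fw_versions(struct fw_versions *v)
{
	const struct fw_versions sample = { {1, 2, 3, 4}, {1, 0, 0, 0} };
	*v = sample;
}

/* Re-read version data only once the firmware is known to be ready again. */
void abort_isp_done(bool is_isp81xx, bool fw_ready, struct fw_versions *v)
{
	if (is_isp81xx && fw_ready)
		query_fw_versions(v);	/* replaces any stale 255.255.255.255 data */
}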
Signed-off-by: Lalit Chandivade Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9e3eaac25596..c2494ca6ba1c 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3573,6 +3573,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) ha->isp_abort_cnt = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + if (IS_QLA81XX(ha)) + qla2x00_get_fw_version(vha, + &ha->fw_major_version, + &ha->fw_minor_version, + &ha->fw_subminor_version, + &ha->fw_attributes, &ha->fw_memory_size, + ha->mpi_version, &ha->mpi_capabilities, + ha->phy_version); + if (ha->fce) { ha->flags.fce_enabled = 1; memset(ha->fce, 0, -- cgit v1.2.3-59-g8ed1b From b5d0329f424df20c67d0d9ee979fbd2b8b5ed74d Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Tue, 13 Oct 2009 15:16:48 -0700 Subject: [SCSI] qla2xxx: Set the size of the host buffer used to fetch DCBX and XGMAC parameters to 4K. Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index d8ce31040b51..6b9bf23c7735 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2440,11 +2440,11 @@ struct qla_hw_data { dma_addr_t edc_data_dma; uint16_t edc_data_len; -#define XGMAC_DATA_SIZE PAGE_SIZE +#define XGMAC_DATA_SIZE 4096 void *xgmac_data; dma_addr_t xgmac_data_dma; -#define DCBX_TLV_DATA_SIZE PAGE_SIZE +#define DCBX_TLV_DATA_SIZE 4096 void *dcbx_tlv; dma_addr_t dcbx_tlv_dma; -- cgit v1.2.3-59-g8ed1b From f3a0a77e8df2f5c78648ce5971176e610dbc35c0 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Tue, 13 Oct 2009 15:16:49 -0700 Subject: [SCSI] qla2xxx: Retrieve firmware's maximum number of supported FCFs. 
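Below is a hedged sketch of the out-parameter pattern used for such optional resource counts; the mailbox indices mirror the ones mentioned in the patch, but the function name and the reports_fcfs flag are illustrative only.

#include <stdint.h>
#include <stdbool.h>

/*
 * Out-parameters stay untouched when the caller passes NULL or when the
 * adapter does not report the value, mirroring the optional mailbox 12
 * handling for the maximum number of supported FCFs.
 */
void get_resource_counts(bool reports_fcfs, const uint16_t mb[16],
			 uint16_t *max_npiv_vports, uint16_t *max_fcfs)
{
	if (max_npiv_vports)
		*max_npiv_vports = mb[11];
	if (reports_fcfs && max_fcfs)
		*max_fcfs = mb[12];
}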
Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gbl.h | 2 +- drivers/scsi/qla2xxx/qla_init.c | 2 +- drivers/scsi/qla2xxx/qla_mbx.c | 13 +++++++++---- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 14e0562851cb..e21851358509 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -247,7 +247,7 @@ qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *); extern int qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *, - uint16_t *, uint16_t *, uint16_t *); + uint16_t *, uint16_t *, uint16_t *, uint16_t *); extern int qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index c2494ca6ba1c..c8b24b65e529 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1203,7 +1203,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) } qla2x00_get_resource_cnts(vha, NULL, &ha->fw_xcb_count, NULL, NULL, - &ha->max_npiv_vports); + &ha->max_npiv_vports, NULL); if (!fw_major_version && ql2xallocfwdump) qla2x00_alloc_fw_dump(vha); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index b6202fe118ac..a10d41bf8f26 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2006,7 +2006,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, int qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt, - uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports) + uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs) { int rval; mbx_cmd_t mc; @@ -2017,6 +2017,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; mcp->out_mb = MBX_0; mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + if (IS_QLA81XX(vha->hw)) + mcp->in_mb |= MBX_12; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -2027,9 +2029,10 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, vha->host_no, mcp->mb[0])); } else { DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " - "mb7=%x mb10=%x mb11=%x.\n", __func__, vha->host_no, - mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7], - mcp->mb[10], mcp->mb[11])); + "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__, + vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3], + mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11], + mcp->mb[12])); if (cur_xchg_cnt) *cur_xchg_cnt = mcp->mb[3]; @@ -2041,6 +2044,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, *orig_iocb_cnt = mcp->mb[10]; if (vha->hw->flags.npiv_supported && max_npiv_vports) *max_npiv_vports = mcp->mb[11]; + if (IS_QLA81XX(vha->hw) && max_fcfs) + *max_fcfs = mcp->mb[12]; } return (rval); -- cgit v1.2.3-59-g8ed1b From 9ca1d01f7aa035553501a34054ea17e7537eb07e Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Tue, 13 Oct 2009 15:16:50 -0700 Subject: [SCSI] qla2xxx: Properly check FCP_RSP response-info field after TMF completion. Original code discarded response-info field information and assumed the command completed successfully without verifying the target's status within the FCP_RSP packet. 
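The ordering of those checks can be summarized in a small stand-alone sketch; the field names and the status-flag value below are simplified stand-ins for the real IOCB layout, not the driver's definitions.

#include <stdint.h>
#include <stdbool.h>

#define SS_RESPONSE_INFO_LEN_VALID	0x400	/* stand-in for the SCSI status flag */

struct tmf_status {
	uint8_t  entry_status;
	uint16_t comp_status;	/* 0 == transport completed the IOCB */
	uint16_t scsi_status;
	uint32_t rsp_data_len;
	uint8_t  rsp_code;	/* response code byte from the FCP_RSP info */
};

/* Returns true only when every layer of the response reports success. */
bool tmf_completed_ok(const struct tmf_status *sts)
{
	if (sts->entry_status != 0)
		return false;			/* IOCB itself was rejected */
	if (sts->comp_status != 0)
		return false;			/* transport-level failure */
	if (!(sts->scsi_status & SS_RESPONSE_INFO_LEN_VALID))
		return false;			/* no response info to trust */
	if (sts->rsp_data_len < 4)
		return false;			/* response info too short */
	return sts->rsp_code == 0;		/* FCP response code must be 0 */
}

Only when every layer agrees — a valid entry, a completed transport status, a present response-info field of sufficient length, and a zero FCP response code — is the task-management function treated as successful.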
Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_mbx.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index a10d41bf8f26..791f792a05ce 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2318,6 +2318,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, { int rval, rval2; struct tsk_mgmt_cmd *tsk; + struct sts_entry_24xx *sts; dma_addr_t tsk_dma; scsi_qla_host_t *vha; struct qla_hw_data *ha; @@ -2357,20 +2358,37 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, sizeof(tsk->p.tsk.lun)); } + sts = &tsk->p.sts; rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " "(%x).\n", __func__, vha->host_no, name, rval)); - } else if (tsk->p.sts.entry_status != 0) { + } else if (sts->entry_status != 0) { DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- error status (%x).\n", __func__, vha->host_no, - tsk->p.sts.entry_status)); + sts->entry_status)); rval = QLA_FUNCTION_FAILED; - } else if (tsk->p.sts.comp_status != + } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- completion status (%x).\n", __func__, - vha->host_no, le16_to_cpu(tsk->p.sts.comp_status))); + vha->host_no, le16_to_cpu(sts->comp_status))); + rval = QLA_FUNCTION_FAILED; + } else if (!(le16_to_cpu(sts->scsi_status) & + SS_RESPONSE_INFO_LEN_VALID)) { + DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " + "-- no response info (%x).\n", __func__, vha->host_no, + le16_to_cpu(sts->scsi_status))); + rval = QLA_FUNCTION_FAILED; + } else if (le32_to_cpu(sts->rsp_data_len) < 4) { + DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " + "-- not enough response info (%d).\n", __func__, + vha->host_no, le32_to_cpu(sts->rsp_data_len))); + rval = QLA_FUNCTION_FAILED; + } else if (sts->data[3]) { + DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " + "-- response (%x).\n", __func__, + vha->host_no, sts->data[3])); rval = QLA_FUNCTION_FAILED; } -- cgit v1.2.3-59-g8ed1b From 531a82d1bd73152130b9e3b1f3e2e875c6cff7cd Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Tue, 13 Oct 2009 15:16:51 -0700 Subject: [SCSI] qla2xxx: Properly re-register FC4/FDMI after physical and logical link disruptions. Original code would not register FC4 nor FDMI information after a logical tear-down of an VFC link. Code now triggers registration date during processing of a 'Report ID Acquisition IOCB', which is submitted after a FLOGI or FDISC completes. 
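In outline, the deferral works like the following sketch, assuming placeholder flag names and a wake_worker() stand-in for the driver's DPC wakeup:

enum {
	REGISTER_FC4_NEEDED  = 1 << 0,
	REGISTER_FDMI_NEEDED = 1 << 1,
	DPC_WORK_PENDING     = 1 << 2,
};

struct host_ctx {
	unsigned long dpc_flags;
};

/* Placeholder for waking the driver's background (DPC) thread. */
void wake_worker(struct host_ctx *host)
{
	(void)host;
}

/*
 * Called when a Report ID Acquisition IOCB arrives, i.e. after every
 * FLOGI/FDISC, so FC4/FDMI registration is redone after both physical
 * and logical link disruptions.
 */
void report_id_acquired(struct host_ctx *host)
{
	host->dpc_flags |= REGISTER_FC4_NEEDED | REGISTER_FDMI_NEEDED;
	host->dpc_flags |= DPC_WORK_PENDING;
	wake_worker(host);
}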
Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 1 - drivers/scsi/qla2xxx/qla_mbx.c | 11 ++++++++--- drivers/scsi/qla2xxx/qla_mid.c | 2 -- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index c8b24b65e529..b74924b279ef 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -277,7 +277,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) vha->marker_needed = 0; ha->isp_abort_cnt = 0; ha->beacon_blink_led = 0; - set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); set_bit(0, ha->req_qid_map); set_bit(0, ha->rsp_qid_map); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 791f792a05ce..05d595d9a7ef 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2782,8 +2782,10 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, vp_idx, MSB(stat), rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0])); - if (vp_idx == 0) - return; + + vp = vha; + if (vp_idx == 0 && (MSB(stat) != 1)) + goto reg_needed; if (MSB(stat) == 1) { DEBUG2(printk("scsi(%ld): Could not acquire ID for " @@ -2806,8 +2808,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, * response queue. Handle it in dpc context. */ set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); - set_bit(VP_DPC_NEEDED, &vha->dpc_flags); +reg_needed: + set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); + set_bit(VP_DPC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index e07b3617f019..a47d34308a3a 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -382,8 +382,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) vha->mgmt_svr_loop_id = 10 + vha->vp_idx; vha->dpc_flags = 0L; - set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); - set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); /* * To fix the issue of processing a parent's RSCN for the vport before -- cgit v1.2.3-59-g8ed1b From 0f00a206ccb1dc644b6770ef25f185610fee6962 Mon Sep 17 00:00:00 2001 From: Lalit Chandivade Date: Tue, 13 Oct 2009 15:16:52 -0700 Subject: [SCSI] qla2xxx: Properly handle UNDERRUN completion statuses. Correct issues where the lower scsi-status would be improperly cleared, instead, allow the midlayer to process the status after the proper residual-count checks are performed. Finally, validate firmware status flags prior to assigning values from the FCP_RSP frame. 
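The decision tree described above can be sketched roughly as follows; the result codes and parameter names are illustrative, not the driver's exact values:

#include <stdint.h>
#include <stdbool.h>

enum result { RESULT_OK, RESULT_RETRY, RESULT_ERROR };

/*
 * resid        - residual reported in the status IOCB
 * fw_resid     - residual calculated by the firmware (FWI-2 adapters only)
 * target_under - target set RESIDUAL UNDER in the SCSI status
 */
enum result check_underrun(bool fwi2, uint32_t buflen, uint32_t underflow,
			   uint32_t resid, uint32_t fw_resid,
			   bool target_under, uint8_t scsi_status)
{
	uint32_t use_resid = fwi2 ? fw_resid : resid;

	if (target_under) {
		if (fwi2 && fw_resid != resid)
			return RESULT_RETRY;	/* dropped frame(s): residuals disagree */
		if (scsi_status == 0 && buflen - use_resid < underflow)
			return RESULT_ERROR;	/* mid-layer underflow */
	} else if (scsi_status == 0) {
		return RESULT_RETRY;	/* firmware saw an underrun the target never reported */
	}
	return RESULT_OK;	/* otherwise let the mid-layer judge the SCSI status */
}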
Signed-off-by: Lalit Chandivade Signed-off-by: Michael Hernandez Signed-off-by: Ravi Anand Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_isr.c | 120 ++++++++++++++++++++--------------------- 1 file changed, 57 insertions(+), 63 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 4d758d29523c..804987397b77 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1353,16 +1353,22 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) sense_len = rsp_info_len = resid_len = fw_resid_len = 0; if (IS_FWI2_CAPABLE(ha)) { - sense_len = le32_to_cpu(sts24->sense_len); - rsp_info_len = le32_to_cpu(sts24->rsp_data_len); - resid_len = le32_to_cpu(sts24->rsp_residual_count); - fw_resid_len = le32_to_cpu(sts24->residual_len); + if (scsi_status & SS_SENSE_LEN_VALID) + sense_len = le32_to_cpu(sts24->sense_len); + if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) + rsp_info_len = le32_to_cpu(sts24->rsp_data_len); + if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) + resid_len = le32_to_cpu(sts24->rsp_residual_count); + if (comp_status == CS_DATA_UNDERRUN) + fw_resid_len = le32_to_cpu(sts24->residual_len); rsp_info = sts24->data; sense_data = sts24->data; host_to_fcp_swap(sts24->data, sizeof(sts24->data)); } else { - sense_len = le16_to_cpu(sts->req_sense_length); - rsp_info_len = le16_to_cpu(sts->rsp_info_len); + if (scsi_status & SS_SENSE_LEN_VALID) + sense_len = le16_to_cpu(sts->req_sense_length); + if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) + rsp_info_len = le16_to_cpu(sts->rsp_info_len); resid_len = le32_to_cpu(sts->residual_length); rsp_info = sts->rsp_info; sense_data = sts->req_sense_data; @@ -1449,38 +1455,62 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) break; case CS_DATA_UNDERRUN: - resid = resid_len; + DEBUG2(printk(KERN_INFO + "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. " + "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n", + vha->host_no, cp->device->id, cp->device->lun, comp_status, + scsi_status, resid_len, fw_resid_len, cp->cmnd[0], + cp->underflow)); + /* Use F/W calculated residual length. */ - if (IS_FWI2_CAPABLE(ha)) { - if (!(scsi_status & SS_RESIDUAL_UNDER)) { - lscsi_status = 0; - } else if (resid != fw_resid_len) { - scsi_status &= ~SS_RESIDUAL_UNDER; - lscsi_status = 0; + resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; + scsi_set_resid(cp, resid); + if (scsi_status & SS_RESIDUAL_UNDER) { + if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { + DEBUG2(printk( + "scsi(%ld:%d:%d:%d) Dropped frame(s) " + "detected (%x of %x bytes)...residual " + "length mismatch...retrying command.\n", + vha->host_no, cp->device->channel, + cp->device->id, cp->device->lun, resid, + scsi_bufflen(cp))); + + cp->result = DID_ERROR << 16 | lscsi_status; + break; } - resid = fw_resid_len; - } - if (scsi_status & SS_RESIDUAL_UNDER) { - scsi_set_resid(cp, resid); - } else { - DEBUG2(printk(KERN_INFO - "scsi(%ld:%d:%d) UNDERRUN status detected " - "0x%x-0x%x. 
resid=0x%x fw_resid=0x%x cdb=0x%x " - "os_underflow=0x%x\n", vha->host_no, - cp->device->id, cp->device->lun, comp_status, - scsi_status, resid_len, resid, cp->cmnd[0], - cp->underflow)); + if (!lscsi_status && + ((unsigned)(scsi_bufflen(cp) - resid) < + cp->underflow)) { + qla_printk(KERN_INFO, ha, + "scsi(%ld:%d:%d:%d): Mid-layer underflow " + "detected (%x of %x bytes)...returning " + "error status.\n", vha->host_no, + cp->device->channel, cp->device->id, + cp->device->lun, resid, scsi_bufflen(cp)); + cp->result = DID_ERROR << 16; + break; + } + } else if (!lscsi_status) { + DEBUG2(printk( + "scsi(%ld:%d:%d:%d) Dropped frame(s) detected " + "(%x of %x bytes)...firmware reported underrun..." + "retrying command.\n", vha->host_no, + cp->device->channel, cp->device->id, + cp->device->lun, resid, scsi_bufflen(cp))); + + cp->result = DID_ERROR << 16; + break; } + cp->result = DID_OK << 16 | lscsi_status; + /* * Check to see if SCSI Status is non zero. If so report SCSI * Status. */ if (lscsi_status != 0) { - cp->result = DID_OK << 16 | lscsi_status; - if (lscsi_status == SAM_STAT_TASK_SET_FULL) { DEBUG2(printk(KERN_INFO "scsi(%ld): QUEUE FULL status detected " @@ -1507,42 +1537,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) break; qla2x00_handle_sense(sp, sense_data, sense_len, rsp); - } else { - /* - * If RISC reports underrun and target does not report - * it then we must have a lost frame, so tell upper - * layer to retry it by reporting an error. - */ - if (!(scsi_status & SS_RESIDUAL_UNDER)) { - DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " - "frame(s) detected (%x of %x bytes)..." - "retrying command.\n", - vha->host_no, cp->device->channel, - cp->device->id, cp->device->lun, resid, - scsi_bufflen(cp))); - - scsi_set_resid(cp, resid); - cp->result = DID_ERROR << 16; - break; - } - - /* Handle mid-layer underflow */ - if ((unsigned)(scsi_bufflen(cp) - resid) < - cp->underflow) { - qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d:%d): Mid-layer underflow " - "detected (%x of %x bytes)...returning " - "error status.\n", vha->host_no, - cp->device->channel, cp->device->id, - cp->device->lun, resid, - scsi_bufflen(cp)); - - cp->result = DID_ERROR << 16; - break; - } - - /* Everybody online, looking good... */ - cp->result = DID_OK << 16; } break; -- cgit v1.2.3-59-g8ed1b From 54a3b30e758ec90c5cf860637e28b2d1142af18e Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Tue, 13 Oct 2009 15:16:53 -0700 Subject: [SCSI] qla2xxx: Update version number to 8.03.01-k7 Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index ac107a2c34a4..807e0dbc67fa 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.01-k6" +#define QLA2XXX_VERSION "8.03.01-k7" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 -- cgit v1.2.3-59-g8ed1b From f57e4502cea471c69782d4790c71d8414ab49a9d Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Thu, 15 Oct 2009 14:43:23 -0400 Subject: [SCSI] scsi: Add missing command definitions Add definitions for UNMAP, WRITE SAME{16,32} and GET LBA STATUS commands. Signed-off-by: Martin K. 
Petersen Signed-off-by: James Bottomley --- drivers/scsi/constants.c | 1 + include/scsi/scsi.h | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index 63abb06c4edb..9129bcf117cf 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c @@ -141,6 +141,7 @@ static const struct value_name_pair serv_out12_arr[] = { static const struct value_name_pair serv_in16_arr[] = { {0x10, "Read capacity(16)"}, {0x11, "Read long(16)"}, + {0x12, "Get LBA status"}, }; #define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr) diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 34c46ab5c31b..8b4deca996ad 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -94,6 +94,7 @@ struct scsi_cmnd; #define WRITE_LONG 0x3f #define CHANGE_DEFINITION 0x40 #define WRITE_SAME 0x41 +#define UNMAP 0x42 #define READ_TOC 0x43 #define LOG_SELECT 0x4c #define LOG_SENSE 0x4d @@ -122,9 +123,11 @@ struct scsi_cmnd; #define READ_16 0x88 #define WRITE_16 0x8a #define VERIFY_16 0x8f +#define WRITE_SAME_16 0x93 #define SERVICE_ACTION_IN 0x9e /* values for service action in */ #define SAI_READ_CAPACITY_16 0x10 +#define SAI_GET_LBA_STATUS 0x12 /* values for maintenance in */ #define MI_REPORT_TARGET_PGS 0x0a /* values for maintenance out */ @@ -132,6 +135,7 @@ struct scsi_cmnd; /* values for variable length command */ #define READ_32 0x09 #define WRITE_32 0x0b +#define WRITE_SAME_32 0x0d /* Values for T10/04-262r7 */ #define ATA_16 0x85 /* 16-byte pass-thru */ -- cgit v1.2.3-59-g8ed1b From 44d9269481bb43df445adf464b06ff031e67d7ea Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Thu, 15 Oct 2009 14:45:27 -0400 Subject: [SCSI] scsi_debug: Thin provisioning support This version fixes 64-bit modulo on 32-bit as well as inadvertent map updates when TP was disabled. Implement support for thin provisioning in scsi_debug. No actual memory de-allocation is taking place. The intent is to emulate a thinly provisioned storage device, not to be one. There are four new module options: - unmap_granularity specifies the granularity at which to track mapped blocks (specified in number of logical blocks). 2048 (1 MB) is a realistic value for disk arrays although some may have a finer granularity. - unmap_alignment specifies the first LBA which is naturally aligned on an unmap_granularity boundary. - unmap_max_desc specifies the maximum number of ranges that can be unmapped using one UNMAP command. If this is 0, only WRITE SAME is supported and UNMAP will cause a check condition. - unmap_max_blocks specifies the maximum number of blocks that can be unmapped using a single UNMAP command. Default is 0xffffffff. These parameters are reported in the new and extended block limits VPD. If unmap_granularity is specified the device is tagged as thin provisioning capable in READ CAPACITY(16). A bitmap is allocated to track whether blocks are mapped or not. A WRITE request will cause a block to be mapped. So will WRITE SAME unless the UNMAP bit is set. Blocks can be unmapped using either WRITE SAME or UNMAP. No accounting is done to track partial blocks. This means that only whole blocks will be marked free. This is how the array people tell me their firmwares work. GET LBA STATUS is also supported. This command reports whether a block is mapped or not, and how long the adjoining mapped/unmapped extent is. The block allocation bitmap can also be viewed from user space via: /sys/bus/pseudo/drivers/scsi_debug/map Signed-off-by: Martin K. 
Petersen Acked-by: Douglas Gilbert Signed-off-by: James Bottomley --- drivers/scsi/scsi_debug.c | 338 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 335 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index c4103bef41b5..cb4bf16b4e66 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -44,6 +44,8 @@ #include +#include + #include #include #include @@ -105,6 +107,10 @@ static const char * scsi_debug_version_date = "20070104"; #define DEF_ATO 1 #define DEF_PHYSBLK_EXP 0 #define DEF_LOWEST_ALIGNED 0 +#define DEF_UNMAP_MAX_BLOCKS 0 +#define DEF_UNMAP_MAX_DESC 0 +#define DEF_UNMAP_GRANULARITY 0 +#define DEF_UNMAP_ALIGNMENT 0 /* bit mask values for scsi_debug_opts */ #define SCSI_DEBUG_OPT_NOISE 1 @@ -162,6 +168,10 @@ static int scsi_debug_guard = DEF_GUARD; static int scsi_debug_ato = DEF_ATO; static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; +static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; +static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; +static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; +static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; static int scsi_debug_cmnd_count = 0; @@ -223,7 +233,9 @@ static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; static unsigned char * fake_storep; /* ramdisk storage */ static unsigned char *dif_storep; /* protection info */ +static void *map_storep; /* provisioning map */ +static unsigned long map_size; static int num_aborts = 0; static int num_dev_resets = 0; static int num_bus_resets = 0; @@ -317,6 +329,7 @@ static void get_data_transfer_info(unsigned char *cmd, (u32)cmd[28] << 24; break; + case WRITE_SAME_16: case WRITE_16: case READ_16: *lba = (u64)cmd[9] | (u64)cmd[8] << 8 | @@ -335,6 +348,7 @@ static void get_data_transfer_info(unsigned char *cmd, *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 | (u32)cmd[6] << 24; break; + case WRITE_SAME: case WRITE_10: case READ_10: case XDWRITEREAD_10: @@ -691,6 +705,29 @@ static int inquiry_evpd_b0(unsigned char * arr) arr[6] = (sdebug_store_sectors >> 8) & 0xff; arr[7] = sdebug_store_sectors & 0xff; } + + if (scsi_debug_unmap_max_desc) { + unsigned int blocks; + + if (scsi_debug_unmap_max_blocks) + blocks = scsi_debug_unmap_max_blocks; + else + blocks = 0xffffffff; + + put_unaligned_be32(blocks, &arr[16]); + put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]); + } + + if (scsi_debug_unmap_alignment) { + put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]); + arr[28] |= 0x80; /* UGAVALID */ + } + + if (scsi_debug_unmap_granularity) { + put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); + return 0x3c; /* Mandatory page length for thin provisioning */ + } + return sizeof(vpdb0_data); } @@ -974,6 +1011,10 @@ static int resp_readcap16(struct scsi_cmnd * scp, arr[11] = scsi_debug_sector_size & 0xff; arr[13] = scsi_debug_physblk_exp & 0xf; arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; + + if (scsi_debug_unmap_granularity) + arr[14] |= 0x80; /* TPE */ + arr[15] = scsi_debug_lowest_aligned & 0xff; if (scsi_debug_dif) { @@ -1887,6 +1928,70 @@ out: return ret; } +static unsigned int map_state(sector_t lba, unsigned int *num) +{ + unsigned int granularity, alignment, mapped; + sector_t block, next, end; + + granularity = scsi_debug_unmap_granularity; + alignment = granularity - scsi_debug_unmap_alignment; + block = lba + alignment; + do_div(block, granularity); + + mapped = 
test_bit(block, map_storep); + + if (mapped) + next = find_next_zero_bit(map_storep, map_size, block); + else + next = find_next_bit(map_storep, map_size, block); + + end = next * granularity - scsi_debug_unmap_alignment; + *num = end - lba; + + return mapped; +} + +static void map_region(sector_t lba, unsigned int len) +{ + unsigned int granularity, alignment; + sector_t end = lba + len; + + granularity = scsi_debug_unmap_granularity; + alignment = granularity - scsi_debug_unmap_alignment; + + while (lba < end) { + sector_t block, rem; + + block = lba + alignment; + rem = do_div(block, granularity); + + set_bit(block, map_storep); + + lba += granularity - rem; + } +} + +static void unmap_region(sector_t lba, unsigned int len) +{ + unsigned int granularity, alignment; + sector_t end = lba + len; + + granularity = scsi_debug_unmap_granularity; + alignment = granularity - scsi_debug_unmap_alignment; + + while (lba < end) { + sector_t block, rem; + + block = lba + alignment; + rem = do_div(block, granularity); + + if (rem == 0 && lba + granularity <= end) + clear_bit(block, map_storep); + + lba += granularity - rem; + } +} + static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, unsigned int num, struct sdebug_dev_info *devip, u32 ei_lba) @@ -1910,6 +2015,8 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, write_lock_irqsave(&atomic_rw, iflags); ret = do_device_access(SCpnt, devip, lba, num, 1); + if (scsi_debug_unmap_granularity) + map_region(lba, num); write_unlock_irqrestore(&atomic_rw, iflags); if (-1 == ret) return (DID_ERROR << 16); @@ -1917,9 +2024,143 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, " " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); + + return 0; +} + +static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, + unsigned int num, struct sdebug_dev_info *devip, + u32 ei_lba, unsigned int unmap) +{ + unsigned long iflags; + unsigned long long i; + int ret; + + ret = check_device_access_params(devip, lba, num); + if (ret) + return ret; + + write_lock_irqsave(&atomic_rw, iflags); + + if (unmap && scsi_debug_unmap_granularity) { + unmap_region(lba, num); + goto out; + } + + /* Else fetch one logical block */ + ret = fetch_to_dev_buffer(scmd, + fake_storep + (lba * scsi_debug_sector_size), + scsi_debug_sector_size); + + if (-1 == ret) { + write_unlock_irqrestore(&atomic_rw, iflags); + return (DID_ERROR << 16); + } else if ((ret < (num * scsi_debug_sector_size)) && + (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) + printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, " + " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); + + /* Copy first sector to remaining blocks */ + for (i = 1 ; i < num ; i++) + memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size), + fake_storep + (lba * scsi_debug_sector_size), + scsi_debug_sector_size); + + if (scsi_debug_unmap_granularity) + map_region(lba, num); +out: + write_unlock_irqrestore(&atomic_rw, iflags); + return 0; } +struct unmap_block_desc { + __be64 lba; + __be32 blocks; + __be32 __reserved; +}; + +static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) +{ + unsigned char *buf; + struct unmap_block_desc *desc; + unsigned int i, payload_len, descriptors; + int ret; + + ret = check_readiness(scmd, 1, devip); + if (ret) + return ret; + + payload_len = get_unaligned_be16(&scmd->cmnd[7]); + BUG_ON(scsi_bufflen(scmd) != 
payload_len); + + descriptors = (payload_len - 8) / 16; + + buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC); + if (!buf) + return check_condition_result; + + scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); + + BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2); + BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16); + + desc = (void *)&buf[8]; + + for (i = 0 ; i < descriptors ; i++) { + unsigned long long lba = get_unaligned_be64(&desc[i].lba); + unsigned int num = get_unaligned_be32(&desc[i].blocks); + + ret = check_device_access_params(devip, lba, num); + if (ret) + goto out; + + unmap_region(lba, num); + } + + ret = 0; + +out: + kfree(buf); + + return ret; +} + +#define SDEBUG_GET_LBA_STATUS_LEN 32 + +static int resp_get_lba_status(struct scsi_cmnd * scmd, + struct sdebug_dev_info * devip) +{ + unsigned long long lba; + unsigned int alloc_len, mapped, num; + unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN]; + int ret; + + ret = check_readiness(scmd, 1, devip); + if (ret) + return ret; + + lba = get_unaligned_be64(&scmd->cmnd[2]); + alloc_len = get_unaligned_be32(&scmd->cmnd[10]); + + if (alloc_len < 24) + return 0; + + ret = check_device_access_params(devip, lba, 1); + if (ret) + return ret; + + mapped = map_state(lba, &num); + + memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN); + put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */ + put_unaligned_be64(lba, &arr[8]); /* LBA */ + put_unaligned_be32(num, &arr[16]); /* Number of blocks */ + arr[20] = !mapped; /* mapped = 0, unmapped = 1 */ + + return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN); +} + #define SDEBUG_RLUN_ARR_SZ 256 static int resp_report_luns(struct scsi_cmnd * scp, @@ -2430,6 +2671,10 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO); module_param_named(ato, scsi_debug_ato, int, S_IRUGO); module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); +module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); +module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); +module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); +module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); MODULE_DESCRIPTION("SCSI debug adapter driver"); @@ -2458,6 +2703,10 @@ MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); +MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)"); +MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)"); +MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)"); +MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); static char sdebug_info[256]; @@ -2816,6 +3065,23 @@ static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf) } DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL); +static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf) +{ + ssize_t count; + + if (scsi_debug_unmap_granularity == 0) + return scnprintf(buf, PAGE_SIZE, "0-%u\n", + sdebug_store_sectors); + + count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size); + + buf[count++] = '\n'; + buf[count++] = 0; + + return 
count; +} +DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL); + /* Note: The following function creates attribute files in the /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these @@ -2847,11 +3113,13 @@ static int do_create_driverfs_files(void) ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map); return ret; } static void do_remove_driverfs_files(void) { + driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif); @@ -2989,6 +3257,36 @@ static int __init scsi_debug_init(void) memset(dif_storep, 0xff, dif_size); } + if (scsi_debug_unmap_granularity) { + unsigned int map_bytes; + + if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { + printk(KERN_ERR + "%s: ERR: unmap_granularity < unmap_alignment\n", + __func__); + return -EINVAL; + } + + map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity); + map_bytes = map_size >> 3; + map_storep = vmalloc(map_bytes); + + printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", + map_size); + + if (map_storep == NULL) { + printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n"); + ret = -ENOMEM; + goto free_vm; + } + + memset(map_storep, 0x0, map_bytes); + + /* Map first 1KB for partition table */ + if (scsi_debug_num_parts) + map_region(0, 2); + } + ret = device_register(&pseudo_primary); if (ret < 0) { printk(KERN_WARNING "scsi_debug: device_register error: %d\n", @@ -3041,6 +3339,8 @@ bus_unreg: dev_unreg: device_unregister(&pseudo_primary); free_vm: + if (map_storep) + vfree(map_storep); if (dif_storep) vfree(dif_storep); vfree(fake_storep); @@ -3167,6 +3467,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done) int inj_dif = 0; int inj_dix = 0; int delay_override = 0; + int unmap = 0; scsi_set_resid(SCpnt, 0); if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { @@ -3272,13 +3573,21 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done) errsts = resp_readcap(SCpnt, devip); break; case SERVICE_ACTION_IN: - if (SAI_READ_CAPACITY_16 != cmd[1]) { + if (cmd[1] == SAI_READ_CAPACITY_16) + errsts = resp_readcap16(SCpnt, devip); + else if (cmd[1] == SAI_GET_LBA_STATUS) { + + if (scsi_debug_unmap_max_desc == 0) { + mk_sense_buffer(devip, ILLEGAL_REQUEST, + INVALID_COMMAND_OPCODE, 0); + errsts = check_condition_result; + } else + errsts = resp_get_lba_status(SCpnt, devip); + } else { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0); errsts = check_condition_result; - break; } - errsts = resp_readcap16(SCpnt, devip); break; case MAINTENANCE_IN: if (MI_REPORT_TARGET_PGS != cmd[1]) { @@ -3378,6 +3687,29 @@ write: errsts = illegal_condition_result; } break; + case WRITE_SAME_16: + if (cmd[1] & 0x8) + unmap = 1; + /* fall through */ + case WRITE_SAME: + errsts = check_readiness(SCpnt, 0, devip); + if (errsts) + break; + get_data_transfer_info(cmd, &lba, &num, &ei_lba); + errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap); + break; + case UNMAP: + errsts = check_readiness(SCpnt, 0, devip); + if (errsts) + break; + + if (scsi_debug_unmap_max_desc == 0) { + mk_sense_buffer(devip, ILLEGAL_REQUEST, + INVALID_COMMAND_OPCODE, 0); + errsts = 
check_condition_result; + } else + errsts = resp_unmap(SCpnt, devip); + break; case MODE_SENSE: case MODE_SENSE_10: errsts = resp_mode_sense(SCpnt, target, devip); -- cgit v1.2.3-59-g8ed1b From 230934a6fe2f44d14ef840639f010c9cf4da098f Mon Sep 17 00:00:00 2001 From: Brian King Date: Mon, 19 Oct 2009 15:07:47 -0500 Subject: [SCSI] ibmvfc: Fixup TMF response handling When processing the response to either a LUN reset, target reset, or an abort task set, the ibmvfc driver needs to treat as success receiving a response with a non-zero status in the response IU along with a general transport error with the FCP response code being zero. The VIOS currently guarantees this cannot happen, but a future version of VIOS may allow this to be returned, so ensure we handle this response combination correctly for TMFs, as we already do for SCSI commands. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index bb2c696c006a..c35d8054dbbb 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1731,7 +1731,10 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc); wait_for_completion(&evt->comp); - if (rsp_iu.cmd.status) { + if (rsp_iu.cmd.status) + rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd); + + if (rsp_code) { if (fc_rsp->flags & FCP_RSP_LEN_VALID) rsp_code = fc_rsp->data.info.rsp_code; @@ -1820,7 +1823,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n"); wait_for_completion(&evt->comp); - if (rsp_iu.cmd.status) { + if (rsp_iu.cmd.status) + rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd); + + if (rsp_code) { if (fc_rsp->flags & FCP_RSP_LEN_VALID) rsp_code = fc_rsp->data.info.rsp_code; -- cgit v1.2.3-59-g8ed1b From 7043110550f19c1556ad18dc4d63b1c9eaf9e4fd Mon Sep 17 00:00:00 2001 From: Brian King Date: Mon, 19 Oct 2009 15:07:48 -0500 Subject: [SCSI] ibmvfc: Fix locking in ibmvfc_remove Need to grab the host lock around the call to ibmvfc_link_down. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index c35d8054dbbb..d37230faf086 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4420,7 +4420,11 @@ static int ibmvfc_remove(struct vio_dev *vdev) ENTER; ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr); + + spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_wait_while_resetting(vhost); ibmvfc_release_crq_queue(vhost); kthread_stop(vhost->work_thread); -- cgit v1.2.3-59-g8ed1b From 861890c62d46bd29c73d75fc907aeffd1c4eee06 Mon Sep 17 00:00:00 2001 From: Brian King Date: Mon, 19 Oct 2009 15:07:49 -0500 Subject: [SCSI] ibmvfc: Remove unnecessary parameter to ibmvfc_init_host Remove a parameter to ibmvfc_init_host which is always set to zero by all callers. 
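A simplified sketch of that response evaluation, with generic_err_result() standing in for the driver's status-to-result mapping:

#include <stdint.h>

#define FCP_RSP_LEN_VALID	0x01

struct fcp_rsp {
	uint8_t flags;
	uint8_t rsp_code;
};

/* Placeholder mapping of a non-zero response IU status to a generic error. */
int generic_err_result(uint16_t status)
{
	return status ? -1 : 0;
}

/* Returns 0 when the TMF should be treated as successful. */
int tmf_result(uint16_t iu_status, const struct fcp_rsp *fc_rsp)
{
	int rsp_code = 0;

	if (iu_status)
		rsp_code = generic_err_result(iu_status);

	if (rsp_code) {
		/* A valid FCP response code of zero overrides the transport error. */
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->rsp_code;
	}
	return rsp_code;
}

With this ordering, a transport error whose FCP response code is zero yields a zero result, i.e. the TMF is treated as having succeeded.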
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index d37230faf086..696328699ec3 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -558,12 +558,11 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost, /** * ibmvfc_init_host - Start host initialization * @vhost: ibmvfc host struct - * @relogin: is this a re-login? * * Return value: * nothing **/ -static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) +static void ibmvfc_init_host(struct ibmvfc_host *vhost) { struct ibmvfc_target *tgt; @@ -577,10 +576,8 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) } if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { - if (!relogin) { - memset(vhost->async_crq.msgs, 0, PAGE_SIZE); - vhost->async_crq.cur = 0; - } + memset(vhost->async_crq.msgs, 0, PAGE_SIZE); + vhost->async_crq.cur = 0; list_for_each_entry(tgt, &vhost->targets, queue) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); @@ -2303,13 +2300,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) /* Send back a response */ rc = ibmvfc_send_crq_init_complete(vhost); if (rc == 0) - ibmvfc_init_host(vhost, 0); + ibmvfc_init_host(vhost); else dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc); break; case IBMVFC_CRQ_INIT_COMPLETE: dev_info(vhost->dev, "Partner initialization complete\n"); - ibmvfc_init_host(vhost, 0); + ibmvfc_init_host(vhost); break; default: dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); @@ -3731,7 +3728,7 @@ static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) case IBMVFC_MAD_SUCCESS: if (list_empty(&vhost->sent) && vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { - ibmvfc_init_host(vhost, 0); + ibmvfc_init_host(vhost); return; } break; -- cgit v1.2.3-59-g8ed1b From 4a5c4a5ed2b8b7fac68368e7ab8cb415dd006418 Mon Sep 17 00:00:00 2001 From: Brian King Date: Mon, 19 Oct 2009 15:07:53 -0500 Subject: [SCSI] ibmvfc: Fix adapter cancel flags for terminate_rport_io When issuing a Cancel to the virtual fibre channel adapter, the interface specifies a flags field for the client to indicate what kind of error recovery is being performed. Fix up these flags for terminate_rport_io to indicate an abort task set rather than a target reset. 
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 696328699ec3..2c73b831544c 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2064,12 +2064,24 @@ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) } /** - * ibmvfc_dev_cancel_all - Device iterated cancel all function + * ibmvfc_dev_cancel_all_abts - Device iterated cancel all function * @sdev: scsi device struct * @data: return code * **/ -static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data) +static void ibmvfc_dev_cancel_all_abts(struct scsi_device *sdev, void *data) +{ + unsigned long *rc = data; + *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); +} + +/** + * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function + * @sdev: scsi device struct + * @data: return code + * + **/ +static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) { unsigned long *rc = data; *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET); @@ -2105,7 +2117,7 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) ENTER; ibmvfc_wait_while_resetting(vhost); - starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all); + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); if (!cancel_rc && !reset_rc) @@ -2147,7 +2159,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) int rc = FAILED; ENTER; - starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all); + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_abts); starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all); if (!cancel_rc && !abort_rc) -- cgit v1.2.3-59-g8ed1b From d31429e1517c007781dfc68aed9b39cb5d3350a1 Mon Sep 17 00:00:00 2001 From: Brian King Date: Mon, 19 Oct 2009 15:07:54 -0500 Subject: [SCSI] ibmvfc: Add FC Passthru support Adds support for FC passthru via BSG. 
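The split can be pictured with two thin per-device callbacks that feed different flags into one shared cancel routine; the names and flag values here are placeholders rather than the driver's interface.

enum tmf_flag { TMF_ABORT_TASK_SET = 1, TMF_TGT_RESET = 2 };

struct scsi_dev;

/* Placeholder for the shared routine that issues the Cancel request. */
unsigned long cancel_all(struct scsi_dev *sdev, enum tmf_flag flag)
{
	(void)sdev;
	(void)flag;
	return 0;
}

/* Used by terminate_rport_io: link-level teardown wants an abort task set. */
void dev_cancel_all_abts(struct scsi_dev *sdev, void *data)
{
	unsigned long *rc = data;

	*rc |= cancel_all(sdev, TMF_ABORT_TASK_SET);
}

/* Used by the target-reset error handler. */
void dev_cancel_all_reset(struct scsi_dev *sdev, void *data)
{
	unsigned long *rc = data;

	*rc |= cancel_all(sdev, TMF_TGT_RESET);
}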
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 279 +++++++++++++++++++++++++++++++++++++++++ drivers/scsi/ibmvscsi/ibmvfc.h | 8 +- 2 files changed, 286 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 2c73b831544c..bc9beb8c587c 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "ibmvfc.h" static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT; @@ -1674,6 +1675,276 @@ static void ibmvfc_sync_completion(struct ibmvfc_event *evt) complete(&evt->comp); } +/** + * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands + * @evt: struct ibmvfc_event + * + **/ +static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + + ibmvfc_free_event(evt); + vhost->aborting_passthru = 0; + dev_info(vhost->dev, "Passthru command cancelled\n"); +} + +/** + * ibmvfc_bsg_timeout - Handle a BSG timeout + * @job: struct fc_bsg_job that timed out + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_bsg_timeout(struct fc_bsg_job *job) +{ + struct ibmvfc_host *vhost = shost_priv(job->shost); + unsigned long port_id = (unsigned long)job->dd_data; + struct ibmvfc_event *evt; + struct ibmvfc_tmf *tmf; + unsigned long flags; + int rc; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) { + __ibmvfc_reset_host(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; + } + + vhost->aborting_passthru = 1; + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT); + + tmf = &evt->iu.tmf; + memset(tmf, 0, sizeof(*tmf)); + tmf->common.version = 1; + tmf->common.opcode = IBMVFC_TMF_MAD; + tmf->common.length = sizeof(*tmf); + tmf->scsi_id = port_id; + tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY; + tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY; + rc = ibmvfc_send_event(evt, vhost, default_timeout); + + if (rc != 0) { + vhost->aborting_passthru = 0; + dev_err(vhost->dev, "Failed to send cancel event. 
rc=%d\n", rc); + rc = -EIO; + } else + dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n", + port_id); + + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + LEAVE; + return rc; +} + +/** + * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command + * @vhost: struct ibmvfc_host to send command + * @port_id: port ID to send command + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id) +{ + struct ibmvfc_port_login *plogi; + struct ibmvfc_target *tgt; + struct ibmvfc_event *evt; + union ibmvfc_iu rsp_iu; + unsigned long flags; + int rc = 0, issue_login = 1; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->scsi_id == port_id) { + issue_login = 0; + break; + } + } + + if (!issue_login) + goto unlock_out; + if (unlikely((rc = ibmvfc_host_chkready(vhost)))) + goto unlock_out; + + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); + plogi = &evt->iu.plogi; + memset(plogi, 0, sizeof(*plogi)); + plogi->common.version = 1; + plogi->common.opcode = IBMVFC_PORT_LOGIN; + plogi->common.length = sizeof(*plogi); + plogi->scsi_id = port_id; + evt->sync_iu = &rsp_iu; + init_completion(&evt->comp); + + rc = ibmvfc_send_event(evt, vhost, default_timeout); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rc) + return -EIO; + + wait_for_completion(&evt->comp); + + if (rsp_iu.plogi.common.status) + rc = -EIO; + + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); +unlock_out: + spin_unlock_irqrestore(vhost->host->host_lock, flags); + LEAVE; + return rc; +} + +/** + * ibmvfc_bsg_request - Handle a BSG request + * @job: struct fc_bsg_job to be executed + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_bsg_request(struct fc_bsg_job *job) +{ + struct ibmvfc_host *vhost = shost_priv(job->shost); + struct fc_rport *rport = job->rport; + struct ibmvfc_passthru_mad *mad; + struct ibmvfc_event *evt; + union ibmvfc_iu rsp_iu; + unsigned long flags, port_id = -1; + unsigned int code = job->request->msgcode; + int rc = 0, req_seg, rsp_seg, issue_login = 0; + u32 fc_flags, rsp_len; + + ENTER; + job->reply->reply_payload_rcv_len = 0; + if (rport) + port_id = rport->port_id; + + switch (code) { + case FC_BSG_HST_ELS_NOLOGIN: + port_id = (job->request->rqst_data.h_els.port_id[0] << 16) | + (job->request->rqst_data.h_els.port_id[1] << 8) | + job->request->rqst_data.h_els.port_id[2]; + case FC_BSG_RPT_ELS: + fc_flags = IBMVFC_FC_ELS; + break; + case FC_BSG_HST_CT: + issue_login = 1; + port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) | + (job->request->rqst_data.h_ct.port_id[1] << 8) | + job->request->rqst_data.h_ct.port_id[2]; + case FC_BSG_RPT_CT: + fc_flags = IBMVFC_FC_CT_IU; + break; + default: + return -ENOTSUPP; + }; + + if (port_id == -1) + return -EINVAL; + if (!mutex_trylock(&vhost->passthru_mutex)) + return -EBUSY; + + job->dd_data = (void *)port_id; + req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + + if (!req_seg) { + mutex_unlock(&vhost->passthru_mutex); + return -ENOMEM; + } + + rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + + if (!rsp_seg) { + dma_unmap_sg(vhost->dev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + 
mutex_unlock(&vhost->passthru_mutex); + return -ENOMEM; + } + + if (req_seg > 1 || rsp_seg > 1) { + rc = -EINVAL; + goto out; + } + + if (issue_login) + rc = ibmvfc_bsg_plogi(vhost, port_id); + + spin_lock_irqsave(vhost->host->host_lock, flags); + + if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) || + unlikely((rc = ibmvfc_host_chkready(vhost)))) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + goto out; + } + + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); + mad = &evt->iu.passthru; + + memset(mad, 0, sizeof(*mad)); + mad->common.version = 1; + mad->common.opcode = IBMVFC_PASSTHRU; + mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu); + + mad->cmd_ioba.va = (u64)evt->crq.ioba + + offsetof(struct ibmvfc_passthru_mad, iu); + mad->cmd_ioba.len = sizeof(mad->iu); + + mad->iu.cmd_len = job->request_payload.payload_len; + mad->iu.rsp_len = job->reply_payload.payload_len; + mad->iu.flags = fc_flags; + mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY; + + mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list); + mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list); + mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list); + mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list); + mad->iu.scsi_id = port_id; + mad->iu.tag = (u64)evt; + rsp_len = mad->iu.rsp.len; + + evt->sync_iu = &rsp_iu; + init_completion(&evt->comp); + rc = ibmvfc_send_event(evt, vhost, 0); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rc) { + rc = -EIO; + goto out; + } + + wait_for_completion(&evt->comp); + + if (rsp_iu.passthru.common.status) + rc = -EIO; + else + job->reply->reply_payload_rcv_len = rsp_len; + + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + job->reply->result = rc; + job->job_done(job); + rc = 0; +out: + dma_unmap_sg(vhost->dev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + dma_unmap_sg(vhost->dev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + mutex_unlock(&vhost->passthru_mutex); + LEAVE; + return rc; +} + /** * ibmvfc_reset_device - Reset the device with the specified reset type * @sdev: scsi device to reset @@ -3918,6 +4189,8 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) rport->supported_classes |= FC_COS_CLASS2; if (tgt->service_parms.class3_parms[0] & 0x80000000) rport->supported_classes |= FC_COS_CLASS3; + if (rport->rqst_q) + blk_queue_max_hw_segments(rport->rqst_q, 1); } else tgt_dbg(tgt, "rport add failed\n"); spin_unlock_irqrestore(vhost->host->host_lock, flags); @@ -4357,6 +4630,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) init_waitqueue_head(&vhost->work_wait_q); init_waitqueue_head(&vhost->init_wait_q); INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread); + mutex_init(&vhost->passthru_mutex); if ((rc = ibmvfc_alloc_mem(vhost))) goto free_scsi_host; @@ -4389,6 +4663,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) goto remove_shost; } + if (shost_to_fc_host(shost)->rqst_q) + blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1); dev_set_drvdata(dev, vhost); spin_lock(&ibmvfc_driver_lock); list_add_tail(&vhost->queue, &ibmvfc_head); @@ -4517,6 +4793,9 @@ static struct fc_function_template ibmvfc_transport_functions = { .get_starget_port_id = ibmvfc_get_starget_port_id, .show_starget_port_id = 1, + + 
.bsg_request = ibmvfc_bsg_request, + .bsg_timeout = ibmvfc_bsg_timeout, }; /** diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 007fa1c9ef14..77513b4dd9ae 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -58,9 +58,10 @@ * 1 for ERP * 1 for initialization * 1 for NPIV Logout + * 2 for BSG passthru * 2 for each discovery thread */ -#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2)) +#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + 2 + (disc_threads * 2)) #define IBMVFC_MAD_SUCCESS 0x00 #define IBMVFC_MAD_NOT_SUPPORTED 0xF1 @@ -466,7 +467,10 @@ struct ibmvfc_passthru_iu { u16 error; u32 flags; #define IBMVFC_FC_ELS 0x01 +#define IBMVFC_FC_CT_IU 0x02 u32 cancel_key; +#define IBMVFC_PASSTHRU_CANCEL_KEY 0x80000000 +#define IBMVFC_INTERNAL_CANCEL_KEY 0x80000001 u32 reserved; struct srp_direct_buf cmd; struct srp_direct_buf rsp; @@ -693,6 +697,7 @@ struct ibmvfc_host { int disc_buf_sz; int log_level; struct ibmvfc_discover_targets_buf *disc_buf; + struct mutex passthru_mutex; int task_set; int init_retries; int discovery_threads; @@ -702,6 +707,7 @@ struct ibmvfc_host { int delay_init; int scan_complete; int logged_in; + int aborting_passthru; int events_to_log; #define IBMVFC_AE_LINKUP 0x0001 #define IBMVFC_AE_LINKDOWN 0x0002 -- cgit v1.2.3-59-g8ed1b From 8da85e451d1e9c37bd2846f032c9d5ffa1234d1f Mon Sep 17 00:00:00 2001 From: Brian King Date: Mon, 19 Oct 2009 15:07:55 -0500 Subject: [SCSI] ibmvfc: Driver version 1.0.7 Bump driver version to 1.0.7. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 77513b4dd9ae..d25106a958d7 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.6" -#define IBMVFC_DRIVER_DATE "(May 28, 2009)" +#define IBMVFC_DRIVER_VERSION "1.0.7" +#define IBMVFC_DRIVER_DATE "(October 16, 2009)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 -- cgit v1.2.3-59-g8ed1b From 85b5893ca97c69e409ecbb5ee90a5d99882369c4 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 21 Oct 2009 16:26:45 -0700 Subject: [SCSI] libfc: fix typo in retry check on received PRLI A received Fibre Channel ELS PRLI request contains a bit that indicates whether the remote port supports certain retry processing sequences. The test for this bit was somehow coded to use multiply instead of AND! This case would apply only for target mode operation, and it is unlikely to be noticed as an initiator. 
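A stand-alone sketch of this bug class (the parameter value is made up; this is not the libfc code) shows why the multiply compiles silently yet answers the wrong question: the product is non-zero for almost any service-parameter word, while the AND isolates the single retry capability bit:

/* Demo of multiply-vs-AND when testing a flag bit; values are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define FCP_SPPF_RETRY 0x0100	/* assumed bit position, for illustration */

int main(void)
{
	uint32_t fcp_parm = 0x0021;	/* retry bit NOT set */

	/* Buggy test: non-zero for any non-zero fcp_parm. */
	printf("multiply: %s\n",
	       (fcp_parm * FCP_SPPF_RETRY) ? "retry" : "no retry");

	/* Correct test: isolates the single capability bit. */
	printf("and:      %s\n",
	       (fcp_parm & FCP_SPPF_RETRY) ? "retry" : "no retry");
	return 0;
}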
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 03ea6748e7ee..bdc973278d8d 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1402,7 +1402,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, break; case FC_TYPE_FCP: fcp_parm = ntohl(rspp->spp_params); - if (fcp_parm * FCP_SPPF_RETRY) + if (fcp_parm & FCP_SPPF_RETRY) rdata->flags |= FC_RP_FLAGS_RETRY; rdata->supported_classes = FC_COS_CLASS3; if (fcp_parm & FCP_SPPF_INIT_FCN) -- cgit v1.2.3-59-g8ed1b From 5e472d077f45de4f37365171bd742f18b3ef20de Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 21 Oct 2009 16:26:50 -0700 Subject: [SCSI] libfc: fix ddp in fc_fcp for 0 xid xid 0 was used as an indication of invalid xid before, but now xid 0 can be used as a valid exchange id. This patch fixes the ddp completion in the fcp layer, i.e., in fc_fcp.c:fc_fcp_ddp_done(), to make sure it does not use xid 0 as the indication of an invalid xid; instead, it now uses FC_XID_UNKNOWN for that indication. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 59a4408b27b5..e6c6f4742368 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -302,10 +302,13 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) if (!fsp) return; + if (fsp->xfer_ddp == FC_XID_UNKNOWN) + return; + lp = fsp->lp; - if (fsp->xfer_ddp && lp->tt.ddp_done) { + if (lp->tt.ddp_done) { fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); - fsp->xfer_ddp = 0; + fsp->xfer_ddp = FC_XID_UNKNOWN; } } @@ -1708,6 +1711,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) fsp->cmd = sc_cmd; /* save the cmd */ fsp->lp = lp; /* save the softc ptr */ fsp->rport = rport; /* set the remote port ptr */ + fsp->xfer_ddp = FC_XID_UNKNOWN; sc_cmd->scsi_done = done; /* -- cgit v1.2.3-59-g8ed1b From b04d023cf5b7f4113cc4a09405c2fe8003bfe37d Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 21 Oct 2009 16:26:55 -0700 Subject: [SCSI] fcoe: remove redundant checking of netdev->netdev_ops Remove the redundant checking of netdev->netdev_ops as it will never be NULL.
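The underlying pattern in the ddp fix is an out-of-band sentinel: once 0 is a legal exchange ID, "no DDP exchange assigned" needs a value that can never be a real ID. A minimal stand-alone sketch of that convention follows; the 0xffff value and the struct are chosen only for illustration, not taken from libfc:

/* Sentinel-value sketch: distinguish "no DDP exchange" from exchange 0. */
#include <stdint.h>
#include <stdio.h>

#define XID_UNKNOWN 0xffff	/* never handed out as a real exchange ID */

struct pkt {
	uint16_t xfer_ddp;	/* exchange ID used for DDP, or XID_UNKNOWN */
};

static void ddp_done(struct pkt *p)
{
	if (p->xfer_ddp == XID_UNKNOWN)		/* nothing to release */
		return;
	printf("releasing DDP context for xid 0x%04x\n", (unsigned)p->xfer_ddp);
	p->xfer_ddp = XID_UNKNOWN;
}

int main(void)
{
	struct pkt a = { .xfer_ddp = XID_UNKNOWN };	/* initialized, no DDP */
	struct pkt b = { .xfer_ddp = 0 };		/* xid 0 is now valid */

	ddp_done(&a);	/* no-op */
	ddp_done(&b);	/* releases xid 0 correctly */
	return 0;
}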
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 704b8e034946..7c898875838f 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -664,7 +664,7 @@ static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid, { struct net_device *n = fcoe_netdev(lp); - if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup) + if (n->netdev_ops->ndo_fcoe_ddp_setup) return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc); return 0; @@ -681,7 +681,7 @@ static int fcoe_ddp_done(struct fc_lport *lp, u16 xid) { struct net_device *n = fcoe_netdev(lp); - if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done) + if (n->netdev_ops->ndo_fcoe_ddp_done) return n->netdev_ops->ndo_fcoe_ddp_done(n, xid); return 0; } -- cgit v1.2.3-59-g8ed1b From 473e28563fbb038515d4616546297483d3727c02 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Wed, 21 Oct 2009 16:27:01 -0700 Subject: [SCSI] libfc, fcoe: Don't EXPORT_SYMBOLS unnecessarily These are a few functions that were not used by other modules. They did not need to be exported so this patch removes the EXPORT_SYMBOLS call for each. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 1 - drivers/scsi/libfc/fc_fcp.c | 1 - drivers/scsi/libfc/fc_frame.c | 1 - drivers/scsi/libfc/fc_rport.c | 2 -- 4 files changed, 5 deletions(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index c1c15748220c..ae8f9e9ac958 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -422,7 +422,6 @@ int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) error = -ENOBUFS; return error; } -EXPORT_SYMBOL(fc_seq_exch_abort); /* * Exchange timeout - handle exchange timer expiration. diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index e6c6f4742368..e613eb80d3a3 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -285,7 +285,6 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) fsp->xfer_ddp = xid; } } -EXPORT_SYMBOL(fc_fcp_ddp_setup); /* * fc_fcp_ddp_done - calls to LLD's ddp_done to release any diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c index 63fe00cfe667..ac3681ae68d9 100644 --- a/drivers/scsi/libfc/fc_frame.c +++ b/drivers/scsi/libfc/fc_frame.c @@ -69,7 +69,6 @@ struct fc_frame *__fc_frame_alloc(size_t len) } EXPORT_SYMBOL(__fc_frame_alloc); - struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) { struct fc_frame *fp; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index bdc973278d8d..1f795e4e4742 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1565,13 +1565,11 @@ int fc_setup_rport(void) return -ENOMEM; return 0; } -EXPORT_SYMBOL(fc_setup_rport); void fc_destroy_rport(void) { destroy_workqueue(rport_event_queue); } -EXPORT_SYMBOL(fc_destroy_rport); void fc_rport_terminate_io(struct fc_rport *rport) { -- cgit v1.2.3-59-g8ed1b From c340111dbb48482cd23f4e441deff9169be9bc6f Mon Sep 17 00:00:00 2001 From: Robert Love Date: Wed, 21 Oct 2009 16:27:06 -0700 Subject: [SCSI] libfc: Remove unused fc_lport pointer from fc_fcp_pkt_abort This argument isn't used, let's not pass it into the routine. 
Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index e613eb80d3a3..ade962d74fb9 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1097,7 +1097,7 @@ unlock: * Scsi abort handler- calls to send an abort * and then wait for abort completion */ -static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) +static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) { int rc = FAILED; @@ -1945,7 +1945,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) goto release_pkt; } - rc = fc_fcp_pkt_abort(lp, fsp); + rc = fc_fcp_pkt_abort(fsp); fc_fcp_unlock_pkt(fsp); release_pkt: -- cgit v1.2.3-59-g8ed1b From 4347fa66878e079766258bc0d077c350cb31a799 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 21 Oct 2009 16:27:12 -0700 Subject: [SCSI] libfc: Fix wrong scsi return status under FC_DATA_UNDRUN This bug is exposed when there is a link flap in LLD. Particularly, when it happens right after a SCSI write command is sent out, no FCP_DATA is sent, causing fsp->status_code to be set as FC_DATA_UNDRUN in fc_fcp_complete_locked even no SCSI status is received. Consequently, fc_io_compl treats this as DID_OK. This results in SCSI returning successful to the initial I/O request even there is no DATA actually sent. Particularly, if you run an I/O tool w/ data verification on, the read back for verification is gonna fail. This is fixed here by checking when FC_DATA_UNDRUN happens, SCSI status is received w/ FC_SRB_RCV_STATUS set in fsp->state. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index ade962d74fb9..40ed7442d9df 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1849,7 +1849,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) * scsi status is good but transport level * underrun. */ - sc_cmd->result = DID_OK << 16; + sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ? + DID_OK : DID_ERROR) << 16; } else { /* * scsi got underrun, this is an error -- cgit v1.2.3-59-g8ed1b From 1b69bc062c2a4c8f3e15ac69f487afec3aa8d774 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 21 Oct 2009 16:27:17 -0700 Subject: [SCSI] libfc: lport: fix minor documentation errors Fix minor errors. A debug message said an RLIR was received instead of ECHO. "Expected" was misspelled in several places. Fix a type cast from u32 to __be32. Rob, Some of these may have been also taken care of in your other doc cleanup patch. Feel free to fold them in. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index bd2f77197447..eefe87d8efb3 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -329,7 +329,7 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) * @sp: current sequence in the RLIR exchange * @fp: RLIR request frame * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling * this function. 
*/ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, @@ -348,7 +348,7 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, * @sp: current sequence in the ECHO exchange * @fp: ECHO request frame * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, @@ -361,7 +361,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, void *dp; u32 f_ctl; - FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", + FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", fc_lport_state(lport)); len = fr_len(in_fp) - sizeof(struct fc_frame_header); @@ -374,7 +374,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, if (fp) { dp = fc_frame_payload_get(fp, len); memcpy(dp, pp, len); - *((u32 *)dp) = htonl(ELS_LS_ACC << 24); + *((__be32 *)dp) = htonl(ELS_LS_ACC << 24); sp = lport->tt.seq_start_next(sp); f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, @@ -385,12 +385,12 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, } /** - * fc_lport_recv_echo_req() - Handle received Request Node ID data request - * @lport: Fibre Channel local port recieving the RNID - * @sp: current sequence in the RNID exchange - * @fp: RNID request frame + * fc_lport_recv_rnid_req() - Handle received Request Node ID data request + * @sp: The sequence in the RNID exchange + * @fp: The RNID request frame + * @lport: The local port recieving the RNID * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, @@ -667,7 +667,7 @@ static void fc_lport_enter_ready(struct fc_lport *lport) * Accept it with the common service parameters indicating our N port. * Set up to do a PLOGI if we have the higher-number WWPN. * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, -- cgit v1.2.3-59-g8ed1b From 22655ac22289d7b7def8ef2d72eafe5024bd57fe Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 21 Oct 2009 16:27:22 -0700 Subject: [SCSI] libfc: don't WARN_ON in lport_timeout for RESET state It's possible and harmless to get FLOGI timeouts while in RESET state. Don't do a WARN_ON in that case. Also, split out the other WARN_ONs in fc_lport_timeout, so we can tell which one is hit by its line number. 
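A short sketch of the reasoning behind splitting the warnings (illustrative states only, not the libfc code): giving each unexpected case its own warning site means the reported file and line immediately identify which state fired, while the harmless case stays silent:

/* One warning per case gives a distinct line number in the log. */
#include <stdio.h>

#define WARN_HERE() fprintf(stderr, "unexpected state at %s:%d\n", __FILE__, __LINE__)

enum state { ST_DISABLED, ST_READY, ST_RESET, ST_FLOGI };

static void timeout(enum state s)
{
	switch (s) {
	case ST_DISABLED:
		WARN_HERE();	/* this line identifies the DISABLED case */
		break;
	case ST_READY:
		WARN_HERE();	/* distinct line identifies the READY case */
		break;
	case ST_RESET:
		break;		/* harmless: a FLOGI may time out during reset */
	case ST_FLOGI:
		/* retry login here */
		break;
	}
}

int main(void)
{
	timeout(ST_READY);
	return 0;
}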
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index eefe87d8efb3..0d19ffa88716 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1237,10 +1237,13 @@ static void fc_lport_timeout(struct work_struct *work) switch (lport->state) { case LPORT_ST_DISABLED: + WARN_ON(1); + break; case LPORT_ST_READY: - case LPORT_ST_RESET: WARN_ON(1); break; + case LPORT_ST_RESET: + break; case LPORT_ST_FLOGI: fc_lport_enter_flogi(lport); break; -- cgit v1.2.3-59-g8ed1b From 89f19a59de0ec4626c64d90d2f5e255961cab879 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 21 Oct 2009 16:27:28 -0700 Subject: [SCSI] libfc: removes initializing fc_cpu_order and fc_cpu_mask per lport Initializing these libfc globals per lport could mess up exch allocation/free for existing lport. So this patch moves their initialization to fc_setup_exch_mgr so that these globals gets initialized only once for libfc. Reported-by: Alex Lyakas Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index ae8f9e9ac958..bdae9a9e7ae9 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -2046,6 +2046,20 @@ int fc_exch_init(struct fc_lport *lp) if (!lp->tt.seq_exch_abort) lp->tt.seq_exch_abort = fc_seq_exch_abort; + return 0; +} +EXPORT_SYMBOL(fc_exch_init); + +/** + * fc_setup_exch_mgr() - Setup an exchange manager + */ +int fc_setup_exch_mgr() +{ + fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fc_em_cachep) + return -ENOMEM; + /* * Initialize fc_cpu_mask and fc_cpu_order. The * fc_cpu_mask is set for nr_cpu_ids rounded up @@ -2070,16 +2084,6 @@ int fc_exch_init(struct fc_lport *lp) return 0; } -EXPORT_SYMBOL(fc_exch_init); - -int fc_setup_exch_mgr(void) -{ - fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (!fc_em_cachep) - return -ENOMEM; - return 0; -} void fc_destroy_exch_mgr(void) { -- cgit v1.2.3-59-g8ed1b From 3f127ad97a985d43b3cdf4b644e77a775b6035d4 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 21 Oct 2009 16:27:33 -0700 Subject: [SCSI] libfc: adds missing exch release for accepted RRQ Adds missing exch release when RRQ is accepted by calling fc_seq_ls_acc. Adds common exch release for fc_exch_els_rrq by use of out label. Reported-by: Alex Lyakas Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index bdae9a9e7ae9..8ce418296537 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1718,7 +1718,7 @@ retry: */ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) { - struct fc_exch *ep; /* request or subject exchange */ + struct fc_exch *ep = NULL; /* request or subject exchange */ struct fc_els_rrq *rp; u32 sid; u16 xid; @@ -1768,15 +1768,16 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) * Send LS_ACC. 
*/ fc_seq_ls_acc(sp); - fc_frame_free(fp); - return; + goto out; unlock_reject: spin_unlock_bh(&ep->ex_lock); - fc_exch_release(ep); /* drop hold from fc_exch_find */ reject: fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan); +out: fc_frame_free(fp); + if (ep) + fc_exch_release(ep); /* drop hold from fc_exch_find */ } struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, -- cgit v1.2.3-59-g8ed1b From e95147d8fa4e63bf6d8ff249f074d0047338fc61 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 21 Oct 2009 16:27:39 -0700 Subject: [SCSI] libfc: removes unused disc_work and ex_list Reported-by: Alex Lyakas Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 1 - include/scsi/libfc.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 8ce418296537..170cdf4bac97 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -75,7 +75,6 @@ struct fc_exch_mgr { struct kref kref; /* exchange mgr reference count */ u16 min_xid; /* min exchange ID */ u16 max_xid; /* max exchange ID */ - struct list_head ex_list; /* allocated exchanges list */ mempool_t *ep_pool; /* reserve ep's */ u16 pool_max_index; /* max exch array index in exch pool */ struct fc_exch_pool *pool; /* per cpu exch pool */ diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 65dc9aacbf70..4ff148580562 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -742,7 +742,6 @@ struct fc_lport { /* Miscellaneous */ struct delayed_work retry_work; - struct delayed_work disc_work; }; /* -- cgit v1.2.3-59-g8ed1b From 8eca355fa8af660557fbdd5506bde1392eee9bfe Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 21 Oct 2009 16:27:44 -0700 Subject: [SCSI] fcoe: initialize return value in fcoe_destroy When doing echo ethX > /sys..../destroy I am getting errors when the tear down succeeds. It looks like the reason for this is because the rc var is not getting set when the destruction works. This just sets it to zero. Signed-off-by: Mike Christie Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 7c898875838f..8702c8d728dd 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1631,7 +1631,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) { struct fcoe_interface *fcoe; struct net_device *netdev; - int rc; + int rc = 0; mutex_lock(&fcoe_config_mutex); #ifdef CONFIG_FCOE_MODULE -- cgit v1.2.3-59-g8ed1b From 7221d7e59d1c675828b6de50b757cd8282011a5d Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 21 Oct 2009 16:27:52 -0700 Subject: [SCSI] fcoe: Use NETIF_F_FCOE_MTU flag to set up max frame size (lport->mfs) Add a define of FCOE_MTU as 2158 bytes and use FCOE_MTU when the LLD is found to support NETIF_F_FCOE_MTU. The lport->mfs is then calculated out of the 2158 FCOE_MTU. Otherwise, we stick with the netdev->mtu, i.e., LAN MTU. Also, change the notification on NETDEV_CHANGEMTU event to bypass changing mfs when LAN MTU is changed if NETIF_F_FCOE_MTU is supported. 
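The arithmetic behind the new 2158-byte constant, and the mfs derived from it, can be checked directly. The sketch below is illustrative only; it assumes a 14-byte FCoE header and an 8-byte CRC/EOF trailer, which match the component sizes listed in the patch comment (4 bytes FC CRC plus 4 bytes FCoE trailer):

/* Worked arithmetic for FCOE_MTU and the lport max frame size (mfs). */
#include <stdio.h>

int main(void)
{
	int fcoe_hdr = 14, fc_hdr = 24, fc_payload = 2112;
	int fc_crc = 4, fcoe_trailer = 4;

	int fcoe_mtu = fcoe_hdr + fc_hdr + fc_payload + fc_crc + fcoe_trailer;

	/* assumed: sizeof(fcoe_hdr) == 14, sizeof(fcoe_crc_eof) == 8 */
	int mfs = fcoe_mtu - (14 + 8);

	printf("FCOE_MTU = %d\n", fcoe_mtu);	/* 2158 */
	printf("mfs      = %d\n", mfs);		/* 2136 = FC header + max payload */
	return 0;
}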
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 10 ++++++++-- drivers/scsi/fcoe/fcoe.h | 6 ++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 8702c8d728dd..c66b9fa7d674 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -439,8 +439,12 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) * user-configured limit. If the MFS is too low, fcoe_link_ok() * will return 0, so do this first. */ - mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + - sizeof(struct fcoe_crc_eof)); + mfs = netdev->mtu; + if (netdev->features & NETIF_F_FCOE_MTU) { + mfs = FCOE_MTU; + FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs); + } + mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)); if (fc_set_mfs(lp, mfs)) return -EINVAL; @@ -1570,6 +1574,8 @@ static int fcoe_device_notification(struct notifier_block *notifier, case NETDEV_CHANGE: break; case NETDEV_CHANGEMTU: + if (netdev->features & NETIF_F_FCOE_MTU) + break; mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)); if (mfs >= FC_MIN_MAX_FRAME) diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index ce7f60fb1bc0..c578082aef8b 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -40,6 +40,12 @@ #define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ #define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ +/* + * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload) + * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes + */ +#define FCOE_MTU 2158 + unsigned int fcoe_debug_logging; module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); -- cgit v1.2.3-59-g8ed1b From d37322a43ebac79eef417149f5696390cf8872db Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 21 Oct 2009 16:27:58 -0700 Subject: [SCSI] libfc: Fix frags in frame exceeding SKB_MAX_FRAGS in fc_fcp_send_data In case of sequence offload, in fc_fcp_send_data(), the skb_fill_page_info() called may end up adding more frags to the skb_shinfo(fp_skb(fp))->frags[], exceeding SKB_MAX_FRAGS, this eventually corrupts the memory. I am adding the FR_FRAME_SG_LEN back, but as SKB_MAX_FRAGS -1, leaving 1 for our fcoe_eof_crc page. And send will be broken into multiple large sends if the frame already contains more frags than skb handle. 
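The general technique is to cap the number of page fragments attached to one frame and start a new frame when the cap is reached, reserving one fragment slot for the trailer page. A stand-alone sketch with made-up numbers, not the libfc send path:

/* Split a large transfer into frames, capping fragments per frame. */
#include <stdio.h>

#define MAX_FRAGS	4		/* stand-in for MAX_SKB_FRAGS */
#define FRAME_SG_LEN	(MAX_FRAGS - 1)	/* leave one frag for the CRC/EOF page */

int main(void)
{
	int total_frags = 10;		/* fragments the whole I/O needs */
	int frame = 0;

	while (total_frags > 0) {
		int in_frame = 0;

		/* fill the current frame until the cap is hit */
		while (total_frags > 0 && in_frame < FRAME_SG_LEN) {
			in_frame++;
			total_frags--;
		}
		printf("frame %d: %d data frags + 1 trailer frag\n",
		       frame++, in_frame);
	}
	return 0;
}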
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 3 ++- include/scsi/fc_frame.h | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 40ed7442d9df..28bfe1c2c50a 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -574,7 +574,8 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, tlen -= sg_bytes; remaining -= sg_bytes; - if (tlen) + if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) && + (tlen)) continue; /* diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h index c35d2383cc26..148126dcf9e9 100644 --- a/include/scsi/fc_frame.h +++ b/include/scsi/fc_frame.h @@ -37,6 +37,9 @@ #define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */ #define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */ +/* Max number of skb frags allowed, reserving one for fcoe_crc_eof page */ +#define FC_FRAME_SG_LEN (MAX_SKB_FRAGS - 1) + #define fp_skb(fp) (&((fp)->skb)) #define fr_hdr(fp) ((fp)->skb.data) #define fr_len(fp) ((fp)->skb.len) -- cgit v1.2.3-59-g8ed1b From b7a727f1af953b00352d3a4b6c458c6e2872f94b Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 21 Oct 2009 16:28:03 -0700 Subject: [SCSI] fcoe: Call ndo_fcoe_enable/disable to turn FCoE feature on/off in LLD Calls ndo_fcoe_enabled() of the associated netdev upon creating the FCoE instance to make sure LLD has all necessary resources allocated and setup properly before passing FCoE traffic. Similarly, calls ndo_fcoe_disable() upon destroying the FCoE instance on the associated netdev to allow the LLD to release all allocated resources for FCoE. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index c66b9fa7d674..aef29afb6e71 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -161,9 +161,18 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, struct fcoe_ctlr *fip = &fcoe->ctlr; struct netdev_hw_addr *ha; u8 flogi_maddr[ETH_ALEN]; + const struct net_device_ops *ops; fcoe->netdev = netdev; + /* Let LLD initialize for FCoE */ + ops = netdev->netdev_ops; + if (ops->ndo_fcoe_enable) { + if (ops->ndo_fcoe_enable(netdev)) + FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE" + " specific feature for LLD.\n"); + } + /* Do not support for bonding device */ if ((netdev->priv_flags & IFF_MASTER_ALB) || (netdev->priv_flags & IFF_SLAVE_INACTIVE) || @@ -262,6 +271,7 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) struct net_device *netdev = fcoe->netdev; struct fcoe_ctlr *fip = &fcoe->ctlr; u8 flogi_maddr[ETH_ALEN]; + const struct net_device_ops *ops; /* * Don't listen for Ethernet packets anymore. 
@@ -281,6 +291,14 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) if (fip->spma) dev_unicast_delete(netdev, fip->ctl_src_addr); dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); + + /* Tell the LLD we are done w/ FCoE */ + ops = netdev->netdev_ops; + if (ops->ndo_fcoe_disable) { + if (ops->ndo_fcoe_disable(netdev)) + FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" + " specific feature for LLD.\n"); + } } /** -- cgit v1.2.3-59-g8ed1b From 8f550f937e9fdafa5c37e348e214aecec851ef3f Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Wed, 21 Oct 2009 16:28:09 -0700 Subject: [SCSI] libfc: fix memory corruption caused by double frees and bad error handling I was running into several different panics under stress, which I traced down to a few different possible slab corruption issues in error handling paths. I have not yet looked into why these exchange sends fail, but with these fixes my test system is much more stable under stress than before. fc_elsct_send() could fail and either leave the passed in frame intact (failure in fc_ct/els_fill) or the frame could have been freed if the failure was is fc_exch_seq_send(). The caller had no way of knowing, and there was a potential double free in the error handling in fc_fcp_rec(). Make fc_elsct_send() always free the frame before returning, and remove the fc_frame_free() call in fc_fcp_rec(). While fc_exch_seq_send() did always consume the frame, there were double free bugs in the error handling of fc_fcp_cmd_send() and fc_fcp_srr() as well. Numerous calls to error handling routines (fc_disc_error(), fc_lport_error(), fc_rport_error_retry() ) were passing in a frame pointer that had already been freed in the case of an error. I have changed the call sites to pass in a NULL pointer, but there may be more appropriate error codes to use. Question: Why do these error routines take a frame pointer anyway? I understand passing in a pointer encoded error to the response handlers, but the error routines take no action on a valid pointer and should never be called that way. 
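The convention the fix settles on is worth stating plainly: once called, the send helper owns the frame, so it frees it on failure and the caller must never free it again. A minimal user-space sketch of that ownership rule (not the libfc API; malloc/free stand in for frame allocation):

/* Ownership sketch: the callee always consumes the buffer, even on error. */
#include <stdlib.h>
#include <stdio.h>

struct frame { char payload[64]; };

/* Consumes fp unconditionally: either hands it off or frees it. */
static int send_frame(struct frame *fp, int simulate_error)
{
	if (simulate_error) {
		free(fp);		/* callee frees on failure ... */
		return -1;
	}
	printf("frame handed to hardware\n");
	free(fp);			/* pretend completion freed it */
	return 0;
}

int main(void)
{
	struct frame *fp = malloc(sizeof(*fp));

	if (!fp)
		return 1;
	if (send_frame(fp, 1) < 0) {
		/* no free(fp) here -- that would be the double free */
		fprintf(stderr, "send failed, will retry later\n");
	}
	return 0;
}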
Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 2 +- drivers/scsi/libfc/fc_elsct.c | 4 +++- drivers/scsi/libfc/fc_fcp.c | 7 ++----- drivers/scsi/libfc/fc_lport.c | 8 ++++---- drivers/scsi/libfc/fc_rport.c | 10 +++++----- 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index c48799e9dd8e..d4cb3f9b1a0d 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -371,7 +371,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc) disc, lport->e_d_tov)) return; err: - fc_disc_error(disc, fp); + fc_disc_error(disc, NULL); } /** diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index 5cfa68732e9d..92984587ff4d 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -53,8 +53,10 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, did = FC_FID_DIR_SERV; } - if (rc) + if (rc) { + fc_frame_free(fp); return NULL; + } fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 28bfe1c2c50a..a67f53a5026c 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1051,7 +1051,6 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); if (!seq) { - fc_frame_free(fp); rc = -1; goto unlock; } @@ -1316,7 +1315,6 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ return; } - fc_frame_free(fp); retry: if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); @@ -1564,10 +1562,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); - if (!seq) { - fc_frame_free(fp); + if (!seq) goto retry; - } + fsp->recov_seq = seq; fsp->xfer_len = offset; fsp->xfer_contig_end = offset; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 0d19ffa88716..536492ae6a88 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1115,7 +1115,7 @@ static void fc_lport_enter_scr(struct fc_lport *lport) if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, fc_lport_scr_resp, lport, lport->e_d_tov)) - fc_lport_error(lport, fp); + fc_lport_error(lport, NULL); } /** @@ -1186,7 +1186,7 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport) if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID, fc_lport_rpn_id_resp, lport, lport->e_d_tov)) - fc_lport_error(lport, fp); + fc_lport_error(lport, NULL); } static struct fc_rport_operations fc_lport_rport_ops = { @@ -1340,7 +1340,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport) if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, fc_lport_logo_resp, lport, lport->e_d_tov)) - fc_lport_error(lport, fp); + fc_lport_error(lport, NULL); } /** @@ -1456,7 +1456,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport) if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI, fc_lport_flogi_resp, lport, lport->e_d_tov)) - fc_lport_error(lport, fp); + fc_lport_error(lport, NULL); } /* Configure a fc_lport */ diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 1f795e4e4742..49abb839a223 100644 --- 
a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -632,7 +632,7 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, fc_rport_plogi_resp, rdata, lport->e_d_tov)) - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } @@ -793,7 +793,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, fc_rport_prli_resp, rdata, lport->e_d_tov)) - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } @@ -889,7 +889,7 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, fc_rport_rtv_resp, rdata, lport->e_d_tov)) - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } @@ -919,7 +919,7 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, fc_rport_logo_resp, rdata, lport->e_d_tov)) - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } @@ -1006,7 +1006,7 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, fc_rport_adisc_resp, rdata, lport->e_d_tov)) - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } -- cgit v1.2.3-59-g8ed1b From d5cf4b28e13989ace24cf26de1e1debec18e9685 Mon Sep 17 00:00:00 2001 From: Abhijeet Joglekar Date: Wed, 21 Oct 2009 16:28:14 -0700 Subject: [SCSI] fnic: Process all cq entries per ISR Driver was processing a fixed max number of cq descriptors per ISR. For instance, for the SCSI IO queue, number of IOs processed per ISR were 8. If hardware writes 9 cq descriptors to the cq and generates an interrupt, driver would process only 8 descriptors and decrement the outstanding credit count by 8. Unless another interrupt event happens, the hw does not generate any additional interrupt. This results in the cq descriptor sitting in the queue without being procesed and can cause IO timeouts and aborts. Modify all ISR functions to process all queued cq descriptors in one shot. Since bulk of ELS frame processing is done in thread context and bulk of SCSI IO processing is done in soft ISR deferred context, the cycles spent in the ISR per cq descriptor is small. 
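The difference between a fixed budget and a drain-until-empty loop is easy to demonstrate: with edge-style interrupt credits, completions beyond the budget sit in the ring until some other event arrives. A stand-alone sketch where a negative budget means "no limit", mirroring the -1 now passed by the ISRs (ring layout and names are illustrative, not the fnic code):

/* Drain a completion ring; budget < 0 means "process everything". */
#include <stdio.h>

static int head, tail;			/* head: next descriptor to consume */

static int cq_pending(void) { return tail - head; }

static unsigned long cmpl_handler(int budget)
{
	unsigned long done = 0;

	while (cq_pending() && (budget < 0 || done < (unsigned long)budget)) {
		head++;			/* consume one descriptor */
		done++;
	}
	return done;			/* credits returned to the hardware */
}

int main(void)
{
	head = 0; tail = 9;		/* 9 completions queued, no new event coming */
	printf("budget 8 : handled %lu, stranded %d\n", cmpl_handler(8), cq_pending());

	head = 0; tail = 9;
	printf("budget -1: handled %lu, stranded %d\n", cmpl_handler(-1), cq_pending());
	return 0;
}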
Signed-off-by: Herman Lee Signed-off-by: Abhijeet Joglekar Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic_isr.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index 2b3064828aea..5c1f223cabce 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c @@ -48,9 +48,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data) } if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) { - work_done += fnic_wq_copy_cmpl_handler(fnic, 8); - work_done += fnic_wq_cmpl_handler(fnic, 4); - work_done += fnic_rq_cmpl_handler(fnic, 4); + work_done += fnic_wq_copy_cmpl_handler(fnic, -1); + work_done += fnic_wq_cmpl_handler(fnic, -1); + work_done += fnic_rq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ], work_done, @@ -66,9 +66,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data) struct fnic *fnic = data; unsigned long work_done = 0; - work_done += fnic_wq_copy_cmpl_handler(fnic, 8); - work_done += fnic_wq_cmpl_handler(fnic, 4); - work_done += fnic_rq_cmpl_handler(fnic, 4); + work_done += fnic_wq_copy_cmpl_handler(fnic, -1); + work_done += fnic_wq_cmpl_handler(fnic, -1); + work_done += fnic_rq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[0], work_done, @@ -83,7 +83,7 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data) struct fnic *fnic = data; unsigned long rq_work_done = 0; - rq_work_done = fnic_rq_cmpl_handler(fnic, 4); + rq_work_done = fnic_rq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], rq_work_done, 1 /* unmask intr */, @@ -97,7 +97,7 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data) struct fnic *fnic = data; unsigned long wq_work_done = 0; - wq_work_done = fnic_wq_cmpl_handler(fnic, 4); + wq_work_done = fnic_wq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], wq_work_done, 1 /* unmask intr */, @@ -110,7 +110,7 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data) struct fnic *fnic = data; unsigned long wq_copy_work_done = 0; - wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8); + wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], wq_copy_work_done, 1 /* unmask intr */, -- cgit v1.2.3-59-g8ed1b From f9bdc3da4c9c2af4886bc6a562effc05cbf75234 Mon Sep 17 00:00:00 2001 From: Abhijeet Joglekar Date: Wed, 21 Oct 2009 16:28:19 -0700 Subject: [SCSI] fnic: Set max_cmd_len to driver supported CDB length Signed-off-by: Abhijeet Joglekar Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic.h | 2 +- drivers/scsi/fnic/fnic_main.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index e4c0a3d7d87b..1bc267e892d2 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -44,7 +44,7 @@ #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ #define FNIC_DFLT_QUEUE_DEPTH 32 #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ - +#define FNIC_MAX_CMD_LEN 16 /* Supported CDB length */ /* * Tag bits used for special requests. 
*/ diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 71c7bbe26d05..b0d425ab30ab 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -560,6 +560,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, } host->max_lun = fnic->config.luns_per_tgt; host->max_id = FNIC_MAX_FCP_TARGET; + host->max_cmd_len = FNIC_MAX_CMD_LEN; fnic_get_res_counts(fnic); -- cgit v1.2.3-59-g8ed1b From 4b53662bd594941e5e5e540baaaff6a3e66d062c Mon Sep 17 00:00:00 2001 From: Abhijeet Joglekar Date: Wed, 21 Oct 2009 16:28:25 -0700 Subject: [SCSI] fnic: Pad the unused bytes of CDB to 0s Signed-off-by: Abhijeet Joglekar Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic_res.h | 4 +++- drivers/scsi/fnic/fnic_scsi.c | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h index b6f310262534..88c4471c18f0 100644 --- a/drivers/scsi/fnic/fnic_res.h +++ b/drivers/scsi/fnic/fnic_res.h @@ -58,6 +58,7 @@ static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, u64 sgl_addr, u64 sns_addr, u8 crn, u8 pri_ta, u8 flags, u8 *scsi_cdb, + u8 cdb_len, u32 data_len, u8 *lun, u32 d_id, u16 mss, u32 ratov, u32 edtov) @@ -82,7 +83,8 @@ static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */ desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */ desc->u.icmnd_16.flags = flags; /* command flags */ - memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16); /* SCSI CDB */ + memset(desc->u.icmnd_16.scsi_cdb, 0, CDB_16); + memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, cdb_len); /* SCSI CDB */ desc->u.icmnd_16.data_len = data_len; /* length of data expected */ memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */ desc->u.icmnd_16._resvd2 = 0; /* reserved */ diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index bfc996971b81..b5d17385939b 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -319,7 +319,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, 0, /* scsi cmd ref, always 0 */ pri_tag, /* scsi pri and tag */ flags, /* command flags */ - sc->cmnd, scsi_bufflen(sc), + sc->cmnd, sc->cmd_len, + scsi_bufflen(sc), fc_lun.scsi_lun, io_req->port_id, rport->maxframe_size, rp->r_a_tov, rp->e_d_tov); -- cgit v1.2.3-59-g8ed1b From b4a9c7ede96e90f7b1ec009ce7256059295e76df Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 21 Oct 2009 16:28:30 -0700 Subject: [SCSI] libfc: fix free of fc_rport_priv with timer pending Timer crashes were caused by freeing a struct fc_rport_priv with a timer pending, causing the timer facility list to be corrupted. This was during FC uplink flap tests with a lot of targets. After discovery, we were doing an PLOGI on an rdata that was in DELETE state but not yet removed from the lookup list. This moved the rdata from DELETE state to PLOGI state. If the PLOGI exchange allocation failed and needed to be retried, the timer scheduling could race with the free being done by fc_rport_work(). When fc_rport_login() is called on a rport in DELETE state, move it to a new state RESTART. In fc_rport_work, when handling a LOGO, STOPPED or FAILED event, look for restart state. In the RESTART case, don't take the rdata off the list and after the transport remote port is deleted and exchanges are reset, re-login to the remote port. 
Note that the new RESTART state also corrects a problem we had when re-discovering a port that had moved to DELETE state. In that case, a new rdata was created, but the old rdata would do an exchange manager reset affecting the FC_ID for both the new rdata and old rdata. With the new state, the new port isn't logged into until after any old exchanges are reset. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 69 +++++++++++++++++++++++++++++++------------ include/scsi/libfc.h | 1 + 2 files changed, 51 insertions(+), 19 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 49abb839a223..324e156b5d07 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -86,6 +86,7 @@ static const char *fc_rport_state_names[] = { [RPORT_ST_LOGO] = "LOGO", [RPORT_ST_ADISC] = "ADISC", [RPORT_ST_DELETE] = "Delete", + [RPORT_ST_RESTART] = "Restart", }; /** @@ -99,8 +100,7 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, struct fc_rport_priv *rdata; list_for_each_entry(rdata, &lport->disc.rports, peers) - if (rdata->ids.port_id == port_id && - rdata->rp_state != RPORT_ST_DELETE) + if (rdata->ids.port_id == port_id) return rdata; return NULL; } @@ -235,6 +235,7 @@ static void fc_rport_work(struct work_struct *work) struct fc_rport_operations *rport_ops; struct fc_rport_identifiers ids; struct fc_rport *rport; + int restart = 0; mutex_lock(&rdata->rp_mutex); event = rdata->event; @@ -287,8 +288,19 @@ static void fc_rport_work(struct work_struct *work) mutex_unlock(&rdata->rp_mutex); if (port_id != FC_FID_DIR_SERV) { + /* + * We must drop rp_mutex before taking disc_mutex. + * Re-evaluate state to allow for restart. + * A transition to RESTART state must only happen + * while disc_mutex is held and rdata is on the list. + */ mutex_lock(&lport->disc.disc_mutex); - list_del(&rdata->peers); + mutex_lock(&rdata->rp_mutex); + if (rdata->rp_state == RPORT_ST_RESTART) + restart = 1; + else + list_del(&rdata->peers); + mutex_unlock(&rdata->rp_mutex); mutex_unlock(&lport->disc.disc_mutex); } @@ -312,7 +324,13 @@ static void fc_rport_work(struct work_struct *work) mutex_unlock(&rdata->rp_mutex); fc_remote_port_delete(rport); } - kref_put(&rdata->kref, lport->tt.rport_destroy); + if (restart) { + mutex_lock(&rdata->rp_mutex); + FC_RPORT_DBG(rdata, "work restart\n"); + fc_rport_enter_plogi(rdata); + mutex_unlock(&rdata->rp_mutex); + } else + kref_put(&rdata->kref, lport->tt.rport_destroy); break; default: @@ -342,6 +360,12 @@ int fc_rport_login(struct fc_rport_priv *rdata) FC_RPORT_DBG(rdata, "ADISC port\n"); fc_rport_enter_adisc(rdata); break; + case RPORT_ST_RESTART: + break; + case RPORT_ST_DELETE: + FC_RPORT_DBG(rdata, "Restart deleted port\n"); + fc_rport_state_enter(rdata, RPORT_ST_RESTART); + break; default: FC_RPORT_DBG(rdata, "Login to port\n"); fc_rport_enter_plogi(rdata); @@ -397,20 +421,21 @@ int fc_rport_logoff(struct fc_rport_priv *rdata) if (rdata->rp_state == RPORT_ST_DELETE) { FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); - mutex_unlock(&rdata->rp_mutex); goto out; } - fc_rport_enter_logo(rdata); + if (rdata->rp_state == RPORT_ST_RESTART) + FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n"); + else + fc_rport_enter_logo(rdata); /* * Change the state to Delete so that we discard * the response. 
*/ fc_rport_enter_delete(rdata, RPORT_EV_STOP); - mutex_unlock(&rdata->rp_mutex); - out: + mutex_unlock(&rdata->rp_mutex); return 0; } @@ -466,6 +491,7 @@ static void fc_rport_timeout(struct work_struct *work) case RPORT_ST_READY: case RPORT_ST_INIT: case RPORT_ST_DELETE: + case RPORT_ST_RESTART: break; } @@ -499,6 +525,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) fc_rport_enter_logo(rdata); break; case RPORT_ST_DELETE: + case RPORT_ST_RESTART: case RPORT_ST_READY: case RPORT_ST_INIT: break; @@ -1248,6 +1275,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, } break; case RPORT_ST_PRLI: + case RPORT_ST_RTV: case RPORT_ST_READY: case RPORT_ST_ADISC: FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " @@ -1255,11 +1283,14 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, /* XXX TBD - should reset */ break; case RPORT_ST_DELETE: - default: - FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n", - rdata->rp_state); - fc_frame_free(rx_fp); - goto out; + case RPORT_ST_LOGO: + case RPORT_ST_RESTART: + FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", + fc_rport_state(rdata)); + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_BUSY; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; } /* @@ -1510,14 +1541,14 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", fc_rport_state(rdata)); + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); + /* - * If the remote port was created due to discovery, - * log back in. It may have seen a stale RSCN about us. + * If the remote port was created due to discovery, set state + * to log back in. It may have seen a stale RSCN about us. */ - if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id) - fc_rport_enter_plogi(rdata); - else - fc_rport_enter_delete(rdata, RPORT_EV_LOGO); + if (rdata->disc_id) + fc_rport_state_enter(rdata, RPORT_ST_RESTART); mutex_unlock(&rdata->rp_mutex); } else FC_RPORT_ID_DBG(lport, sid, diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 4ff148580562..1662d73d85a7 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -145,6 +145,7 @@ enum fc_rport_state { RPORT_ST_LOGO, /* port logout sent */ RPORT_ST_ADISC, /* Discover Address sent */ RPORT_ST_DELETE, /* port being deleted */ + RPORT_ST_RESTART, /* remote port being deleted and will restart */ }; /** -- cgit v1.2.3-59-g8ed1b From bfead3b2cb4607c71831423c3ee97d22cd0c9dcb Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Fri, 23 Oct 2009 11:52:33 +0530 Subject: [SCSI] be2iscsi: Adding msix and mcc_rings V3 This patch enables msix for be2iscsi. It also enables use of mcc_rings for fw commands. 
Since the mcc eq creation is dependent on msix I am sending as one patch Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be.h | 24 +- drivers/scsi/be2iscsi/be_cmds.c | 263 ++++++++++-- drivers/scsi/be2iscsi/be_cmds.h | 37 +- drivers/scsi/be2iscsi/be_iscsi.c | 23 +- drivers/scsi/be2iscsi/be_main.c | 888 +++++++++++++++++++++++++++++---------- drivers/scsi/be2iscsi/be_main.h | 49 ++- drivers/scsi/be2iscsi/be_mgmt.c | 69 ++- drivers/scsi/be2iscsi/be_mgmt.h | 8 +- 8 files changed, 1030 insertions(+), 331 deletions(-) diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index b36020dcf012..a93a5040f087 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -20,8 +20,10 @@ #include #include - -#define FW_VER_LEN 32 +#include +#define FW_VER_LEN 32 +#define MCC_Q_LEN 128 +#define MCC_CQ_LEN 256 struct be_dma_mem { void *va; @@ -74,18 +76,14 @@ static inline void queue_tail_inc(struct be_queue_info *q) struct be_eq_obj { struct be_queue_info q; - char desc[32]; - - /* Adaptive interrupt coalescing (AIC) info */ - bool enable_aic; - u16 min_eqd; /* in usecs */ - u16 max_eqd; /* in usecs */ - u16 cur_eqd; /* in usecs */ + struct beiscsi_hba *phba; + struct be_queue_info *cq; + struct blk_iopoll iopoll; }; struct be_mcc_obj { - struct be_queue_info *q; - struct be_queue_info *cq; + struct be_queue_info q; + struct be_queue_info cq; }; struct be_ctrl_info { @@ -176,8 +174,4 @@ static inline void swap_dws(void *wrb, int len) } while (len); #endif /* __BIG_ENDIAN */ } - -extern void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, - u16 num_popped); - #endif /* BEISCSI_H */ diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 08007b6e42df..10f8fe7a38d2 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -19,6 +19,16 @@ #include "be_mgmt.h" #include "be_main.h" +static void be_mcc_notify(struct beiscsi_hba *phba) +{ + struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + u32 val = 0; + + val |= mccq->id & DB_MCCQ_RING_ID_MASK; + val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; + iowrite32(val, phba->db_va + DB_MCCQ_OFFSET); +} + static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) { if (compl->flags != 0) { @@ -54,13 +64,56 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, return 0; } + static inline bool is_link_state_evt(u32 trailer) { return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & - ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE); + ASYNC_TRAILER_EVENT_CODE_MASK) == + ASYNC_EVENT_CODE_LINK_STATE); +} + +static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba) +{ + struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq; + struct be_mcc_compl *compl = queue_tail_node(mcc_cq); + + if (be_mcc_compl_is_new(compl)) { + queue_tail_inc(mcc_cq); + return compl; + } + return NULL; +} + +static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) +{ + iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); +} + +static void beiscsi_async_link_state_process(struct beiscsi_hba *phba, + struct be_async_event_link_state *evt) +{ + switch (evt->port_link_status) { + case ASYNC_EVENT_LINK_DOWN: + SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n", + evt->physical_port); + phba->state |= BE_ADAPTER_LINK_DOWN; + break; + case ASYNC_EVENT_LINK_UP: + phba->state = BE_ADAPTER_UP; + SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n", + evt->physical_port); + 
iscsi_host_for_each_session(phba->shost, + be2iscsi_fail_session); + break; + default: + SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on" + "Physical Port %d \n", + evt->port_link_status, + evt->physical_port); + } } -void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, +static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm, u16 num_popped) { u32 val = 0; @@ -68,7 +121,66 @@ void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, if (arm) val |= 1 << DB_CQ_REARM_SHIFT; val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; - iowrite32(val, ctrl->db + DB_CQ_OFFSET); + iowrite32(val, phba->db_va + DB_CQ_OFFSET); +} + + +int be_process_mcc(struct beiscsi_hba *phba) +{ + struct be_mcc_compl *compl; + int num = 0, status = 0; + struct be_ctrl_info *ctrl = &phba->ctrl; + + spin_lock_bh(&phba->ctrl.mcc_cq_lock); + while ((compl = be_mcc_compl_get(phba))) { + if (compl->flags & CQE_FLAGS_ASYNC_MASK) { + /* Interpret flags as an async trailer */ + BUG_ON(!is_link_state_evt(compl->flags)); + + /* Interpret compl as a async link evt */ + beiscsi_async_link_state_process(phba, + (struct be_async_event_link_state *) compl); + } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { + status = be_mcc_compl_process(ctrl, compl); + atomic_dec(&phba->ctrl.mcc_obj.q.used); + } + be_mcc_compl_use(compl); + num++; + } + + if (num) + beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num); + + spin_unlock_bh(&phba->ctrl.mcc_cq_lock); + return status; +} + +/* Wait till no more pending mcc requests are present */ +static int be_mcc_wait_compl(struct beiscsi_hba *phba) +{ +#define mcc_timeout 120000 /* 5s timeout */ + int i, status; + for (i = 0; i < mcc_timeout; i++) { + status = be_process_mcc(phba); + if (status) + return status; + + if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0) + break; + udelay(100); + } + if (i == mcc_timeout) { + dev_err(&phba->pcidev->dev, "mccq poll timed out\n"); + return -1; + } + return 0; +} + +/* Notify MCC requests and wait for completion */ +int be_mcc_notify_wait(struct beiscsi_hba *phba) +{ + be_mcc_notify(phba); + return be_mcc_wait_compl(phba); } static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) @@ -142,6 +254,52 @@ int be_mbox_notify(struct be_ctrl_info *ctrl) return 0; } +/* + * Insert the mailbox address into the doorbell in two steps + * Polls on the mbox doorbell till a command completion (or a timeout) occurs + */ +static int be_mbox_notify_wait(struct beiscsi_hba *phba) +{ + int status; + u32 val = 0; + void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET; + struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem; + struct be_mcc_mailbox *mbox = mbox_mem->va; + struct be_mcc_compl *compl = &mbox->compl; + struct be_ctrl_info *ctrl = &phba->ctrl; + + val |= MPU_MAILBOX_DB_HI_MASK; + /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ + val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; + iowrite32(val, db); + + /* wait for ready to be set */ + status = be_mbox_db_ready_wait(ctrl); + if (status != 0) + return status; + + val = 0; + /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ + val |= (u32)(mbox_mem->dma >> 4) << 2; + iowrite32(val, db); + + status = be_mbox_db_ready_wait(ctrl); + if (status != 0) + return status; + + /* A cq entry has been made now */ + if (be_mcc_compl_is_new(compl)) { + status = be_mcc_compl_process(ctrl, &mbox->compl); + be_mcc_compl_use(compl); + if (status) + return status; + } else { + dev_err(&phba->pcidev->dev, "invalid mailbox completion\n"); + return -1; + } + 
return 0; +} + void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, bool embedded, u8 sge_cnt) { @@ -203,6 +361,20 @@ struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem) return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; } +struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba) +{ + struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + struct be_mcc_wrb *wrb; + + BUG_ON(atomic_read(&mccq->used) >= mccq->len); + wrb = queue_head_node(mccq); + queue_head_inc(mccq); + atomic_inc(&mccq->used); + memset(wrb, 0, sizeof(*wrb)); + return wrb; +} + + int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, struct be_queue_info *eq, int eq_delay) { @@ -212,6 +384,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem = &eq->dma_mem; int status; + SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n"); spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); @@ -249,6 +422,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) int status; u8 *endian_check; + SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n"); spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); @@ -282,6 +456,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, void *ctxt = &req->context; int status; + SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create \n"); spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); @@ -289,7 +464,6 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_CQ_CREATE, sizeof(*req)); - if (!q_mem->va) SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n"); @@ -329,6 +503,53 @@ static u32 be_encoded_q_len(int q_len) len_encoded = 0; return len_encoded; } + +int be_cmd_mccq_create(struct beiscsi_hba *phba, + struct be_queue_info *mccq, + struct be_queue_info *cq) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_mcc_create *req; + struct be_dma_mem *q_mem = &mccq->dma_mem; + struct be_ctrl_info *ctrl; + void *ctxt; + int status; + + spin_lock(&phba->ctrl.mbox_lock); + ctrl = &phba->ctrl; + wrb = wrb_from_mbox(&ctrl->mbox_mem); + req = embedded_payload(wrb); + ctxt = &req->context; + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MCC_CREATE, sizeof(*req)); + + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + + AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, + PCI_FUNC(phba->pcidev->devfn)); + AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, + be_encoded_q_len(mccq->len)); + AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify_wait(phba); + if (!status) { + struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); + mccq->id = le16_to_cpu(resp->id); + mccq->created = true; + } + spin_unlock(&phba->ctrl.mbox_lock); + + return status; +} + int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, int queue_type) { @@ -337,6 +558,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, u8 subsys = 0, opcode = 0; int status; + SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy \n"); spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -350,6 +572,10 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, subsys = CMD_SUBSYSTEM_COMMON; opcode = 
OPCODE_COMMON_CQ_DESTROY; break; + case QTYPE_MCCQ: + subsys = CMD_SUBSYSTEM_COMMON; + opcode = OPCODE_COMMON_MCC_DESTROY; + break; case QTYPE_WRBQ: subsys = CMD_SUBSYSTEM_ISCSI; opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY; @@ -377,30 +603,6 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, return status; } -int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr) -{ - struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); - struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb); - int status; - - spin_lock(&ctrl->mbox_lock); - memset(wrb, 0, sizeof(*wrb)); - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); - be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, - OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG, - sizeof(*req)); - - status = be_mbox_notify(ctrl); - if (!status) { - struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb); - - memcpy(mac_addr, resp->mac_address, ETH_ALEN); - } - - spin_unlock(&ctrl->mbox_lock); - return status; -} - int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, struct be_queue_info *cq, struct be_queue_info *dq, int length, @@ -412,6 +614,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, void *ctxt = &req->context; int status; + SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n"); spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); @@ -468,8 +671,10 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify(ctrl); - if (!status) + if (!status) { wrbq->id = le16_to_cpu(resp->cid); + wrbq->created = true; + } spin_unlock(&ctrl->mbox_lock); return status; } diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index c20d686cbb43..76fe1f9dd4cb 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -47,6 +47,8 @@ struct be_mcc_wrb { #define CQE_FLAGS_VALID_MASK (1 << 31) #define CQE_FLAGS_ASYNC_MASK (1 << 30) +#define CQE_FLAGS_COMPLETED_MASK (1 << 28) +#define CQE_FLAGS_CONSUMED_MASK (1 << 27) /* Completion Status */ #define MCC_STATUS_SUCCESS 0x0 @@ -173,7 +175,7 @@ struct be_cmd_req_hdr { u8 domain; /* dword 0 */ u32 timeout; /* dword 1 */ u32 request_length; /* dword 2 */ - u32 rsvd; /* dword 3 */ + u32 rsvd0; /* dword 3 */ }; struct be_cmd_resp_hdr { @@ -382,7 +384,6 @@ struct be_cmd_req_modify_eq_delay { #define ETH_ALEN 6 - struct be_cmd_req_get_mac_addr { struct be_cmd_req_hdr hdr; u32 nic_port_count; @@ -417,14 +418,21 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, int type); +int be_cmd_mccq_create(struct beiscsi_hba *phba, + struct be_queue_info *mccq, + struct be_queue_info *cq); + int be_poll_mcc(struct be_ctrl_info *ctrl); -unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl); -int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr); +unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl, + struct beiscsi_hba *phba); +int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr); /*ISCSI Functuions */ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); +struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); +int be_mcc_notify_wait(struct beiscsi_hba *phba); int be_mbox_notify(struct be_ctrl_info *ctrl); @@ -531,6 +539,23 @@ struct amap_sol_cqe { u8 valid; /* dword 3 */ } __packed; +#define SOL_ICD_INDEX_MASK 
0x0003FFC0 +struct amap_sol_cqe_ring { + u8 hw_sts[8]; /* dword 0 */ + u8 i_sts[8]; /* dword 0 */ + u8 i_resp[8]; /* dword 0 */ + u8 i_flags[7]; /* dword 0 */ + u8 s; /* dword 0 */ + u8 i_exp_cmd_sn[32]; /* dword 1 */ + u8 code[6]; /* dword 2 */ + u8 icd_index[12]; /* dword 2 */ + u8 rsvd[6]; /* dword 2 */ + u8 i_cmd_wnd[8]; /* dword 2 */ + u8 i_res_cnt[31]; /* dword 3 */ + u8 valid; /* dword 3 */ +} __packed; + + /** * Post WRB Queue Doorbell Register used by the host Storage @@ -664,8 +689,8 @@ struct be_fw_cfg { #define OPCODE_COMMON_TCP_UPLOAD 56 #define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1 /* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */ -#define CMD_ISCSI_CONNECTION_INVALIDATE 1 -#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 2 +#define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001 +#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002 #define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42 #define INI_WR_CMD 1 /* Initiator write command */ diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 2fd25442cfaf..d587b0362f18 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -67,11 +67,11 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep, cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn; } - cls_session = iscsi_session_setup(&beiscsi_iscsi_transport, - shost, cmds_max, - sizeof(*beiscsi_sess), - sizeof(*io_task), - initial_cmdsn, ISCSI_MAX_TARGET); + cls_session = iscsi_session_setup(&beiscsi_iscsi_transport, + shost, cmds_max, + sizeof(*beiscsi_sess), + sizeof(*io_task), + initial_cmdsn, ISCSI_MAX_TARGET); if (!cls_session) return NULL; sess = cls_session->dd_data; @@ -297,7 +297,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: - be_cmd_get_mac_addr(&phba->ctrl, phba->mac_address); + be_cmd_get_mac_addr(phba, phba->mac_address); len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); break; default: @@ -377,16 +377,12 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn) struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_endpoint *beiscsi_ep; struct beiscsi_offload_params params; - struct iscsi_session *session = conn->session; - struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); - struct beiscsi_hba *phba = iscsi_host_priv(shost); memset(¶ms, 0, sizeof(struct beiscsi_offload_params)); beiscsi_ep = beiscsi_conn->ep; if (!beiscsi_ep) SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n"); - free_mgmt_sgl_handle(phba, beiscsi_conn->plogin_sgl_handle); beiscsi_conn->login_in_progress = 0; beiscsi_set_params_for_offld(beiscsi_conn, ¶ms); beiscsi_offload_connection(beiscsi_conn, ¶ms); @@ -498,6 +494,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, SE_DEBUG(DBG_LVL_1, "shost is NULL \n"); return ERR_PTR(ret); } + + if (phba->state) { + ret = -EBUSY; + SE_DEBUG(DBG_LVL_1, "The Adapet state is Not UP \n"); + return ERR_PTR(ret); + } + ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint)); if (!ep) { ret = -ENOMEM; diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 4f1aca346e38..2c3e99eeff82 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -39,7 +39,7 @@ static unsigned int be_iopoll_budget = 10; static unsigned int be_max_phys_size = 64; -static unsigned int enable_msix; +static unsigned int enable_msix = 1; MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 
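/*
 * With enable_msix now defaulting to 1, the interrupt layout added by this
 * patch is: find_num_cpus() caps the I/O vectors at MAX_CPUS - 1,
 * beiscsi_create_eqs() creates one EQ per CPU plus (under MSI-X) one extra EQ
 * whose only consumer is the MCC completion queue, and beiscsi_init_irqs()
 * wires the per-CPU vectors to be_isr_msix() and the final vector to
 * be_isr_mcc(). Without MSI-X the shared be_isr() handler still demultiplexes
 * MCC and I/O completions from the single EQ.
 */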
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); @@ -58,6 +58,17 @@ static int beiscsi_slave_configure(struct scsi_device *sdev) return 0; } +/*------------------- PCI Driver operations and data ----------------- */ +static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { + { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); + static struct scsi_host_template beiscsi_sht = { .module = THIS_MODULE, .name = "ServerEngines 10Gbe open-iscsi Initiator Driver", @@ -76,16 +87,8 @@ static struct scsi_host_template beiscsi_sht = { .cmd_per_lun = BEISCSI_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, }; -static struct scsi_transport_template *beiscsi_scsi_transport; -/*------------------- PCI Driver operations and data ----------------- */ -static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { - { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, - { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, - { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, - { 0 } -}; -MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); +static struct scsi_transport_template *beiscsi_scsi_transport; static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) { @@ -104,7 +107,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; shost->max_lun = BEISCSI_NUM_MAX_LUN; shost->transportt = beiscsi_scsi_transport; - phba = iscsi_host_priv(shost); memset(phba, 0, sizeof(*phba)); phba->shost = shost; @@ -181,6 +183,7 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev) return ret; } + pci_set_master(pcidev); if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); if (ret) { @@ -203,7 +206,6 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) status = beiscsi_map_pci_bars(phba, pdev); if (status) return status; - mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; mbox_mem_alloc->va = pci_alloc_consistent(pdev, mbox_mem_alloc->size, @@ -219,6 +221,9 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); spin_lock_init(&ctrl->mbox_lock); + spin_lock_init(&phba->ctrl.mcc_lock); + spin_lock_init(&phba->ctrl.mcc_cq_lock); + return status; } @@ -267,6 +272,113 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba, iowrite32(val, phba->db_va + DB_EQ_OFFSET); } +/** + * be_isr_mcc - The isr routine of the driver. 
+ * @irq: Not used + * @dev_id: Pointer to host adapter structure + */ +static irqreturn_t be_isr_mcc(int irq, void *dev_id) +{ + struct beiscsi_hba *phba; + struct be_eq_entry *eqe = NULL; + struct be_queue_info *eq; + struct be_queue_info *mcc; + unsigned int num_eq_processed; + struct be_eq_obj *pbe_eq; + unsigned long flags; + + pbe_eq = dev_id; + eq = &pbe_eq->q; + phba = pbe_eq->phba; + mcc = &phba->ctrl.mcc_obj.cq; + eqe = queue_tail_node(eq); + if (!eqe) + SE_DEBUG(DBG_LVL_1, "eqe is NULL\n"); + + num_eq_processed = 0; + + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + if (((eqe->dw[offsetof(struct amap_eq_entry, + resource_id) / 32] & + EQE_RESID_MASK) >> 16) == mcc->id) { + spin_lock_irqsave(&phba->isr_lock, flags); + phba->todo_mcc_cq = 1; + spin_unlock_irqrestore(&phba->isr_lock, flags); + } + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + num_eq_processed++; + } + if (phba->todo_mcc_cq) + queue_work(phba->wq, &phba->work_cqs); + if (num_eq_processed) + hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); + + return IRQ_HANDLED; +} + +/** + * be_isr_msix - The isr routine of the driver. + * @irq: Not used + * @dev_id: Pointer to host adapter structure + */ +static irqreturn_t be_isr_msix(int irq, void *dev_id) +{ + struct beiscsi_hba *phba; + struct be_eq_entry *eqe = NULL; + struct be_queue_info *eq; + struct be_queue_info *cq; + unsigned int num_eq_processed; + struct be_eq_obj *pbe_eq; + unsigned long flags; + + pbe_eq = dev_id; + eq = &pbe_eq->q; + cq = pbe_eq->cq; + eqe = queue_tail_node(eq); + if (!eqe) + SE_DEBUG(DBG_LVL_1, "eqe is NULL\n"); + + phba = pbe_eq->phba; + num_eq_processed = 0; + if (blk_iopoll_enabled) { + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) + blk_iopoll_sched(&pbe_eq->iopoll); + + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + num_eq_processed++; + } + if (num_eq_processed) + hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1); + + return IRQ_HANDLED; + } else { + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + spin_lock_irqsave(&phba->isr_lock, flags); + phba->todo_cq = 1; + spin_unlock_irqrestore(&phba->isr_lock, flags); + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + num_eq_processed++; + } + if (phba->todo_cq) + queue_work(phba->wq, &phba->work_cqs); + + if (num_eq_processed) + hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); + + return IRQ_HANDLED; + } +} + /** * be_isr - The isr routine of the driver. 
* @irq: Not used @@ -280,48 +392,70 @@ static irqreturn_t be_isr(int irq, void *dev_id) struct be_eq_entry *eqe = NULL; struct be_queue_info *eq; struct be_queue_info *cq; + struct be_queue_info *mcc; unsigned long flags, index; - unsigned int num_eq_processed; + unsigned int num_mcceq_processed, num_ioeq_processed; struct be_ctrl_info *ctrl; + struct be_eq_obj *pbe_eq; int isr; phba = dev_id; - if (!enable_msix) { - ctrl = &phba->ctrl;; - isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + - (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); - if (!isr) - return IRQ_NONE; - } + ctrl = &phba->ctrl;; + isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + + (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); + if (!isr) + return IRQ_NONE; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - eq = &phwi_context->be_eq.q; - cq = &phwi_context->be_cq; + pbe_eq = &phwi_context->be_eq[0]; + + eq = &phwi_context->be_eq[0].q; + mcc = &phba->ctrl.mcc_obj.cq; index = 0; eqe = queue_tail_node(eq); if (!eqe) SE_DEBUG(DBG_LVL_1, "eqe is NULL\n"); - num_eq_processed = 0; + num_ioeq_processed = 0; + num_mcceq_processed = 0; if (blk_iopoll_enabled) { while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & EQE_VALID_MASK) { - if (!blk_iopoll_sched_prep(&phba->iopoll)) - blk_iopoll_sched(&phba->iopoll); - + if (((eqe->dw[offsetof(struct amap_eq_entry, + resource_id) / 32] & + EQE_RESID_MASK) >> 16) == mcc->id) { + spin_lock_irqsave(&phba->isr_lock, flags); + phba->todo_mcc_cq = 1; + spin_unlock_irqrestore(&phba->isr_lock, flags); + num_mcceq_processed++; + } else { + if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) + blk_iopoll_sched(&pbe_eq->iopoll); + num_ioeq_processed++; + } AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); queue_tail_inc(eq); eqe = queue_tail_node(eq); - num_eq_processed++; - SE_DEBUG(DBG_LVL_8, "Valid EQE\n"); } - if (num_eq_processed) { - hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 0, 1); + if (num_ioeq_processed || num_mcceq_processed) { + if (phba->todo_mcc_cq) + queue_work(phba->wq, &phba->work_cqs); + + if ((num_mcceq_processed) && (!num_ioeq_processed)) + hwi_ring_eq_db(phba, eq->id, 0, + (num_ioeq_processed + + num_mcceq_processed) , 1, 1); + else + hwi_ring_eq_db(phba, eq->id, 0, + (num_ioeq_processed + + num_mcceq_processed), 0, 1); + return IRQ_HANDLED; } else return IRQ_NONE; } else { + cq = &phwi_context->be_cq[0]; while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & EQE_VALID_MASK) { @@ -339,13 +473,14 @@ static irqreturn_t be_isr(int irq, void *dev_id) AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); queue_tail_inc(eq); eqe = queue_tail_node(eq); - num_eq_processed++; + num_ioeq_processed++; } if (phba->todo_cq || phba->todo_mcc_cq) queue_work(phba->wq, &phba->work_cqs); - if (num_eq_processed) { - hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 1, 1); + if (num_ioeq_processed) { + hwi_ring_eq_db(phba, eq->id, 0, + num_ioeq_processed, 1, 1); return IRQ_HANDLED; } else return IRQ_NONE; @@ -355,13 +490,32 @@ static irqreturn_t be_isr(int irq, void *dev_id) static int beiscsi_init_irqs(struct beiscsi_hba *phba) { struct pci_dev *pcidev = phba->pcidev; - int ret; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + int ret, msix_vec, i = 0; + char desc[32]; - ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba); - if (ret) { - shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-" - "Failed to register irq\\n"); - return ret; + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + if 
(phba->msix_enabled) { + for (i = 0; i < phba->num_cpus; i++) { + sprintf(desc, "beiscsi_msix_%04x", i); + msix_vec = phba->msix_entries[i].vector; + ret = request_irq(msix_vec, be_isr_msix, 0, desc, + &phwi_context->be_eq[i]); + } + msix_vec = phba->msix_entries[i].vector; + ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc", + &phwi_context->be_eq[i]); + } else { + ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, + "beiscsi", phba); + if (ret) { + shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-" + "Failed to register irq\\n"); + return ret; + } } return 0; } @@ -378,15 +532,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba, iowrite32(val, phba->db_va + DB_CQ_OFFSET); } -/* - * async pdus include - * a. unsolicited NOP-In (target initiated NOP-In) - * b. Async Messages - * c. Reject PDU - * d. Login response - * These headers arrive unprocessed by the EP firmware and iSCSI layer - * process them - */ static unsigned int beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, @@ -397,6 +542,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, { struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; + struct iscsi_task *task; + struct beiscsi_io_task *io_task; + struct iscsi_hdr *login_hdr; switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] & PDUBASE_OPCODE_MASK) { @@ -412,6 +560,10 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n"); break; case ISCSI_OP_LOGIN_RSP: + task = conn->login_task; + io_task = task->dd_data; + login_hdr = (struct iscsi_hdr *)ppdu; + login_hdr->itt = io_task->libiscsi_itt; break; default: shost_printk(KERN_WARNING, phba->shost, @@ -440,7 +592,8 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) io_sgl_alloc_index]; phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; phba->io_sgl_hndl_avbl--; - if (phba->io_sgl_alloc_index == (phba->params.ios_per_ctrl - 1)) + if (phba->io_sgl_alloc_index == (phba->params. 
+ ios_per_ctrl - 1)) phba->io_sgl_alloc_index = 0; else phba->io_sgl_alloc_index++; @@ -490,9 +643,18 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, phwi_ctrlr = phba->phwi_ctrlr; pwrb_context = &phwi_ctrlr->wrb_context[cid]; - pwrb_handle = pwrb_context->pwrb_handle_base[index]; - pwrb_handle->wrb_index = index; - pwrb_handle->nxt_wrb_index = index; + if (pwrb_context->wrb_handles_available) { + pwrb_handle = pwrb_context->pwrb_handle_base[ + pwrb_context->alloc_index]; + pwrb_context->wrb_handles_available--; + pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index; + if (pwrb_context->alloc_index == + (phba->params.wrbs_per_cxn - 1)) + pwrb_context->alloc_index = 0; + else + pwrb_context->alloc_index++; + } else + pwrb_handle = NULL; return pwrb_handle; } @@ -508,11 +670,19 @@ static void free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, struct wrb_handle *pwrb_handle) { + + pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; + pwrb_context->wrb_handles_available++; + if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) + pwrb_context->free_index = 0; + else + pwrb_context->free_index++; + SE_DEBUG(DBG_LVL_8, - "FREE WRB: pwrb_handle=%p free_index=%d=0x%x" + "FREE WRB: pwrb_handle=%p free_index=0x%x" "wrb_handles_available=%d \n", pwrb_handle, pwrb_context->free_index, - pwrb_context->free_index, pwrb_context->wrb_handles_available); + pwrb_context->wrb_handles_available); } static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) @@ -540,6 +710,8 @@ void free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) { + SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d \n", + phba->eh_sgl_free_index); if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { /* * this can happen if clean_task is called on a task that @@ -572,10 +744,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, u32 resid = 0, exp_cmdsn, max_cmdsn; u8 rsp, status, flags; - exp_cmdsn = be32_to_cpu(psol-> + exp_cmdsn = (psol-> dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); - max_cmdsn = be32_to_cpu((psol-> + max_cmdsn = ((psol-> dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) + ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) @@ -610,9 +782,9 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, } if (status == SAM_STAT_CHECK_CONDITION) { + unsigned short *slen = (unsigned short *)sts_bhs->sense_info; sense = sts_bhs->sense_info + sizeof(unsigned short); - sense_len = - cpu_to_be16((unsigned short)(sts_bhs->sense_info[0])); + sense_len = cpu_to_be16(*slen); memcpy(task->sc->sense_buffer, sense, min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); } @@ -620,8 +792,8 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] & SOL_RES_CNT_MASK) conn->rxdata_octets += (psol-> - dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] - & SOL_RES_CNT_MASK); + dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] + & SOL_RES_CNT_MASK); } unmap: scsi_dma_unmap(io_task->scsi_cmnd); @@ -633,6 +805,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, struct iscsi_task *task, struct sol_cqe *psol) { struct iscsi_logout_rsp *hdr; + struct beiscsi_io_task *io_task = task->dd_data; struct iscsi_conn *conn = beiscsi_conn->conn; hdr = (struct iscsi_logout_rsp *)task->hdr; @@ -651,7 +824,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, ((psol->dw[offsetof(struct amap_sol_cqe, 
i_cmd_wnd) / 32] & SOL_CMD_WND_MASK) >> 24) - 1); hdr->hlength = 0; - + hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); } @@ -661,6 +834,7 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, { struct iscsi_tm_rsp *hdr; struct iscsi_conn *conn = beiscsi_conn->conn; + struct beiscsi_io_task *io_task = task->dd_data; hdr = (struct iscsi_tm_rsp *)task->hdr; hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] @@ -668,11 +842,12 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32] & SOL_RESP_MASK); hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe, - i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); + i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) + ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) / 32] & SOL_CMD_WND_MASK) >> 24) - 1); + hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); } @@ -681,18 +856,25 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, struct sol_cqe *psol) { struct hwi_wrb_context *pwrb_context; - struct wrb_handle *pwrb_handle; + struct wrb_handle *pwrb_handle = NULL; struct hwi_controller *phwi_ctrlr; + struct iscsi_task *task; + struct beiscsi_io_task *io_task; struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; phwi_ctrlr = phba->phwi_ctrlr; pwrb_context = &phwi_ctrlr->wrb_context[((psol-> - dw[offsetof(struct amap_sol_cqe, cid) / 32] & - SOL_CID_MASK) >> 6)]; + dw[offsetof(struct amap_sol_cqe, cid) / 32] & + SOL_CID_MASK) >> 6)]; pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> - dw[offsetof(struct amap_sol_cqe, wrb_index) / - 32] & SOL_WRB_INDEX_MASK) >> 16)]; + dw[offsetof(struct amap_sol_cqe, wrb_index) / + 32] & SOL_WRB_INDEX_MASK) >> 16)]; + task = pwrb_handle->pio_handle; + io_task = task->dd_data; + spin_lock(&phba->mgmt_sgl_lock); + free_mgmt_sgl_handle(phba, io_task->psgl_handle); + spin_unlock(&phba->mgmt_sgl_lock); spin_lock_bh(&session->lock); free_wrb_handle(phba, pwrb_context, pwrb_handle); spin_unlock_bh(&session->lock); @@ -704,6 +886,7 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, { struct iscsi_nopin *hdr; struct iscsi_conn *conn = beiscsi_conn->conn; + struct beiscsi_io_task *io_task = task->dd_data; hdr = (struct iscsi_nopin *)task->hdr; hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] @@ -715,6 +898,7 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) / 32] & SOL_CMD_WND_MASK) >> 24) - 1); hdr->opcode = ISCSI_OP_NOOP_IN; + hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); } @@ -726,25 +910,25 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct iscsi_wrb *pwrb = NULL; struct hwi_controller *phwi_ctrlr; struct iscsi_task *task; - struct beiscsi_io_task *io_task; + unsigned int type; struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr-> - wrb_context[((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32] - & SOL_CID_MASK) >> 6)]; + wrb_context[((psol->dw[offsetof + (struct amap_sol_cqe, cid) / 32] + & SOL_CID_MASK) >> 6)]; pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> - dw[offsetof(struct 
amap_sol_cqe, wrb_index) / - 32] & SOL_WRB_INDEX_MASK) >> 16)]; - + dw[offsetof(struct amap_sol_cqe, wrb_index) / + 32] & SOL_WRB_INDEX_MASK) >> 16)]; task = pwrb_handle->pio_handle; - io_task = task->dd_data; - spin_lock_bh(&session->lock); pwrb = pwrb_handle->pwrb; - switch ((pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & - WRB_TYPE_MASK) >> 28) { + type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & + WRB_TYPE_MASK) >> 28; + + spin_lock_bh(&session->lock); + switch (type) { case HWH_TYPE_IO: case HWH_TYPE_IO_RD: if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == @@ -774,14 +958,14 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, default: shost_printk(KERN_WARNING, phba->shost, - "wrb_index 0x%x CID 0x%x\n", - ((psol->dw[offsetof(struct amap_iscsi_wrb, type) / - 32] & SOL_WRB_INDEX_MASK) >> 16), - ((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32] - & SOL_CID_MASK) >> 6)); + "In hwi_complete_cmd, unknown type = %d" + "wrb_index 0x%x CID 0x%x\n", type, + ((psol->dw[offsetof(struct amap_iscsi_wrb, + type) / 32] & SOL_WRB_INDEX_MASK) >> 16), + ((psol->dw[offsetof(struct amap_sol_cqe, + cid) / 32] & SOL_CID_MASK) >> 6)); break; } - spin_unlock_bh(&session->lock); } @@ -1208,21 +1392,20 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, hwi_post_async_buffers(phba, pasync_handle->is_header); } -static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) + +static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) { - struct hwi_controller *phwi_ctrlr; - struct hwi_context_memory *phwi_context; struct be_queue_info *cq; struct sol_cqe *sol; struct dmsg_cqe *dmsg; unsigned int num_processed = 0; unsigned int tot_nump = 0; struct beiscsi_conn *beiscsi_conn; + struct beiscsi_hba *phba; - phwi_ctrlr = phba->phwi_ctrlr; - phwi_context = phwi_ctrlr->phwi_ctxt; - cq = &phwi_context->be_cq; + cq = pbe_eq->cq; sol = queue_tail_node(cq); + phba = pbe_eq->phba; while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & CQE_VALID_MASK) { @@ -1237,11 +1420,11 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) "Connection table empty for cid = %d\n", (u32)(sol->dw[offsetof(struct amap_sol_cqe, cid) / 32] & SOL_CID_MASK) >> 6); - return 0; - } + return 0; + } if (num_processed >= 32) { - hwi_ring_cq_db(phba, phwi_context->be_cq.id, + hwi_ring_cq_db(phba, cq->id, num_processed, 0, 0); tot_nump += num_processed; num_processed = 0; @@ -1258,8 +1441,12 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); break; case UNSOL_HDR_NOTIFY: + SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n"); + hwi_process_default_pdu_ring(beiscsi_conn, phba, + (struct i_t_dpdu_cqe *)sol); + break; case UNSOL_DATA_NOTIFY: - SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR/DATA_NOTIFY\n"); + SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n"); hwi_process_default_pdu_ring(beiscsi_conn, phba, (struct i_t_dpdu_cqe *)sol); break; @@ -1306,7 +1493,7 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) case CXN_KILLED_OVER_RUN_RESIDUAL: case CXN_KILLED_UNDER_RUN_RESIDUAL: case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: - SE_DEBUG(DBG_LVL_1, "CQ Error %d, resetting CID " + SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID " "0x%x...\n", sol->dw[offsetof(struct amap_sol_cqe, code) / 32] & CQE_CODE_MASK, @@ -1317,8 +1504,8 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) break; case CXN_KILLED_RST_SENT: case CXN_KILLED_RST_RCVD: - SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset 
received/sent " - "on CID 0x%x...\n", + SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset" + "received/sent on CID 0x%x...\n", sol->dw[offsetof(struct amap_sol_cqe, code) / 32] & CQE_CODE_MASK, sol->dw[offsetof(struct amap_sol_cqe, cid) / @@ -1344,8 +1531,7 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) if (num_processed > 0) { tot_nump += num_processed; - hwi_ring_cq_db(phba, phwi_context->be_cq.id, num_processed, - 1, 0); + hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0); } return tot_nump; } @@ -1353,21 +1539,30 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) static void beiscsi_process_all_cqs(struct work_struct *work) { unsigned long flags; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + struct be_eq_obj *pbe_eq; struct beiscsi_hba *phba = container_of(work, struct beiscsi_hba, work_cqs); + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + if (phba->msix_enabled) + pbe_eq = &phwi_context->be_eq[phba->num_cpus]; + else + pbe_eq = &phwi_context->be_eq[0]; + if (phba->todo_mcc_cq) { spin_lock_irqsave(&phba->isr_lock, flags); phba->todo_mcc_cq = 0; spin_unlock_irqrestore(&phba->isr_lock, flags); - SE_DEBUG(DBG_LVL_1, "MCC Interrupt Not expected \n"); } if (phba->todo_cq) { spin_lock_irqsave(&phba->isr_lock, flags); phba->todo_cq = 0; spin_unlock_irqrestore(&phba->isr_lock, flags); - beiscsi_process_cq(phba); + beiscsi_process_cq(pbe_eq); } } @@ -1375,19 +1570,15 @@ static int be_iopoll(struct blk_iopoll *iop, int budget) { static unsigned int ret; struct beiscsi_hba *phba; + struct be_eq_obj *pbe_eq; - phba = container_of(iop, struct beiscsi_hba, iopoll); - - ret = beiscsi_process_cq(phba); + pbe_eq = container_of(iop, struct be_eq_obj, iopoll); + ret = beiscsi_process_cq(pbe_eq); if (ret < budget) { - struct hwi_controller *phwi_ctrlr; - struct hwi_context_memory *phwi_context; - - phwi_ctrlr = phba->phwi_ctrlr; - phwi_context = phwi_ctrlr->phwi_ctxt; + phba = pbe_eq->phba; blk_iopoll_complete(iop); - hwi_ring_eq_db(phba, phwi_context->be_eq.q.id, 0, - 0, 1, 1); + SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id); + hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); } return ret; } @@ -1537,14 +1728,12 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) static void beiscsi_find_mem_req(struct beiscsi_hba *phba) { - unsigned int num_cq_pages, num_eq_pages, num_async_pdu_buf_pages; + unsigned int num_cq_pages, num_async_pdu_buf_pages; unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ sizeof(struct sol_cqe)); - num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ - sizeof(struct be_eq_entry)); num_async_pdu_buf_pages = PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ phba->params.defpdu_hdr_sz); @@ -1565,8 +1754,6 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) phba->mem_req[HWI_MEM_ADDN_CONTEXT] = sizeof(struct hwi_context_memory); - phba->mem_req[HWI_MEM_CQ] = num_cq_pages * PAGE_SIZE; - phba->mem_req[HWI_MEM_EQ] = num_eq_pages * PAGE_SIZE; phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) * (phba->params.wrbs_per_cxn) @@ -1751,8 +1938,6 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba) for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { pwrb_context = &phwi_ctrlr->wrb_context[index]; - SE_DEBUG(DBG_LVL_8, "cid=%d pwrb_context=%p \n", index, - pwrb_context); 
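/*
 * WRB handle bookkeeping after this patch: wrb_index is assigned once at init
 * time (pwrb_handle->wrb_index = j, below), and alloc_wrb_handle() /
 * free_wrb_handle() then treat pwrb_handle_base[] as a ring: handles are
 * taken at alloc_index, returned at free_index, and wrb_handles_available
 * tracks occupancy, with both indexes wrapping at wrbs_per_cxn - 1.
 */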
pwrb_context->pwrb_handle_base = kzalloc(sizeof(struct wrb_handle *) * phba->params.wrbs_per_cxn, GFP_KERNEL); @@ -1767,6 +1952,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba) pwrb_context->pwrb_handle_basestd[j] = pwrb_handle; pwrb_context->wrb_handles_available++; + pwrb_handle->wrb_index = j; pwrb_handle++; } pwrb_context->free_index = 0; @@ -1785,6 +1971,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba) pwrb_context->pwrb_handle_basestd[j] = pwrb_handle; pwrb_context->wrb_handles_available++; + pwrb_handle->wrb_index = j; pwrb_handle++; } pwrb_context->free_index = 0; @@ -2042,79 +2229,126 @@ static int be_fill_queue(struct be_queue_info *q, return 0; } -static int beiscsi_create_eq(struct beiscsi_hba *phba, +static int beiscsi_create_eqs(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context) { - unsigned int idx; - int ret; + unsigned int i, num_eq_pages; + int ret, eq_for_mcc; struct be_queue_info *eq; struct be_dma_mem *mem; - struct be_mem_descriptor *mem_descr; void *eq_vaddress; + dma_addr_t paddr; - idx = 0; - eq = &phwi_context->be_eq.q; - mem = &eq->dma_mem; - mem_descr = phba->init_mem; - mem_descr += HWI_MEM_EQ; - eq_vaddress = mem_descr->mem_array[idx].virtual_address; - - ret = be_fill_queue(eq, phba->params.num_eq_entries, - sizeof(struct be_eq_entry), eq_vaddress); - if (ret) { - shost_printk(KERN_ERR, phba->shost, - "be_fill_queue Failed for EQ \n"); - return ret; - } + num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ + sizeof(struct be_eq_entry)); - mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address; + if (phba->msix_enabled) + eq_for_mcc = 1; + else + eq_for_mcc = 0; + for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { + eq = &phwi_context->be_eq[i].q; + mem = &eq->dma_mem; + phwi_context->be_eq[i].phba = phba; + eq_vaddress = pci_alloc_consistent(phba->pcidev, + num_eq_pages * PAGE_SIZE, + &paddr); + if (!eq_vaddress) + goto create_eq_error; + + mem->va = eq_vaddress; + ret = be_fill_queue(eq, phba->params.num_eq_entries, + sizeof(struct be_eq_entry), eq_vaddress); + if (ret) { + shost_printk(KERN_ERR, phba->shost, + "be_fill_queue Failed for EQ \n"); + goto create_eq_error; + } - ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, - phwi_context->be_eq.cur_eqd); - if (ret) { - shost_printk(KERN_ERR, phba->shost, "beiscsi_cmd_eq_create" - "Failedfor EQ \n"); - return ret; + mem->dma = paddr; + ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, + phwi_context->cur_eqd); + if (ret) { + shost_printk(KERN_ERR, phba->shost, + "beiscsi_cmd_eq_create" + "Failedfor EQ \n"); + goto create_eq_error; + } + SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id); } - SE_DEBUG(DBG_LVL_8, "eq id is %d\n", phwi_context->be_eq.q.id); return 0; +create_eq_error: + for (i = 0; i < (phba->num_cpus + 1); i++) { + eq = &phwi_context->be_eq[i].q; + mem = &eq->dma_mem; + if (mem->va) + pci_free_consistent(phba->pcidev, num_eq_pages + * PAGE_SIZE, + mem->va, mem->dma); + } + return ret; } -static int beiscsi_create_cq(struct beiscsi_hba *phba, +static int beiscsi_create_cqs(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context) { - unsigned int idx; + unsigned int i, num_cq_pages; int ret; struct be_queue_info *cq, *eq; struct be_dma_mem *mem; - struct be_mem_descriptor *mem_descr; + struct be_eq_obj *pbe_eq; void *cq_vaddress; + dma_addr_t paddr; - idx = 0; - cq = &phwi_context->be_cq; - eq = &phwi_context->be_eq.q; - mem = &cq->dma_mem; - mem_descr = phba->init_mem; - mem_descr += HWI_MEM_CQ; - cq_vaddress 
= mem_descr->mem_array[idx].virtual_address; - ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2, - sizeof(struct sol_cqe), cq_vaddress); - if (ret) { - shost_printk(KERN_ERR, phba->shost, - "be_fill_queue Failed for ISCSI CQ \n"); - return ret; - } + num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ + sizeof(struct sol_cqe)); - mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address; - ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, false, 0); - if (ret) { - shost_printk(KERN_ERR, phba->shost, - "beiscsi_cmd_eq_create Failed for ISCSI CQ \n"); - return ret; + for (i = 0; i < phba->num_cpus; i++) { + cq = &phwi_context->be_cq[i]; + eq = &phwi_context->be_eq[i].q; + pbe_eq = &phwi_context->be_eq[i]; + pbe_eq->cq = cq; + pbe_eq->phba = phba; + mem = &cq->dma_mem; + cq_vaddress = pci_alloc_consistent(phba->pcidev, + num_cq_pages * PAGE_SIZE, + &paddr); + if (!cq_vaddress) + goto create_cq_error; + ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2, + sizeof(struct sol_cqe), cq_vaddress); + if (ret) { + shost_printk(KERN_ERR, phba->shost, + "be_fill_queue Failed for ISCSI CQ \n"); + goto create_cq_error; + } + + mem->dma = paddr; + ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, + false, 0); + if (ret) { + shost_printk(KERN_ERR, phba->shost, + "beiscsi_cmd_eq_create" + "Failed for ISCSI CQ \n"); + goto create_cq_error; + } + SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n", + cq->id, eq->id); + SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n"); } - SE_DEBUG(DBG_LVL_8, "iscsi cq id is %d\n", phwi_context->be_cq.id); - SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n"); return 0; + +create_cq_error: + for (i = 0; i < phba->num_cpus; i++) { + cq = &phwi_context->be_cq[i]; + mem = &cq->dma_mem; + if (mem->va) + pci_free_consistent(phba->pcidev, num_cq_pages + * PAGE_SIZE, + mem->va, mem->dma); + } + return ret; + } static int @@ -2132,7 +2366,7 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba, idx = 0; dq = &phwi_context->be_def_hdrq; - cq = &phwi_context->be_cq; + cq = &phwi_context->be_cq[0]; mem = &dq->dma_mem; mem_descr = phba->init_mem; mem_descr += HWI_MEM_ASYNC_HEADER_RING; @@ -2176,7 +2410,7 @@ beiscsi_create_def_data(struct beiscsi_hba *phba, idx = 0; dataq = &phwi_context->be_def_dataq; - cq = &phwi_context->be_cq; + cq = &phwi_context->be_cq[0]; mem = &dataq->dma_mem; mem_descr = phba->init_mem; mem_descr += HWI_MEM_ASYNC_DATA_RING; @@ -2239,6 +2473,30 @@ beiscsi_post_pages(struct beiscsi_hba *phba) return 0; } +static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) +{ + struct be_dma_mem *mem = &q->dma_mem; + if (mem->va) + pci_free_consistent(phba->pcidev, mem->size, + mem->va, mem->dma); +} + +static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, + u16 len, u16 entry_size) +{ + struct be_dma_mem *mem = &q->dma_mem; + + memset(q, 0, sizeof(*q)); + q->len = len; + q->entry_size = entry_size; + mem->size = len * entry_size; + mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma); + if (!mem->va) + return -1; + memset(mem->va, 0, mem->size); + return 0; +} + static int beiscsi_create_wrb_rings(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context, @@ -2328,13 +2586,29 @@ static void free_wrb_handles(struct beiscsi_hba *phba) } } +static void be_mcc_queues_destroy(struct beiscsi_hba *phba) +{ + struct be_queue_info *q; + struct be_ctrl_info *ctrl = &phba->ctrl; + + q = &phba->ctrl.mcc_obj.q; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); + be_queue_free(phba, q); + + q 
= &phba->ctrl.mcc_obj.cq; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); + be_queue_free(phba, q); +} + static void hwi_cleanup(struct beiscsi_hba *phba) { struct be_queue_info *q; struct be_ctrl_info *ctrl = &phba->ctrl; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; - int i; + int i, eq_num; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; @@ -2343,7 +2617,6 @@ static void hwi_cleanup(struct beiscsi_hba *phba) if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); } - free_wrb_handles(phba); q = &phwi_context->be_def_hdrq; @@ -2356,13 +2629,76 @@ static void hwi_cleanup(struct beiscsi_hba *phba) beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); - q = &phwi_context->be_cq; - if (q->created) - beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); + for (i = 0; i < (phba->num_cpus); i++) { + q = &phwi_context->be_cq[i]; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); + } + if (phba->msix_enabled) + eq_num = 1; + else + eq_num = 0; + for (i = 0; i < (phba->num_cpus + eq_num); i++) { + q = &phwi_context->be_eq[i].q; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); + } + be_mcc_queues_destroy(phba); +} - q = &phwi_context->be_eq.q; - if (q->created) - beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); +static int be_mcc_queues_create(struct beiscsi_hba *phba, + struct hwi_context_memory *phwi_context) +{ + struct be_queue_info *q, *cq; + struct be_ctrl_info *ctrl = &phba->ctrl; + + /* Alloc MCC compl queue */ + cq = &phba->ctrl.mcc_obj.cq; + if (be_queue_alloc(phba, cq, MCC_CQ_LEN, + sizeof(struct be_mcc_compl))) + goto err; + /* Ask BE to create MCC compl queue; */ + if (phba->msix_enabled) { + if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq + [phba->num_cpus].q, false, true, 0)) + goto mcc_cq_free; + } else { + if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, + false, true, 0)) + goto mcc_cq_free; + } + + /* Alloc MCC queue */ + q = &phba->ctrl.mcc_obj.q; + if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) + goto mcc_cq_destroy; + + /* Ask BE to create MCC queue */ + if (be_cmd_mccq_create(phba, q, cq)) + goto mcc_q_free; + + return 0; + +mcc_q_free: + be_queue_free(phba, q); +mcc_cq_destroy: + beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); +mcc_cq_free: + be_queue_free(phba, cq); +err: + return -1; +} + +static int find_num_cpus(void) +{ + int num_cpus = 0; + + num_cpus = num_online_cpus(); + if (num_cpus >= MAX_CPUS) + num_cpus = MAX_CPUS - 1; + + SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus); + return num_cpus; } static int hwi_init_port(struct beiscsi_hba *phba) @@ -2376,20 +2712,23 @@ static int hwi_init_port(struct beiscsi_hba *phba) def_pdu_ring_sz = phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); phwi_ctrlr = phba->phwi_ctrlr; - phwi_context = phwi_ctrlr->phwi_ctxt; - phwi_context->be_eq.max_eqd = 0; - phwi_context->be_eq.min_eqd = 0; - phwi_context->be_eq.cur_eqd = 64; - phwi_context->be_eq.enable_aic = false; + phwi_context->max_eqd = 0; + phwi_context->min_eqd = 0; + phwi_context->cur_eqd = 64; be_cmd_fw_initialize(&phba->ctrl); - status = beiscsi_create_eq(phba, phwi_context); + + status = beiscsi_create_eqs(phba, phwi_context); if (status != 0) { shost_printk(KERN_ERR, phba->shost, "EQ not created \n"); goto error; } - status = mgmt_check_supported_fw(ctrl); + status = be_mcc_queues_create(phba, phwi_context); + if (status != 0) + goto error; + + status = mgmt_check_supported_fw(ctrl, phba); if (status != 0) { shost_printk(KERN_ERR, phba->shost, "Unsupported 
fw version \n"); @@ -2403,7 +2742,7 @@ static int hwi_init_port(struct beiscsi_hba *phba) goto error; } - status = beiscsi_create_cq(phba, phwi_context); + status = beiscsi_create_cqs(phba, phwi_context); if (status != 0) { shost_printk(KERN_ERR, phba->shost, "CQ not created\n"); goto error; @@ -2447,7 +2786,6 @@ error: return -ENOMEM; } - static int hwi_init_controller(struct beiscsi_hba *phba) { struct hwi_controller *phwi_ctrlr; @@ -2530,6 +2868,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) phba->io_sgl_hndl_avbl = 0; phba->eh_sgl_hndl_avbl = 0; + mem_descr_sglh = phba->init_mem; mem_descr_sglh += HWI_MEM_SGLH; if (1 == mem_descr_sglh->num_elements) { @@ -2656,13 +2995,12 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba) struct hwi_context_memory *phwi_context; struct be_queue_info *eq; u8 __iomem *addr; - u32 reg; + u32 reg, i; u32 enabled; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - eq = &phwi_context->be_eq.q; addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); reg = ioread32(addr); @@ -2673,9 +3011,11 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba) reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr); iowrite32(reg, addr); - SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); - - hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); + for (i = 0; i <= phba->num_cpus; i++) { + eq = &phwi_context->be_eq[i].q; + SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); + hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); + } } else shost_printk(KERN_WARNING, phba->shost, "In hwi_enable_intr, Not Enabled \n"); @@ -2738,17 +3078,25 @@ static void hwi_purge_eq(struct beiscsi_hba *phba) struct hwi_context_memory *phwi_context; struct be_queue_info *eq; struct be_eq_entry *eqe = NULL; + int i, eq_msix; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - eq = &phwi_context->be_eq.q; - eqe = queue_tail_node(eq); + if (phba->msix_enabled) + eq_msix = 1; + else + eq_msix = 0; - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); + for (i = 0; i < (phba->num_cpus + eq_msix); i++) { + eq = &phwi_context->be_eq[i].q; eqe = queue_tail_node(eq); + + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + } } } @@ -2846,8 +3194,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb)); doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; - doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << - DB_DEF_PDU_WRB_INDEX_SHIFT; + doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) + << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); @@ -2856,7 +3204,7 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, int *index, int *age) { - *index = be32_to_cpu(itt) >> 16; + *index = (int)itt; if (age) *age = conn->session->age; } @@ -2885,15 +3233,13 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, GFP_KERNEL, &paddr); - if (!io_task->cmd_bhs) return -ENOMEM; - io_task->bhs_pa.u.a64.address 
= paddr; + io_task->libiscsi_itt = (itt_t)task->itt; io_task->pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, task->itt); - io_task->pwrb_handle->pio_handle = task; io_task->conn = beiscsi_conn; task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; @@ -2905,7 +3251,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) spin_unlock(&phba->io_sgl_lock); if (!io_task->psgl_handle) goto free_hndls; - } else { io_task->scsi_cmnd = NULL; if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { @@ -2932,8 +3277,11 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) goto free_hndls; } } - itt = (itt_t) cpu_to_be32(((unsigned int)task->itt << 16) | - (unsigned int)(io_task->psgl_handle->sgl_index)); + itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> + wrb_index << 16) | (unsigned int) + (io_task->psgl_handle->sgl_index)); + io_task->pwrb_handle->pio_handle = task; + io_task->cmd_bhs->iscsi_hdr.itt = itt; return 0; @@ -3006,7 +3354,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, io_task->bhs_len = sizeof(struct be_cmd_bhs); if (writedir) { - SE_DEBUG(DBG_LVL_4, " WRITE Command \t"); memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48); AMAP_SET_BITS(struct amap_pdu_data_out, itt, &io_task->cmd_bhs->iscsi_data_pdu, @@ -3016,11 +3363,12 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, ISCSI_OPCODE_SCSI_DATA_OUT); AMAP_SET_BITS(struct amap_pdu_data_out, final_bit, &io_task->cmd_bhs->iscsi_data_pdu, 1); - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + INI_WR_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); } else { - SE_DEBUG(DBG_LVL_4, "READ Command \t"); - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + INI_RD_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); } memcpy(&io_task->cmd_bhs->iscsi_data_pdu. 
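Note on the tag handling in the hunks above: after this change the tag handed to the hardware packs the WRB index in the upper 16 bits and the SGL index in the lower 16 bits, while the libiscsi tag is kept separately in io_task->libiscsi_itt and returned directly by beiscsi_parse_pdu(). The helpers below are only an illustrative sketch of that layout, not part of the patch; the names and the 16-bit field widths are inferred from the "<< 16" / ">> 16" shifts visible above.

/* Illustrative sketch of the new ITT layout (not part of the patch). */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_itt_pack(uint16_t wrb_index, uint16_t sgl_index)
{
	/* mirrors: cpu_to_be32((wrb_index << 16) | sgl_index), before the byte swap */
	return ((uint32_t)wrb_index << 16) | sgl_index;
}

static uint16_t example_itt_to_wrb_index(uint32_t itt_cpu)
{
	/* mirrors the TMF lookup: pwrb_handle_basestd[be32_to_cpu(rtt) >> 16] */
	return itt_cpu >> 16;
}

int main(void)
{
	uint32_t itt = example_itt_pack(7, 42);

	printf("itt=0x%08x wrb_index=%u sgl_index=%u\n",
	       itt, example_itt_to_wrb_index(itt), itt & 0xffff);
	return 0;
}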
@@ -3059,10 +3407,16 @@ static int beiscsi_mtask(struct iscsi_task *task) struct iscsi_conn *conn = task->conn; struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_hba *phba = beiscsi_conn->phba; + struct iscsi_session *session; struct iscsi_wrb *pwrb = NULL; + struct hwi_controller *phwi_ctrlr; + struct hwi_wrb_context *pwrb_context; + struct wrb_handle *pwrb_handle; unsigned int doorbell = 0; + unsigned int i, cid; struct iscsi_task *aborted_task; + cid = beiscsi_conn->beiscsi_conn_cid; pwrb = io_task->pwrb_handle->pwrb; AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, be32_to_cpu(task->cmdsn)); @@ -3073,33 +3427,43 @@ static int beiscsi_mtask(struct iscsi_task *task) switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_LOGIN: - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, TGT_DM_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + TGT_DM_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); hwi_write_buffer(pwrb, task); break; case ISCSI_OP_NOOP_OUT: - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + INI_RD_CMD); hwi_write_buffer(pwrb, task); break; case ISCSI_OP_TEXT: - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + INI_WR_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); hwi_write_buffer(pwrb, task); break; case ISCSI_OP_SCSI_TMFUNC: - aborted_task = iscsi_itt_to_task(conn, - ((struct iscsi_tm *)task->hdr)->rtt); + session = conn->session; + i = ((struct iscsi_tm *)task->hdr)->rtt; + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cid]; + pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i) + >> 16]; + aborted_task = pwrb_handle->pio_handle; if (!aborted_task) return 0; + aborted_io_task = aborted_task->dd_data; if (!aborted_io_task->scsi_cmnd) return 0; mgmt_invalidate_icds(phba, aborted_io_task->psgl_handle->sgl_index, - beiscsi_conn->beiscsi_conn_cid); - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD); + cid); + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + INI_TMF_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); hwi_write_buffer(pwrb, task); break; @@ -3122,7 +3486,7 @@ static int beiscsi_mtask(struct iscsi_task *task) io_task->pwrb_handle->nxt_wrb_index); be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); - doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; + doorbell |= cid & DB_WRB_POST_CID_MASK; doorbell |= (io_task->pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; @@ -3165,9 +3529,14 @@ static int beiscsi_task_xmit(struct iscsi_task *task) return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); } + static void beiscsi_remove(struct pci_dev *pcidev) { struct beiscsi_hba *phba = NULL; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + struct be_eq_obj *pbe_eq; + unsigned int i, msix_vec; phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); if (!phba) { @@ -3175,12 +3544,24 @@ static void beiscsi_remove(struct pci_dev *pcidev) return; } + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; hwi_disable_intr(phba); - if (phba->pcidev->irq) - free_irq(phba->pcidev->irq, phba); + if (phba->msix_enabled) { + for (i = 0; i <= phba->num_cpus; i++) { + msix_vec = phba->msix_entries[i].vector; + free_irq(msix_vec, 
&phwi_context->be_eq[i]); + } + } else + if (phba->pcidev->irq) + free_irq(phba->pcidev->irq, phba); + pci_disable_msix(phba->pcidev); destroy_workqueue(phba->wq); if (blk_iopoll_enabled) - blk_iopoll_disable(&phba->iopoll); + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_disable(&pbe_eq->iopoll); + } beiscsi_clean_port(phba); beiscsi_free_mem(phba); @@ -3194,11 +3575,29 @@ static void beiscsi_remove(struct pci_dev *pcidev) iscsi_host_free(phba->shost); } +static void beiscsi_msix_enable(struct beiscsi_hba *phba) +{ + int i, status; + + for (i = 0; i <= phba->num_cpus; i++) + phba->msix_entries[i].entry = i; + + status = pci_enable_msix(phba->pcidev, phba->msix_entries, + (phba->num_cpus + 1)); + if (!status) + phba->msix_enabled = true; + + return; +} + static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, const struct pci_device_id *id) { struct beiscsi_hba *phba = NULL; - int ret; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + struct be_eq_obj *pbe_eq; + int ret, msix_vec, num_cpus, i; ret = beiscsi_enable_pci(pcidev); if (ret < 0) { @@ -3213,8 +3612,18 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, " Failed in beiscsi_hba_alloc \n"); goto disable_pci; } + SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba); pci_set_drvdata(pcidev, phba); + if (enable_msix) + num_cpus = find_num_cpus(); + else + num_cpus = 1; + phba->num_cpus = num_cpus; + SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus); + + if (enable_msix) + beiscsi_msix_enable(phba); ret = be_ctrl_init(phba, pcidev); if (ret) { shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" @@ -3235,7 +3644,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", phba->shost->host_no); - phba->wq = create_singlethread_workqueue(phba->wq_name); + phba->wq = create_workqueue(phba->wq_name); if (!phba->wq) { shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" "Failed to allocate work queue\n"); @@ -3244,11 +3653,16 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs); + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; if (blk_iopoll_enabled) { - blk_iopoll_init(&phba->iopoll, be_iopoll_budget, be_iopoll); - blk_iopoll_enable(&phba->iopoll); + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, + be_iopoll); + blk_iopoll_enable(&pbe_eq->iopoll); + } } - ret = beiscsi_init_irqs(phba); if (ret < 0) { shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" @@ -3261,17 +3675,26 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, "Failed to hwi_enable_intr\n"); goto free_ctrlr; } - SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n"); return 0; free_ctrlr: - if (phba->pcidev->irq) - free_irq(phba->pcidev->irq, phba); + if (phba->msix_enabled) { + for (i = 0; i <= phba->num_cpus; i++) { + msix_vec = phba->msix_entries[i].vector; + free_irq(msix_vec, &phwi_context->be_eq[i]); + } + } else + if (phba->pcidev->irq) + free_irq(phba->pcidev->irq, phba); + pci_disable_msix(phba->pcidev); free_blkenbld: destroy_workqueue(phba->wq); if (blk_iopoll_enabled) - blk_iopoll_disable(&phba->iopoll); + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_disable(&pbe_eq->iopoll); + } free_twq: beiscsi_clean_port(phba); beiscsi_free_mem(phba); @@ -3351,6 +3774,7 @@ 
static struct pci_driver beiscsi_pci_driver = { .id_table = beiscsi_pci_id_table }; + static int __init beiscsi_module_init(void) { int ret; diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 53c9b70ac7ac..25e6b208b771 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -21,11 +21,9 @@ #ifndef _BEISCSI_MAIN_ #define _BEISCSI_MAIN_ - #include #include #include -#include #include #include #include @@ -35,12 +33,8 @@ #include #include "be.h" - - - #define DRV_NAME "be2iscsi" #define BUILD_STR "2.0.527.0" - #define BE_NAME "ServerEngines BladeEngine2" \ "Linux iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -49,6 +43,8 @@ #define BE_DEVICE_ID1 0x212 #define OC_DEVICE_ID1 0x702 #define OC_DEVICE_ID2 0x703 +#define OC_DEVICE_ID3 0x712 +#define OC_DEVICE_ID4 0x222 #define BE2_MAX_SESSIONS 64 #define BE2_CMDS_PER_CXN 128 @@ -63,6 +59,7 @@ #define BE2_IO_DEPTH \ (BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ)) +#define MAX_CPUS 31 #define BEISCSI_SGLIST_ELEMENTS BE2_SGE #define BEISCSI_MAX_CMNDS 1024 /* Max IO's per Ctrlr sht->can_queue */ @@ -79,7 +76,7 @@ #define BE_SENSE_INFO_SIZE 258 #define BE_ISCSI_PDU_HEADER_SIZE 64 #define BE_MIN_MEM_SIZE 16384 - +#define MAX_CMD_SZ 65536 #define IIOC_SCSI_DATA 0x05 /* Write Operation */ #define DBG_LVL 0x00000001 @@ -100,6 +97,8 @@ do { \ } \ } while (0); +#define BE_ADAPTER_UP 0x00000000 +#define BE_ADAPTER_LINK_DOWN 0x00000001 /** * hardware needs the async PDU buffers to be posted in multiples of 8 * So have atleast 8 of them by default @@ -160,21 +159,19 @@ do { \ enum be_mem_enum { HWI_MEM_ADDN_CONTEXT, - HWI_MEM_CQ, - HWI_MEM_EQ, HWI_MEM_WRB, HWI_MEM_WRBH, - HWI_MEM_SGLH, /* 5 */ + HWI_MEM_SGLH, HWI_MEM_SGE, - HWI_MEM_ASYNC_HEADER_BUF, + HWI_MEM_ASYNC_HEADER_BUF, /* 5 */ HWI_MEM_ASYNC_DATA_BUF, HWI_MEM_ASYNC_HEADER_RING, - HWI_MEM_ASYNC_DATA_RING, /* 10 */ + HWI_MEM_ASYNC_DATA_RING, HWI_MEM_ASYNC_HEADER_HANDLE, - HWI_MEM_ASYNC_DATA_HANDLE, + HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */ HWI_MEM_ASYNC_PDU_CONTEXT, ISCSI_MEM_GLOBAL_HEADER, - SE_MEM_MAX /* 15 */ + SE_MEM_MAX }; struct be_bus_address32 { @@ -212,6 +209,9 @@ struct be_mem_descriptor { struct sgl_handle { unsigned int sgl_index; + unsigned int type; + unsigned int cid; + struct iscsi_task *task; struct iscsi_sge *pfrag; }; @@ -274,13 +274,17 @@ struct beiscsi_hba { struct pci_dev *pcidev; unsigned int state; unsigned short asic_revision; - struct blk_iopoll iopoll; + unsigned int num_cpus; + unsigned int nxt_cqid; + struct msix_entry msix_entries[MAX_CPUS + 1]; + bool msix_enabled; struct be_mem_descriptor *init_mem; unsigned short io_sgl_alloc_index; unsigned short io_sgl_free_index; unsigned short io_sgl_hndl_avbl; struct sgl_handle **io_sgl_hndl_base; + struct sgl_handle **sgl_hndl_array; unsigned short eh_sgl_alloc_index; unsigned short eh_sgl_free_index; @@ -315,6 +319,7 @@ struct beiscsi_hba { unsigned short cid_alloc; unsigned short cid_free; unsigned short avlbl_cids; + unsigned short iscsi_features; spinlock_t cid_lock; } fw_config; @@ -343,6 +348,7 @@ struct beiscsi_conn { unsigned short login_in_progress; struct sgl_handle *plogin_sgl_handle; struct beiscsi_session *beiscsi_sess; + struct iscsi_task *task; }; /* This structure is used by the chip */ @@ -390,7 +396,7 @@ struct beiscsi_io_task { unsigned int flags; unsigned short cid; unsigned short header_len; - + itt_t libiscsi_itt; struct be_cmd_bhs *cmd_bhs; struct be_bus_address bhs_pa; unsigned short bhs_len; @@ -599,7 +605,6 @@ 
struct amap_cq_db { void beiscsi_process_eq(struct beiscsi_hba *phba); - struct iscsi_wrb { u32 dw[16]; } __packed; @@ -820,10 +825,12 @@ struct wrb_handle { }; struct hwi_context_memory { - struct be_eq_obj be_eq; - struct be_queue_info be_cq; - struct be_queue_info be_mcc_cq; - struct be_queue_info be_mcc; + /* Adaptive interrupt coalescing (AIC) info */ + u16 min_eqd; /* in usecs */ + u16 max_eqd; /* in usecs */ + u16 cur_eqd; /* in usecs */ + struct be_eq_obj be_eq[MAX_CPUS]; + struct be_queue_info be_cq[MAX_CPUS]; struct be_queue_info be_def_hdrq; struct be_queue_info be_def_dataq; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 12e644fc746e..79c2bd525a84 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -35,7 +35,6 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); - status = be_mbox_notify(ctrl); if (!status) { struct be_fw_cfg *pfw_cfg; @@ -58,7 +57,8 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl, return status; } -unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl) +unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl, + struct beiscsi_hba *phba) { struct be_dma_mem nonemb_cmd; struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); @@ -85,7 +85,6 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl) sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd.size); - status = be_mbox_notify(ctrl); if (!status) { struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va; @@ -95,21 +94,25 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl) resp->params.hba_attribs.firmware_version_string); SE_DEBUG(DBG_LVL_8, "Developer Build, not performing version check...\n"); - + phba->fw_config.iscsi_features = + resp->params.hba_attribs.iscsi_features; + SE_DEBUG(DBG_LVL_8, " phba->fw_config.iscsi_features = %d\n", + phba->fw_config.iscsi_features); } else SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n"); + spin_unlock(&ctrl->mbox_lock); if (nonemb_cmd.va) pci_free_consistent(ctrl->pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); - spin_unlock(&ctrl->mbox_lock); return status; } + unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) { struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_mcc_wrb *wrb = wrb_from_mccq(phba); struct iscsi_cleanup_req *req = embedded_payload(wrb); int status = 0; @@ -124,7 +127,7 @@ unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) req->hdr_ring_id = 0; req->data_ring_id = 0; - status = be_mbox_notify(ctrl); + status = be_mcc_notify_wait(phba); if (status) shost_printk(KERN_WARNING, phba->shost, " mgmt_epfw_cleanup , FAILED\n"); @@ -137,7 +140,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, { struct be_dma_mem nonemb_cmd; struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_mcc_wrb *wrb = wrb_from_mccq(phba); struct be_sge *sge = nonembedded_sgl(wrb); struct invalidate_commands_params_in *req; int status = 0; @@ -169,7 +172,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd.size); - status = 
be_mbox_notify(ctrl); + status = be_mcc_notify_wait(phba); if (status) SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n"); spin_unlock(&ctrl->mbox_lock); @@ -186,7 +189,7 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, unsigned short savecfg_flag) { struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_mcc_wrb *wrb = wrb_from_mccq(phba); struct iscsi_invalidate_connection_params_in *req = embedded_payload(wrb); int status = 0; @@ -205,7 +208,7 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, else req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE; req->save_cfg = savecfg_flag; - status = be_mbox_notify(ctrl); + status = be_mcc_notify_wait(phba); if (status) SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n"); @@ -217,7 +220,7 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba, unsigned short cid, unsigned int upload_flag) { struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_mcc_wrb *wrb = wrb_from_mccq(phba); struct tcp_upload_params_in *req = embedded_payload(wrb); int status = 0; @@ -229,7 +232,7 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba, OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); req->id = (unsigned short)cid; req->upload_type = (unsigned char)upload_flag; - status = be_mbox_notify(ctrl); + status = be_mcc_notify_wait(phba); if (status) SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n"); spin_unlock(&ctrl->mbox_lock); @@ -245,13 +248,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr; struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_mcc_wrb *wrb = wrb_from_mccq(phba); struct tcp_connect_and_offload_in *req = embedded_payload(wrb); unsigned short def_hdr_id; unsigned short def_data_id; struct phys_addr template_address = { 0, 0 }; struct phys_addr *ptemplate_address; int status = 0; + unsigned int i; unsigned short cid = beiscsi_ep->ep_cid; phwi_ctrlr = phba->phwi_ctrlr; @@ -296,14 +300,18 @@ int mgmt_open_connection(struct beiscsi_hba *phba, } req->cid = cid; - req->cq_id = phwi_context->be_cq.id; + i = phba->nxt_cqid++; + if (phba->nxt_cqid == phba->num_cpus) + phba->nxt_cqid = 0; + req->cq_id = phwi_context->be_cq[i].id; + SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d \n", i, req->cq_id); req->defq_id = def_hdr_id; req->hdr_ring_id = def_hdr_id; req->data_ring_id = def_data_id; req->do_offload = 1; req->dataout_template_pa.lo = ptemplate_address->lo; req->dataout_template_pa.hi = ptemplate_address->hi; - status = be_mbox_notify(ctrl); + status = be_mcc_notify_wait(phba); if (!status) { struct iscsi_endpoint *ep; struct tcp_connect_and_offload_out *ptcpcnct_out = @@ -311,7 +319,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba, ep = phba->ep_array[ptcpcnct_out->cid]; beiscsi_ep = ep->dd_data; - beiscsi_ep->fw_handle = 0; + beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; beiscsi_ep->cid_vld = 1; SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n"); } else @@ -319,3 +327,30 @@ int mgmt_open_connection(struct beiscsi_hba *phba, spin_unlock(&ctrl->mbox_lock); return status; } + +int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb = wrb_from_mccq(phba); + struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb); + int 
status; + + SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n"); + spin_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, + OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG, + sizeof(*req)); + + status = be_mcc_notify_wait(phba); + if (!status) { + struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb); + + memcpy(mac_addr, resp->mac_address, ETH_ALEN); + } + + spin_unlock(&ctrl->mbox_lock); + return status; +} + diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 00e816ee8070..24eaff923f85 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -175,7 +175,9 @@ struct mgmt_hba_attributes { u8 phy_port; u32 firmware_post_status; u32 hba_mtu[8]; - u32 future_u32[4]; + u8 iscsi_features; + u8 future_u8[3]; + u32 future_u32[3]; } __packed; struct mgmt_controller_attributes { @@ -246,4 +248,8 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, unsigned short cid, unsigned short issue_reset, unsigned short savecfg_flag); + +unsigned char mgmt_fw_cmd(struct be_ctrl_info *ctrl, + struct beiscsi_hba *phba, + char *buf, unsigned int len); #endif -- cgit v1.2.3-59-g8ed1b From 35e6601903fc41e48e9b6722a49cc5acc7065c51 Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Fri, 23 Oct 2009 11:53:49 +0530 Subject: [SCSI] be2iscsi: Adding Ring Mode Wrb's V3 This patch adds support for ring based wrbs Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_cmds.c | 6 +- drivers/scsi/be2iscsi/be_cmds.h | 2 +- drivers/scsi/be2iscsi/be_main.c | 209 +++++++++++++++++++++++++++++++--------- 3 files changed, 169 insertions(+), 48 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 10f8fe7a38d2..698a527d6cca 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -125,7 +125,7 @@ static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm, } -int be_process_mcc(struct beiscsi_hba *phba) +int beiscsi_process_mcc(struct beiscsi_hba *phba) { struct be_mcc_compl *compl; int num = 0, status = 0; @@ -161,7 +161,7 @@ static int be_mcc_wait_compl(struct beiscsi_hba *phba) #define mcc_timeout 120000 /* 5s timeout */ int i, status; for (i = 0; i < mcc_timeout; i++) { - status = be_process_mcc(phba); + status = beiscsi_process_mcc(phba); if (status) return status; @@ -504,7 +504,7 @@ static u32 be_encoded_q_len(int q_len) return len_encoded; } -int be_cmd_mccq_create(struct beiscsi_hba *phba, +int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, struct be_queue_info *mccq, struct be_queue_info *cq) { diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 76fe1f9dd4cb..5de8acb924cb 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -418,7 +418,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, int type); -int be_cmd_mccq_create(struct beiscsi_hba *phba, +int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, struct be_queue_info *mccq, struct be_queue_info *cq); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 2c3e99eeff82..d15df07ba783 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -40,6 +40,7 @@ static unsigned int be_iopoll_budget = 10; static unsigned int be_max_phys_size = 64; static 
unsigned int enable_msix = 1; +static unsigned int ring_mode; MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); @@ -670,8 +671,9 @@ static void free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, struct wrb_handle *pwrb_handle) { - - pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; + if (!ring_mode) + pwrb_context->pwrb_handle_base[pwrb_context->free_index] = + pwrb_handle; pwrb_context->wrb_handles_available++; if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) pwrb_context->free_index = 0; @@ -857,6 +859,7 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, { struct hwi_wrb_context *pwrb_context; struct wrb_handle *pwrb_handle = NULL; + struct sgl_handle *psgl_handle = NULL; struct hwi_controller *phwi_ctrlr; struct iscsi_task *task; struct beiscsi_io_task *io_task; @@ -864,13 +867,23 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, struct iscsi_session *session = conn->session; phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[((psol-> - dw[offsetof(struct amap_sol_cqe, cid) / 32] & - SOL_CID_MASK) >> 6)]; - pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> - dw[offsetof(struct amap_sol_cqe, wrb_index) / - 32] & SOL_WRB_INDEX_MASK) >> 16)]; - task = pwrb_handle->pio_handle; + if (ring_mode) { + psgl_handle = phba->sgl_hndl_array[((psol-> + dw[offsetof(struct amap_sol_cqe_ring, icd_index) / + 32] & SOL_ICD_INDEX_MASK) >> 6)]; + pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid]; + task = psgl_handle->task; + pwrb_handle = NULL; + } else { + pwrb_context = &phwi_ctrlr->wrb_context[((psol-> + dw[offsetof(struct amap_sol_cqe, cid) / 32] & + SOL_CID_MASK) >> 6)]; + pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> + dw[offsetof(struct amap_sol_cqe, wrb_index) / + 32] & SOL_WRB_INDEX_MASK) >> 16)]; + task = pwrb_handle->pio_handle; + } + io_task = task->dd_data; spin_lock(&phba->mgmt_sgl_lock); free_mgmt_sgl_handle(phba, io_task->psgl_handle); @@ -910,23 +923,31 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct iscsi_wrb *pwrb = NULL; struct hwi_controller *phwi_ctrlr; struct iscsi_task *task; + struct sgl_handle *psgl_handle = NULL; unsigned int type; struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr-> - wrb_context[((psol->dw[offsetof - (struct amap_sol_cqe, cid) / 32] - & SOL_CID_MASK) >> 6)]; - pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> - dw[offsetof(struct amap_sol_cqe, wrb_index) / - 32] & SOL_WRB_INDEX_MASK) >> 16)]; - task = pwrb_handle->pio_handle; - pwrb = pwrb_handle->pwrb; - type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & + if (ring_mode) { + psgl_handle = phba->sgl_hndl_array[((psol-> + dw[offsetof(struct amap_sol_cqe_ring, icd_index) / + 32] & SOL_ICD_INDEX_MASK) >> 6)]; + task = psgl_handle->task; + type = psgl_handle->type; + } else { + pwrb_context = &phwi_ctrlr-> + wrb_context[((psol->dw[offsetof + (struct amap_sol_cqe, cid) / 32] + & SOL_CID_MASK) >> 6)]; + pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> + dw[offsetof(struct amap_sol_cqe, wrb_index) / + 32] & SOL_WRB_INDEX_MASK) >> 16)]; + task = pwrb_handle->pio_handle; + pwrb = pwrb_handle->pwrb; + type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & WRB_TYPE_MASK) >> 28; - + } spin_lock_bh(&session->lock); switch (type) { case HWH_TYPE_IO: @@ -957,15 +978,24 @@ static void 
hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, break; default: - shost_printk(KERN_WARNING, phba->shost, - "In hwi_complete_cmd, unknown type = %d" - "wrb_index 0x%x CID 0x%x\n", type, - ((psol->dw[offsetof(struct amap_iscsi_wrb, - type) / 32] & SOL_WRB_INDEX_MASK) >> 16), - ((psol->dw[offsetof(struct amap_sol_cqe, - cid) / 32] & SOL_CID_MASK) >> 6)); + if (ring_mode) + shost_printk(KERN_WARNING, phba->shost, + "In hwi_complete_cmd, unknown type = %d" + "icd_index 0x%x CID 0x%x\n", type, + ((psol->dw[offsetof(struct amap_sol_cqe_ring, + icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6), + psgl_handle->cid); + else + shost_printk(KERN_WARNING, phba->shost, + "In hwi_complete_cmd, unknown type = %d" + "wrb_index 0x%x CID 0x%x\n", type, + ((psol->dw[offsetof(struct amap_iscsi_wrb, + type) / 32] & SOL_WRB_INDEX_MASK) >> 16), + ((psol->dw[offsetof(struct amap_sol_cqe, + cid) / 32] & SOL_CID_MASK) >> 6)); break; } + spin_unlock_bh(&session->lock); } @@ -1401,6 +1431,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) unsigned int num_processed = 0; unsigned int tot_nump = 0; struct beiscsi_conn *beiscsi_conn; + struct sgl_handle *psgl_handle = NULL; struct beiscsi_hba *phba; cq = pbe_eq->cq; @@ -1411,17 +1442,32 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) CQE_VALID_MASK) { be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); - beiscsi_conn = phba->conn_table[(u32) (sol-> + if (ring_mode) { + psgl_handle = phba->sgl_hndl_array[((sol-> + dw[offsetof(struct amap_sol_cqe_ring, + icd_index) / 32] & SOL_ICD_INDEX_MASK) + >> 6)]; + beiscsi_conn = phba->conn_table[psgl_handle->cid]; + if (!beiscsi_conn || !beiscsi_conn->ep) { + shost_printk(KERN_WARNING, phba->shost, + "Connection table empty for cid = %d\n", + psgl_handle->cid); + return 0; + } + + } else { + beiscsi_conn = phba->conn_table[(u32) (sol-> dw[offsetof(struct amap_sol_cqe, cid) / 32] & SOL_CID_MASK) >> 6]; - if (!beiscsi_conn || !beiscsi_conn->ep) { - shost_printk(KERN_WARNING, phba->shost, + if (!beiscsi_conn || !beiscsi_conn->ep) { + shost_printk(KERN_WARNING, phba->shost, "Connection table empty for cid = %d\n", (u32)(sol->dw[offsetof(struct amap_sol_cqe, cid) / 32] & SOL_CID_MASK) >> 6); return 0; } + } if (num_processed >= 32) { hwi_ring_cq_db(phba, cq->id, @@ -1465,13 +1511,21 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) case CMD_CXN_KILLED_ITT_INVALID: case CMD_CXN_KILLED_SEQ_OUTOFORDER: case CMD_CXN_KILLED_INVALID_DATASN_RCVD: - SE_DEBUG(DBG_LVL_1, + if (ring_mode) { + SE_DEBUG(DBG_LVL_1, + "CQ Error notification for cmd.. " + "code %d cid 0x%x\n", + sol->dw[offsetof(struct amap_sol_cqe, code) / + 32] & CQE_CODE_MASK, psgl_handle->cid); + } else { + SE_DEBUG(DBG_LVL_1, "CQ Error notification for cmd.. 
" "code %d cid 0x%x\n", sol->dw[offsetof(struct amap_sol_cqe, code) / 32] & CQE_CODE_MASK, (sol->dw[offsetof(struct amap_sol_cqe, cid) / 32] & SOL_CID_MASK)); + } break; case UNSOL_DATA_DIGEST_ERROR_NOTIFY: SE_DEBUG(DBG_LVL_1, @@ -1493,23 +1547,37 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) case CXN_KILLED_OVER_RUN_RESIDUAL: case CXN_KILLED_UNDER_RUN_RESIDUAL: case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: - SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID " + if (ring_mode) { + SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID " + "0x%x...\n", + sol->dw[offsetof(struct amap_sol_cqe, code) / + 32] & CQE_CODE_MASK, psgl_handle->cid); + } else { + SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID " "0x%x...\n", sol->dw[offsetof(struct amap_sol_cqe, code) / 32] & CQE_CODE_MASK, sol->dw[offsetof(struct amap_sol_cqe, cid) / 32] & CQE_CID_MASK); + } iscsi_conn_failure(beiscsi_conn->conn, ISCSI_ERR_CONN_FAILED); break; case CXN_KILLED_RST_SENT: case CXN_KILLED_RST_RCVD: - SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset" + if (ring_mode) { + SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset" + "received/sent on CID 0x%x...\n", + sol->dw[offsetof(struct amap_sol_cqe, code) / + 32] & CQE_CODE_MASK, psgl_handle->cid); + } else { + SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset" "received/sent on CID 0x%x...\n", sol->dw[offsetof(struct amap_sol_cqe, code) / 32] & CQE_CODE_MASK, sol->dw[offsetof(struct amap_sol_cqe, cid) / 32] & CQE_CID_MASK); + } iscsi_conn_failure(beiscsi_conn->conn, ISCSI_ERR_CONN_FAILED); break; @@ -2674,7 +2742,7 @@ static int be_mcc_queues_create(struct beiscsi_hba *phba, goto mcc_cq_destroy; /* Ask BE to create MCC queue */ - if (be_cmd_mccq_create(phba, q, cq)) + if (beiscsi_cmd_mccq_create(phba, q, cq)) goto mcc_q_free; return 0; @@ -2735,6 +2803,10 @@ static int hwi_init_port(struct beiscsi_hba *phba) goto error; } + if (phba->fw_config.iscsi_features == 0x1) + ring_mode = 1; + else + ring_mode = 0; status = mgmt_get_fw_config(ctrl, phba); if (status != 0) { shost_printk(KERN_ERR, phba->shost, @@ -2869,6 +2941,17 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) phba->io_sgl_hndl_avbl = 0; phba->eh_sgl_hndl_avbl = 0; + if (ring_mode) { + phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) * + phba->params.icds_per_ctrl, + GFP_KERNEL); + if (!phba->sgl_hndl_array) { + shost_printk(KERN_ERR, phba->shost, + "Mem Alloc Failed. Failing to load\n"); + return -ENOMEM; + } + } + mem_descr_sglh = phba->init_mem; mem_descr_sglh += HWI_MEM_SGLH; if (1 == mem_descr_sglh->num_elements) { @@ -2876,6 +2959,8 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) phba->params.ios_per_ctrl, GFP_KERNEL); if (!phba->io_sgl_hndl_base) { + if (ring_mode) + kfree(phba->sgl_hndl_array); shost_printk(KERN_ERR, phba->shost, "Mem Alloc Failed. 
Failing to load\n"); return -ENOMEM; @@ -3060,6 +3145,8 @@ static int beiscsi_init_port(struct beiscsi_hba *phba) if (hba_setup_cid_tbls(phba)) { shost_printk(KERN_ERR, phba->shost, "Failed in hba_setup_cid_tbls\n"); + if (ring_mode) + kfree(phba->sgl_hndl_array); kfree(phba->io_sgl_hndl_base); kfree(phba->eh_sgl_hndl_base); goto do_cleanup_ctrlr; @@ -3110,6 +3197,8 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba) "mgmt_epfw_cleanup FAILED \n"); hwi_cleanup(phba); hwi_purge_eq(phba); + if (ring_mode) + kfree(phba->sgl_hndl_array); kfree(phba->io_sgl_hndl_base); kfree(phba->eh_sgl_hndl_base); kfree(phba->cid_array); @@ -3194,7 +3283,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb)); doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; - doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) + if (!ring_mode) + doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; @@ -3280,7 +3370,14 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> wrb_index << 16) | (unsigned int) (io_task->psgl_handle->sgl_index)); - io_task->pwrb_handle->pio_handle = task; + if (ring_mode) { + phba->sgl_hndl_array[io_task->psgl_handle->sgl_index - + phba->fw_config.iscsi_cid_start] = + io_task->psgl_handle; + io_task->psgl_handle->task = task; + io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid; + } else + io_task->pwrb_handle->pio_handle = task; io_task->cmd_bhs->iscsi_hdr.itt = itt; return 0; @@ -3363,11 +3460,17 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, ISCSI_OPCODE_SCSI_DATA_OUT); AMAP_SET_BITS(struct amap_pdu_data_out, final_bit, &io_task->cmd_bhs->iscsi_data_pdu, 1); - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + if (ring_mode) + io_task->psgl_handle->type = INI_WR_CMD; + else + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); } else { - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + if (ring_mode) + io_task->psgl_handle->type = INI_RD_CMD; + else + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); } @@ -3393,7 +3496,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; - doorbell |= (io_task->pwrb_handle->wrb_index & + if (!ring_mode) + doorbell |= (io_task->pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; @@ -3427,19 +3531,28 @@ static int beiscsi_mtask(struct iscsi_task *task) switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_LOGIN: - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + if (ring_mode) + io_task->psgl_handle->type = TGT_DM_CMD; + else + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, TGT_DM_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); hwi_write_buffer(pwrb, task); break; case ISCSI_OP_NOOP_OUT: - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + if (ring_mode) + io_task->psgl_handle->type = INI_RD_CMD; + else + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); hwi_write_buffer(pwrb, task); break; case 
ISCSI_OP_TEXT: - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + if (ring_mode) + io_task->psgl_handle->type = INI_WR_CMD; + else + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); hwi_write_buffer(pwrb, task); @@ -3462,13 +3575,19 @@ static int beiscsi_mtask(struct iscsi_task *task) mgmt_invalidate_icds(phba, aborted_io_task->psgl_handle->sgl_index, cid); - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + if (ring_mode) + io_task->psgl_handle->type = INI_TMF_CMD; + else + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); hwi_write_buffer(pwrb, task); break; case ISCSI_OP_LOGOUT: AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); + if (ring_mode) + io_task->psgl_handle->type = HWH_TYPE_LOGOUT; + else AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, HWH_TYPE_LOGOUT); hwi_write_buffer(pwrb, task); @@ -3487,7 +3606,8 @@ static int beiscsi_mtask(struct iscsi_task *task) be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); doorbell |= cid & DB_WRB_POST_CID_MASK; - doorbell |= (io_task->pwrb_handle->wrb_index & + if (!ring_mode) + doorbell |= (io_task->pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); @@ -3797,6 +3917,7 @@ static int __init beiscsi_module_init(void) "beiscsi pci driver.\n"); goto unregister_iscsi_transport; } + ring_mode = 0; return 0; unregister_iscsi_transport: -- cgit v1.2.3-59-g8ed1b From dbf9bfe615717d1145f263c0049fe2328e6ed395 Mon Sep 17 00:00:00 2001 From: jack wang Date: Wed, 14 Oct 2009 16:19:21 +0800 Subject: [SCSI] pm8001: add SAS/SATA HBA driver This driver supports PMC-Sierra PCIe SAS/SATA 8x6G SPC 8001 chip based host adapters. 
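As a usage illustration only (not part of the driver code): firmware can be flashed through the update_fw host attribute added below. The attribute expects a "<command> <file>" string, where the command is "update" or "set_nvmd" and the file is loaded with request_firmware(), so it must live in the firmware search path; the host number and image name in this sketch are examples, and the sysfs location assumes the usual scsi_host class directory.

/* Minimal userspace sketch for the update_fw attribute (run as root). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/class/scsi_host/host0/update_fw";
	const char *cmd = "update spc8001_fw.img";	/* "<command> <file>" */
	char status[128];
	ssize_t n;
	int fd;

	fd = open(attr, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, cmd, strlen(cmd)) < 0)	/* parsed with sscanf("%s %s") */
		perror("write");
	close(fd);

	fd = open(attr, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, status, sizeof(status) - 1);	/* returns "status=<code> <reason>" */
	if (n > 0) {
		status[n] = '\0';
		fputs(status, stdout);
	}
	close(fd);
	return 0;
}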
Signed-off-by: Jack Wang Signed-off-by: Lindar Liu Signed-off-by: Tom Peng Signed-off-by: Kevin Ao Signed-off-by: James Bottomley --- MAINTAINERS | 7 + drivers/scsi/Kconfig | 8 + drivers/scsi/Makefile | 1 + drivers/scsi/pm8001/Makefile | 12 + drivers/scsi/pm8001/pm8001_chips.h | 89 + drivers/scsi/pm8001/pm8001_ctl.c | 573 +++++ drivers/scsi/pm8001/pm8001_ctl.h | 67 + drivers/scsi/pm8001/pm8001_defs.h | 112 + drivers/scsi/pm8001/pm8001_hwi.c | 4371 ++++++++++++++++++++++++++++++++++++ drivers/scsi/pm8001/pm8001_hwi.h | 1011 +++++++++ drivers/scsi/pm8001/pm8001_init.c | 888 ++++++++ drivers/scsi/pm8001/pm8001_sas.c | 1104 +++++++++ drivers/scsi/pm8001/pm8001_sas.h | 480 ++++ include/linux/pci_ids.h | 2 + 14 files changed, 8725 insertions(+) create mode 100644 drivers/scsi/pm8001/Makefile create mode 100644 drivers/scsi/pm8001/pm8001_chips.h create mode 100644 drivers/scsi/pm8001/pm8001_ctl.c create mode 100644 drivers/scsi/pm8001/pm8001_ctl.h create mode 100644 drivers/scsi/pm8001/pm8001_defs.h create mode 100644 drivers/scsi/pm8001/pm8001_hwi.c create mode 100644 drivers/scsi/pm8001/pm8001_hwi.h create mode 100644 drivers/scsi/pm8001/pm8001_init.c create mode 100644 drivers/scsi/pm8001/pm8001_sas.c create mode 100644 drivers/scsi/pm8001/pm8001_sas.h diff --git a/MAINTAINERS b/MAINTAINERS index a1a2aceca5bd..016411cadc9a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4116,6 +4116,13 @@ W: http://www.pmc-sierra.com/ S: Supported F: drivers/scsi/pmcraid.* +PMC SIERRA PM8001 DRIVER +M: jack_wang@usish.com +M: lindar_liu@usish.com +L: linux-scsi@vger.kernel.org +S: Supported +F: drivers/scsi/pm8001/ + POSIX CLOCKS and TIMERS M: Thomas Gleixner S: Supported diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index e11cca4c784c..2e4f7d0ee639 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1818,6 +1818,14 @@ config SCSI_PMCRAID ---help--- This driver supports the PMC SIERRA MaxRAID adapters. +config SCSI_PM8001 + tristate "PMC-Sierra SPC 8001 SAS/SATA Based Host Adapter driver" + depends on PCI && SCSI + select SCSI_SAS_LIBSAS + help + This driver supports PMC-Sierra PCIE SAS/SATA 8x6G SPC 8001 chip + based host adapters. + config SCSI_SRP tristate "SCSI RDMA Protocol helper library" depends on SCSI && PCI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 3ad61db5e3fa..53b1dac7e7d9 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -70,6 +70,7 @@ obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/ obj-$(CONFIG_SCSI_AACRAID) += aacraid/ obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/ +obj-$(CONFIG_SCSI_PM8001) += pm8001/ obj-$(CONFIG_SCSI_IPS) += ips.o obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile new file mode 100644 index 000000000000..52f04296171c --- /dev/null +++ b/drivers/scsi/pm8001/Makefile @@ -0,0 +1,12 @@ +# +# Kernel configuration file for the PM8001 SAS/SATA 8x6G based HBA driver +# +# Copyright (C) 2008-2009 USI Co., Ltd. + + +obj-$(CONFIG_SCSI_PM8001) += pm8001.o +pm8001-y += pm8001_init.o \ + pm8001_sas.o \ + pm8001_ctl.o \ + pm8001_hwi.o + diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h new file mode 100644 index 000000000000..4efa4d0950e5 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_chips.h @@ -0,0 +1,89 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#ifndef _PM8001_CHIPS_H_ +#define _PM8001_CHIPS_H_ + +static inline u32 pm8001_read_32(void *virt_addr) +{ + return *((u32 *)virt_addr); +} + +static inline void pm8001_write_32(void *addr, u32 offset, u32 val) +{ + *((u32 *)(addr + offset)) = val; +} + +static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar, + u32 offset) +{ + return readl(pm8001_ha->io_mem[bar].memvirtaddr + offset); +} + +static inline void pm8001_cw32(struct pm8001_hba_info *pm8001_ha, u32 bar, + u32 addr, u32 val) +{ + writel(val, pm8001_ha->io_mem[bar].memvirtaddr + addr); +} +static inline u32 pm8001_mr32(void __iomem *addr, u32 offset) +{ + return readl(addr + offset); +} +static inline void pm8001_mw32(void __iomem *addr, u32 offset, u32 val) +{ + writel(val, addr + offset); +} +static inline u32 get_pci_bar_index(u32 pcibar) +{ + switch (pcibar) { + case 0x18: + case 0x1C: + return 1; + case 0x20: + return 2; + case 0x24: + return 3; + default: + return 0; + } +} + +#endif /* _PM8001_CHIPS_H_ */ + diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c new file mode 100644 index 000000000000..14b13acae6dd --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -0,0 +1,573 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ +#include +#include "pm8001_sas.h" +#include "pm8001_ctl.h" + +/* scsi host attributes */ + +/** + * pm8001_ctl_mpi_interface_rev_show - MPI interface revision number + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.interface_rev); +} +static +DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); + +/** + * pm8001_ctl_fw_version_show - firmware version + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_fw_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.firmware_rev)); +} +static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); +/** + * pm8001_ctl_max_out_io_show - max outstanding io supported + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.max_out_io); +} +static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); +/** + * pm8001_ctl_max_devices_show - max devices support + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_max_devices_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16)); +} +static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); +/** + * pm8001_ctl_max_sg_list_show - max sg list supported iff not 0.0 for no + * hardware limitation + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF); +} +static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); + +#define SAS_1_0 0x1 +#define SAS_1_1 0x2 +#define SAS_2_0 0x4 + +static ssize_t +show_sas_spec_support_status(unsigned int mode, char *buf) +{ + ssize_t len = 0; + + if (mode & SAS_1_1) + len = sprintf(buf, "%s", "SAS1.1"); + if (mode & SAS_2_0) + len += sprintf(buf + len, "%s%s", len ? ", " : "", "SAS2.0"); + len += sprintf(buf + len, "\n"); + + return len; +} + +/** + * pm8001_ctl_sas_spec_support_show - sas spec supported + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + unsigned int mode; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25; + return show_sas_spec_support_status(mode, buf); +} +static DEVICE_ATTR(sas_spec_support, S_IRUGO, + pm8001_ctl_sas_spec_support_show, NULL); + +/** + * pm8001_ctl_sas_address_show - sas address + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * This is the controller sas address + * + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr)); +} +static DEVICE_ATTR(host_sas_address, S_IRUGO, + pm8001_ctl_host_sas_address_show, NULL); + +/** + * pm8001_ctl_logging_level_show - logging level + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t pm8001_ctl_logging_level_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return snprintf(buf, PAGE_SIZE, "%08xh\n", pm8001_ha->logging_level); +} +static ssize_t pm8001_ctl_logging_level_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int val = 0; + + if (sscanf(buf, "%x", &val) != 1) + return -EINVAL; + + pm8001_ha->logging_level = val; + return strlen(buf); +} + +static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, + pm8001_ctl_logging_level_show, pm8001_ctl_logging_level_store); +/** + * pm8001_ctl_aap_log_show - aap1 event log + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_aap_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int i; +#define AAP1_MEMMAP(r, c) \ + (*(u32 *)((u8*)pm8001_ha->memoryMap.region[AAP1].virt_ptr + (r) * 32 \ + + (c))) + + char *str = buf; + int max = 2; + for (i = 0; i < max; i++) { + str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" + "0x%08x 0x%08x\n", + AAP1_MEMMAP(i, 0), + AAP1_MEMMAP(i, 4), + AAP1_MEMMAP(i, 8), + AAP1_MEMMAP(i, 12), + AAP1_MEMMAP(i, 16), + AAP1_MEMMAP(i, 20), + AAP1_MEMMAP(i, 24), + AAP1_MEMMAP(i, 28)); + } + + return str - buf; +} +static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); +/** + * pm8001_ctl_aap_log_show - IOP event log + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t pm8001_ctl_iop_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; +#define IOP_MEMMAP(r, c) \ + (*(u32 *)((u8*)pm8001_ha->memoryMap.region[IOP].virt_ptr + (r) * 32 \ + + (c))) + int i; + char *str = buf; + int max = 2; + for (i = 0; i < max; i++) { + str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" + "0x%08x 0x%08x\n", + IOP_MEMMAP(i, 0), + IOP_MEMMAP(i, 4), + IOP_MEMMAP(i, 8), + IOP_MEMMAP(i, 12), + IOP_MEMMAP(i, 16), + IOP_MEMMAP(i, 20), + IOP_MEMMAP(i, 24), + IOP_MEMMAP(i, 28)); + } + + return str - buf; +} +static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); + +#define FLASH_CMD_NONE 0x00 +#define FLASH_CMD_UPDATE 0x01 +#define FLASH_CMD_SET_NVMD 0x02 + +struct flash_command { + u8 command[8]; + int code; +}; + +static struct flash_command flash_command_table[] = +{ + {"set_nvmd", FLASH_CMD_SET_NVMD}, + {"update", FLASH_CMD_UPDATE}, + {"", FLASH_CMD_NONE} /* Last entry should be NULL. */ +}; + +struct error_fw { + char *reason; + int err_code; +}; + +static struct error_fw flash_error_table[] = +{ + {"Failed to open fw image file", FAIL_OPEN_BIOS_FILE}, + {"image header mismatch", FLASH_UPDATE_HDR_ERR}, + {"image offset mismatch", FLASH_UPDATE_OFFSET_ERR}, + {"image CRC Error", FLASH_UPDATE_CRC_ERR}, + {"image length Error.", FLASH_UPDATE_LENGTH_ERR}, + {"Failed to program flash chip", FLASH_UPDATE_HW_ERR}, + {"Flash chip not supported.", FLASH_UPDATE_DNLD_NOT_SUPPORTED}, + {"Flash update disabled.", FLASH_UPDATE_DISABLED}, + {"Flash in progress", FLASH_IN_PROGRESS}, + {"Image file size Error", FAIL_FILE_SIZE}, + {"Input parameter error", FAIL_PARAMETERS}, + {"Out of memory", FAIL_OUT_MEMORY}, + {"OK", 0} /* Last entry err_code = 0. 
*/ +}; + +static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) +{ + struct pm8001_ioctl_payload *payload; + DECLARE_COMPLETION_ONSTACK(completion); + u8 *ioctlbuffer = NULL; + u32 length = 0; + u32 ret = 0; + + length = 1024 * 5 + sizeof(*payload) - 1; + ioctlbuffer = kzalloc(length, GFP_KERNEL); + if (!ioctlbuffer) + return -ENOMEM; + if ((pm8001_ha->fw_image->size <= 0) || + (pm8001_ha->fw_image->size > 4096)) { + ret = FAIL_FILE_SIZE; + goto out; + } + payload = (struct pm8001_ioctl_payload *)ioctlbuffer; + memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data, + pm8001_ha->fw_image->size); + payload->length = pm8001_ha->fw_image->size; + payload->id = 0; + pm8001_ha->nvmd_completion = &completion; + ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); + wait_for_completion(&completion); +out: + kfree(ioctlbuffer); + return ret; +} + +static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) +{ + struct pm8001_ioctl_payload *payload; + DECLARE_COMPLETION_ONSTACK(completion); + u8 *ioctlbuffer = NULL; + u32 length = 0; + struct fw_control_info *fwControl; + u32 loopNumber, loopcount = 0; + u32 sizeRead = 0; + u32 partitionSize, partitionSizeTmp; + u32 ret = 0; + u32 partitionNumber = 0; + struct pm8001_fw_image_header *image_hdr; + + length = 1024 * 16 + sizeof(*payload) - 1; + ioctlbuffer = kzalloc(length, GFP_KERNEL); + image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data; + if (!ioctlbuffer) + return -ENOMEM; + if (pm8001_ha->fw_image->size < 28) { + ret = FAIL_FILE_SIZE; + goto out; + } + + while (sizeRead < pm8001_ha->fw_image->size) { + partitionSizeTmp = + *(u32 *)((u8 *)&image_hdr->image_length + sizeRead); + partitionSize = be32_to_cpu(partitionSizeTmp); + loopcount = (partitionSize + HEADER_LEN)/IOCTL_BUF_SIZE; + if (loopcount % IOCTL_BUF_SIZE) + loopcount++; + if (loopcount == 0) + loopcount++; + for (loopNumber = 0; loopNumber < loopcount; loopNumber++) { + payload = (struct pm8001_ioctl_payload *)ioctlbuffer; + payload->length = 1024*16; + payload->id = 0; + fwControl = + (struct fw_control_info *)payload->func_specific; + fwControl->len = IOCTL_BUF_SIZE; /* IN */ + fwControl->size = partitionSize + HEADER_LEN;/* IN */ + fwControl->retcode = 0;/* OUT */ + fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */ + + /* for the last chunk of data in case file size is not even with + 4k, load only the rest*/ + if (((loopcount-loopNumber) == 1) && + ((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)) { + fwControl->len = + (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE; + memcpy((u8 *)fwControl->buffer, + (u8 *)pm8001_ha->fw_image->data + sizeRead, + (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE); + sizeRead += + (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE; + } else { + memcpy((u8 *)fwControl->buffer, + (u8 *)pm8001_ha->fw_image->data + sizeRead, + IOCTL_BUF_SIZE); + sizeRead += IOCTL_BUF_SIZE; + } + + pm8001_ha->nvmd_completion = &completion; + ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload); + wait_for_completion(&completion); + if (ret || (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS)) { + ret = fwControl->retcode; + kfree(ioctlbuffer); + ioctlbuffer = NULL; + break; + } + } + if (ret) + break; + partitionNumber++; +} +out: + kfree(ioctlbuffer); + return ret; +} +static ssize_t pm8001_store_update_fw(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct 
pm8001_hba_info *pm8001_ha = sha->lldd_ha; + char *cmd_ptr, *filename_ptr; + int res, i; + int flash_command = FLASH_CMD_NONE; + int err = 0; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + cmd_ptr = kzalloc(count*2, GFP_KERNEL); + + if (!cmd_ptr) { + err = FAIL_OUT_MEMORY; + goto out; + } + + filename_ptr = cmd_ptr + count; + res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr); + if (res != 2) { + err = FAIL_PARAMETERS; + goto out1; + } + + for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) { + if (!memcmp(flash_command_table[i].command, + cmd_ptr, strlen(cmd_ptr))) { + flash_command = flash_command_table[i].code; + break; + } + } + if (flash_command == FLASH_CMD_NONE) { + err = FAIL_PARAMETERS; + goto out1; + } + + if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) { + err = FLASH_IN_PROGRESS; + goto out1; + } + err = request_firmware(&pm8001_ha->fw_image, + filename_ptr, + pm8001_ha->dev); + + if (err) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Failed to load firmware image file %s," + " error %d\n", filename_ptr, err)); + err = FAIL_OPEN_BIOS_FILE; + goto out1; + } + + switch (flash_command) { + case FLASH_CMD_UPDATE: + pm8001_ha->fw_status = FLASH_IN_PROGRESS; + err = pm8001_update_flash(pm8001_ha); + break; + case FLASH_CMD_SET_NVMD: + pm8001_ha->fw_status = FLASH_IN_PROGRESS; + err = pm8001_set_nvmd(pm8001_ha); + break; + default: + pm8001_ha->fw_status = FAIL_PARAMETERS; + err = FAIL_PARAMETERS; + break; + } + release_firmware(pm8001_ha->fw_image); +out1: + kfree(cmd_ptr); +out: + pm8001_ha->fw_status = err; + + if (!err) + return count; + else + return -err; +} + +static ssize_t pm8001_show_update_fw(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + int i; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + for (i = 0; flash_error_table[i].err_code != 0; i++) { + if (flash_error_table[i].err_code == pm8001_ha->fw_status) + break; + } + if (pm8001_ha->fw_status != FLASH_IN_PROGRESS) + pm8001_ha->fw_status = FLASH_OK; + + return snprintf(buf, PAGE_SIZE, "status=%x %s\n", + flash_error_table[i].err_code, + flash_error_table[i].reason); +} + +static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUGO, + pm8001_show_update_fw, pm8001_store_update_fw); +struct device_attribute *pm8001_host_attrs[] = { + &dev_attr_interface_rev, + &dev_attr_fw_version, + &dev_attr_update_fw, + &dev_attr_aap_log, + &dev_attr_iop_log, + &dev_attr_max_out_io, + &dev_attr_max_devices, + &dev_attr_max_sg_list, + &dev_attr_sas_spec_support, + &dev_attr_logging_level, + &dev_attr_host_sas_address, + NULL, +}; + diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h new file mode 100644 index 000000000000..22644de26399 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_ctl.h @@ -0,0 +1,67 @@ + /* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
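For reference, the flash update path above walks the firmware image one partition at a time and pushes each partition to the controller in IOCTL_BUF_SIZE pieces, with a short tail transfer when the partition plus its 28-byte header is not a multiple of 4 KiB. A minimal userspace sketch of that chunk arithmetic (constants mirror pm8001_ctl.h; the partition size is an example, not a real image):

#include <stdio.h>

#define IOCTL_BUF_SIZE 4096
#define HEADER_LEN 28

/* Number of IOCTL transfers needed for one partition of the image. */
static unsigned int chunk_count(unsigned int partition_size)
{
	unsigned int total = partition_size + HEADER_LEN;
	unsigned int chunks = total / IOCTL_BUF_SIZE;

	if (total % IOCTL_BUF_SIZE)	/* partial tail chunk */
		chunks++;
	if (chunks == 0)		/* always send at least one chunk */
		chunks = 1;
	return chunks;
}

int main(void)
{
	unsigned int size = 100000;	/* example partition size in bytes */
	unsigned int i, sent = 0, n = chunk_count(size);

	for (i = 0; i < n; i++) {
		unsigned int len = IOCTL_BUF_SIZE;

		/* last chunk carries only the remainder, as in pm8001_update_flash() */
		if (i == n - 1 && (size + HEADER_LEN) % IOCTL_BUF_SIZE)
			len = (size + HEADER_LEN) % IOCTL_BUF_SIZE;
		sent += len;
	}
	printf("%u chunks, %u bytes queued\n", n, sent);
	return 0;
}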
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#ifndef PM8001_CTL_H_INCLUDED +#define PM8001_CTL_H_INCLUDED + +#define IOCTL_BUF_SIZE 4096 +#define HEADER_LEN 28 +#define SIZE_OFFSET 16 + +struct pm8001_ioctl_payload { + u32 signature; + u16 major_function; + u16 minor_function; + u16 length; + u16 status; + u16 offset; + u16 id; + u8 func_specific[1]; +}; + +#define FLASH_OK 0x000000 +#define FAIL_OPEN_BIOS_FILE 0x000100 +#define FAIL_FILE_SIZE 0x000a00 +#define FAIL_PARAMETERS 0x000b00 +#define FAIL_OUT_MEMORY 0x000c00 +#define FLASH_IN_PROGRESS 0x001000 + +#endif /* PM8001_CTL_H_INCLUDED */ + diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h new file mode 100644 index 000000000000..944afada61ee --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -0,0 +1,112 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#ifndef _PM8001_DEFS_H_ +#define _PM8001_DEFS_H_ + +enum chip_flavors { + chip_8001, +}; +#define USI_MAX_MEMCNT 9 +#define PM8001_MAX_DMA_SG SG_ALL +enum phy_speed { + PHY_SPEED_15 = 0x01, + PHY_SPEED_30 = 0x02, + PHY_SPEED_60 = 0x04, +}; + +enum data_direction { + DATA_DIR_NONE = 0x0, /* NO TRANSFER */ + DATA_DIR_IN = 0x01, /* INBOUND */ + DATA_DIR_OUT = 0x02, /* OUTBOUND */ + DATA_DIR_BYRECIPIENT = 0x04, /* UNSPECIFIED */ +}; + +enum port_type { + PORT_TYPE_SAS = (1L << 1), + PORT_TYPE_SATA = (1L << 0), +}; + +/* driver compile-time configuration */ +#define PM8001_MAX_CCB 512 /* max ccbs supported */ +#define PM8001_MAX_INB_NUM 1 +#define PM8001_MAX_OUTB_NUM 1 +#define PM8001_CAN_QUEUE 128 /* SCSI Queue depth */ + +/* unchangeable hardware details */ +#define PM8001_MAX_PHYS 8 /* max. possible phys */ +#define PM8001_MAX_PORTS 8 /* max. possible ports */ +#define PM8001_MAX_DEVICES 1024 /* max supported device */ + +enum memory_region_num { + AAP1 = 0x0, /* application acceleration processor */ + IOP, /* IO processor */ + CI, /* consumer index */ + PI, /* producer index */ + IB, /* inbound queue */ + OB, /* outbound queue */ + NVMD, /* NVM device */ + DEV_MEM, /* memory for devices */ + CCB_MEM, /* memory for command control block */ +}; +#define PM8001_EVENT_LOG_SIZE (128 * 1024) + +/*error code*/ +enum mpi_err { + MPI_IO_STATUS_SUCCESS = 0x0, + MPI_IO_STATUS_BUSY = 0x01, + MPI_IO_STATUS_FAIL = 0x02, +}; + +/** + * Phy Control constants + */ +enum phy_control_type { + PHY_LINK_RESET = 0x01, + PHY_HARD_RESET = 0x02, + PHY_NOTIFY_ENABLE_SPINUP = 0x10, +}; + +enum pm8001_hba_info_flags { + PM8001F_INIT_TIME = (1U << 0), + PM8001F_RUN_TIME = (1U << 1), +}; + +#endif diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c new file mode 100644 index 000000000000..aa5756fe0574 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -0,0 +1,4371 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + #include "pm8001_sas.h" + #include "pm8001_hwi.h" + #include "pm8001_chips.h" + #include "pm8001_ctl.h" + +/** + * read_main_config_table - read the configure table and save it. + * @pm8001_ha: our hba card information + */ +static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00); + pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04); + pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08); + pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C); + pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10); + pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14); + pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18); + pm8001_ha->main_cfg_tbl.inbound_queue_offset = + pm8001_mr32(address, 0x1C); + pm8001_ha->main_cfg_tbl.outbound_queue_offset = + pm8001_mr32(address, 0x20); + pm8001_ha->main_cfg_tbl.hda_mode_flag = + pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); + + /* read analog Setting offset from the configuration table */ + pm8001_ha->main_cfg_tbl.anolog_setup_table_offset = + pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); + + /* read Error Dump Offset and Length */ + pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); + pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); + pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); + pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); +} + +/** + * read_general_status_table - read the general status table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void __devinit +read_general_status_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->general_stat_tbl_addr; + pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00); + pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04); + pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08); + pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C); + pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10); + pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14); + pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18); + pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C); + pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20); + pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24); + pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28); + pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C); + pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30); + pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34); + pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38); + pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C); + pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40); + pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44); + pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48); + pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C); + pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50); + pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54); + pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58); + pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C); + pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60); +} + +/** + * read_inbnd_queue_table - read the inbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void __devinit +read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int inbQ_num = 1; + int i; + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + for (i = 0; i < inbQ_num; i++) { + u32 offset = i * 0x24; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(address, (offset + 0x18)); + } +} + +/** + * read_outbnd_queue_table - read the outbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void __devinit +read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int outbQ_num = 1; + int i; + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + for (i = 0; i < outbQ_num; i++) { + u32 offset = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(address, (offset + 0x18)); + } +} + +/** + * init_default_table_values - init the default table. 
+ * @pm8001_ha: our hba card information + */ +static void __devinit +init_default_table_values(struct pm8001_hba_info *pm8001_ha) +{ + int qn = 1; + int i; + u32 offsetib, offsetob; + void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; + void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + + pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0; + pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0; + + pm8001_ha->main_cfg_tbl.upper_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; + pm8001_ha->main_cfg_tbl.lower_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; + pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_hi; + pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_lo; + pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01; + for (i = 0; i < qn; i++) { + pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = + 0x00000100 | (0x00000040 << 16) | (0x00<<30); + pm8001_ha->inbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[IB].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[IB].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr; + pm8001_ha->inbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[IB].total_len; + pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = + pm8001_ha->memoryMap.region[CI].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = + pm8001_ha->memoryMap.region[CI].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].ci_virt = + pm8001_ha->memoryMap.region[CI].virt_ptr; + offsetib = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(addressib, + (offsetib + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(addressib, (offsetib + 0x18)); + pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; + pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + } + for (i = 0; i < qn; i++) { + pm8001_ha->outbnd_q_tbl[i].element_size_cnt = + 256 | (64 << 16) | (1<<30); + pm8001_ha->outbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[OB].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[OB].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr; + pm8001_ha->outbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[OB].total_len; + pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = + pm8001_ha->memoryMap.region[PI].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = + pm8001_ha->memoryMap.region[PI].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = + 0 | (0 << 16) | (0 << 24); + pm8001_ha->outbnd_q_tbl[i].pi_virt = + 
pm8001_ha->memoryMap.region[PI].virt_ptr; + offsetob = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(addressob, + offsetob + 0x14)); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(addressob, (offsetob + 0x18)); + pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; + pm8001_ha->outbnd_q_tbl[i].producer_index = 0; + } +} + +/** + * update_main_config_table - update the main default table to the HBA. + * @pm8001_ha: our hba card information + */ +static void __devinit +update_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_mw32(address, 0x24, + pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd); + pm8001_mw32(address, 0x28, + pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3); + pm8001_mw32(address, 0x2C, + pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7); + pm8001_mw32(address, 0x30, + pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3); + pm8001_mw32(address, 0x34, + pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7); + pm8001_mw32(address, 0x38, + pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3); + pm8001_mw32(address, 0x3C, + pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7); + pm8001_mw32(address, 0x40, + pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3); + pm8001_mw32(address, 0x44, + pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7); + pm8001_mw32(address, 0x48, + pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3); + pm8001_mw32(address, 0x4C, + pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7); + pm8001_mw32(address, 0x50, + pm8001_ha->main_cfg_tbl.upper_event_log_addr); + pm8001_mw32(address, 0x54, + pm8001_ha->main_cfg_tbl.lower_event_log_addr); + pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size); + pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option); + pm8001_mw32(address, 0x60, + pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr); + pm8001_mw32(address, 0x64, + pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr); + pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size); + pm8001_mw32(address, 0x6C, + pm8001_ha->main_cfg_tbl.iop_event_log_option); + pm8001_mw32(address, 0x70, + pm8001_ha->main_cfg_tbl.fatal_err_interrupt); +} + +/** + * update_inbnd_queue_table - update the inbound queue table to the HBA. + * @pm8001_ha: our hba card information + */ +static void __devinit +update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number) +{ + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + u16 offset = number * 0x20; + pm8001_mw32(address, offset + 0x00, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + pm8001_mw32(address, offset + 0x04, + pm8001_ha->inbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + 0x08, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + 0x0C, + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); + pm8001_mw32(address, offset + 0x10, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); +} + +/** + * update_outbnd_queue_table - update the outbound queue table to the HBA. 
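The table helpers above all follow one pattern: every field of the main configuration, general status and queue tables is a 32-bit word at a fixed byte offset from the table base, read and written through pm8001_mr32()/pm8001_mw32() over ioremapped BAR space. A rough userspace stand-in for that access pattern (offsets and the signature value are illustrative only, not the real table layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TBL_SIGNATURE	0x00	/* byte offsets into the table */
#define TBL_IFACE_REV	0x04
#define TBL_FW_REV	0x08

static uint32_t tbl_r32(const uint8_t *base, uint32_t off)
{
	uint32_t v;
	memcpy(&v, base + off, sizeof(v));	/* stands in for a readl() */
	return v;
}

static void tbl_w32(uint8_t *base, uint32_t off, uint32_t v)
{
	memcpy(base + off, &v, sizeof(v));	/* stands in for a writel() */
}

int main(void)
{
	uint8_t table[0x80] = {0};		/* fake config table */

	tbl_w32(table, TBL_SIGNATURE, 0x12345678);
	printf("signature 0x%08x, fw rev 0x%08x\n",
	       tbl_r32(table, TBL_SIGNATURE), tbl_r32(table, TBL_FW_REV));
	return 0;
}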
+ * @pm8001_ha: our hba card information + */ +static void __devinit +update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number) +{ + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + u16 offset = number * 0x24; + pm8001_mw32(address, offset + 0x00, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + pm8001_mw32(address, offset + 0x04, + pm8001_ha->outbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + 0x08, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + 0x0C, + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); + pm8001_mw32(address, offset + 0x10, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); + pm8001_mw32(address, offset + 0x1C, + pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); +} + +/** + * bar4_shift - function is called to shift BAR base address + * @pm8001_ha : our hba card infomation + * @shiftValue : shifting value in memory bar. + */ +static u32 bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue) +{ + u32 regVal; + u32 max_wait_count; + + /* program the inbound AXI translation Lower Address */ + pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue); + + /* confirm the setting is written */ + max_wait_count = 1 * 1000 * 1000; /* 1 sec */ + do { + udelay(1); + regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW); + } while ((regVal != shiftValue) && (--max_wait_count)); + + if (!max_wait_count) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW" + " = 0x%x\n", regVal)); + return -1; + } + return 0; +} + +/** + * mpi_set_phys_g3_with_ssc + * @pm8001_ha: our hba card information + * @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc. + */ +static void __devinit +mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit) +{ + u32 offset; + u32 value; + u32 i; + +#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000 +#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000 +#define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074 +#define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074 +#define PHY_SSC_BIT_SHIFT 13 + + /* + * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3) + * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7) + */ + if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) + return; + /* set SSC bit of PHY 0 - 3 */ + for (i = 0; i < 4; i++) { + offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i; + value = pm8001_cr32(pm8001_ha, 2, offset); + if (SSCbit) + value = value | (0x00000001 << PHY_SSC_BIT_SHIFT); + else + value = value & (~(0x00000001<general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK)) + return -1; + /* check MPI Initialization error */ + gst_len_mpistate = gst_len_mpistate >> 16; + if (0x0000 != gst_len_mpistate) + return -1; + return 0; +} + +/** + * check_fw_ready - The LLDD check if the FW is ready, if not, return error. 
+ * @pm8001_ha: our hba card information + */ +static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) +{ + u32 value, value1; + u32 max_wait_count; + /* check error state */ + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + /* check AAP error */ + if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) { + /* error state */ + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + return -1; + } + + /* check IOP error */ + if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) { + /* error state */ + value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + return -1; + } + + /* bit 4-31 of scratch pad1 should be zeros if it is not + in error state*/ + if (value & SCRATCH_PAD1_STATE_MASK) { + /* error case */ + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + return -1; + } + + /* bit 2, 4-31 of scratch pad2 should be zeros if it is not + in error state */ + if (value1 & SCRATCH_PAD2_STATE_MASK) { + /* error case */ + return -1; + } + + max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */ + + /* wait until scratch pad 1 and 2 registers in ready state */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) + & SCRATCH_PAD1_RDY; + value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) + & SCRATCH_PAD2_RDY; + if ((--max_wait_count) == 0) + return -1; + } while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY)); + return 0; +} + +static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *base_addr; + u32 value; + u32 offset; + u32 pcibar; + u32 pcilogic; + + value = pm8001_cr32(pm8001_ha, 0, 0x44); + offset = value & 0x03FFFFFF; + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 Offset: %x \n", offset)); + pcilogic = (value & 0xFC000000) >> 26; + pcibar = get_pci_bar_index(pcilogic); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 PCI BAR: %d \n", pcibar)); + pm8001_ha->main_cfg_tbl_addr = base_addr = + pm8001_ha->io_mem[pcibar].memvirtaddr + offset; + pm8001_ha->general_stat_tbl_addr = + base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18); + pm8001_ha->inbnd_q_tbl_addr = + base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C); + pm8001_ha->outbnd_q_tbl_addr = + base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20); +} + +/** + * pm8001_chip_init - the main init function that initialize whole PM8001 chip. 
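init_pci_device_addresses() above derives the table location from Scratchpad 0: the low 26 bits are the byte offset of the main configuration table and the top 6 bits select the PCI BAR, which get_pci_bar_index() maps to a logical BAR. A small sketch of that decode, using a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t scratchpad0 = 0x04000100;	/* example register value */
	uint32_t offset   = scratchpad0 & 0x03FFFFFF;	/* bits 0..25 */
	uint32_t pcilogic = (scratchpad0 & 0xFC000000) >> 26;	/* bits 26..31 */

	printf("table offset 0x%x in logical BAR %u\n", offset, pcilogic);
	return 0;
}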
+ * @pm8001_ha: our hba card information + */ +static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) +{ + /* check the firmware status */ + if (-1 == check_fw_ready(pm8001_ha)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Firmware is not ready!\n")); + return -EBUSY; + } + + /* Initialize pci space address eg: mpi offset */ + init_pci_device_addresses(pm8001_ha); + init_default_table_values(pm8001_ha); + read_main_config_table(pm8001_ha); + read_general_status_table(pm8001_ha); + read_inbnd_queue_table(pm8001_ha); + read_outbnd_queue_table(pm8001_ha); + /* update main config table ,inbound table and outbound table */ + update_main_config_table(pm8001_ha); + update_inbnd_queue_table(pm8001_ha, 0); + update_outbnd_queue_table(pm8001_ha, 0); + mpi_set_phys_g3_with_ssc(pm8001_ha, 0); + mpi_set_open_retry_interval_reg(pm8001_ha, 7); + /* notify firmware update finished and check initialization status */ + if (0 == mpi_init_check(pm8001_ha)) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MPI initialize successful!\n")); + } else + return -EBUSY; + /*This register is a 16-bit timer with a resolution of 1us. This is the + timer used for interrupt delay/coalescing in the PCIe Application Layer. + Zero is not a valid value. A value of 1 in the register will cause the + interrupts to be normal. A value greater than 1 will cause coalescing + delays.*/ + pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1); + pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0); + return 0; +} + +static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + init_pci_device_addresses(pm8001_ha); + /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the + table is stop */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET); + + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 1 * 1000 * 1000;/* 1 sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPC_MSGU_CFG_TABLE_RESET; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:IBDB value/=0x%x\n", value)); + return -1; + } + + /* check the MPI-State for termination in progress */ + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 1 * 1000 * 1000; /* 1 sec */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_UNINIT == + (gst_len_mpistate & GST_MPI_STATE_MASK)) + break; + } while (--max_wait_count); + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TIME OUT MPI State = 0x%x\n", + gst_len_mpistate & GST_MPI_STATE_MASK)); + return -1; + } + return 0; +} + +/** + * soft_reset_ready_check - Function to check FW is ready for soft reset. 
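Several routines here (check_fw_ready, mpi_init_check, mpi_uninit_check, bar4_shift) share one idiom: kick a doorbell or write a register, then poll in 1 us steps with a roughly one second budget until the expected bit state appears. A stripped-down sketch of that loop, with a fake register standing in for pm8001_cr32() and a no-op delay standing in for udelay():

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x2;

static uint32_t read_reg(void)
{
	if (fake_reg)		/* pretend firmware clears the bit eventually */
		fake_reg--;
	return fake_reg;
}

static void delay_us(unsigned int us)
{
	(void)us;		/* no-op in this sketch */
}

/* Poll until (reg & mask) == 0 or the ~1 second budget expires. */
static int poll_bit_clear(uint32_t mask)
{
	uint32_t max_wait = 1 * 1000 * 1000;	/* 1 us steps */
	uint32_t val;

	do {
		delay_us(1);
		val = read_reg() & mask;
	} while (val != 0 && --max_wait);

	return max_wait ? 0 : -1;		/* -1 means timeout */
}

int main(void)
{
	printf("poll result %d\n", poll_bit_clear(0x3));
	return 0;
}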
+ * @pm8001_ha: our hba card information + */ +static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 regVal, regVal1, regVal2; + if (mpi_uninit_check(pm8001_ha) != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MPI state is not ready\n")); + return -1; + } + /* read the scratch pad 2 register bit 2 */ + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) + & SCRATCH_PAD2_FWRDY_RST; + if (regVal == SCRATCH_PAD2_FWRDY_RST) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Firmware is ready for reset .\n")); + } else { + /* Trigger NMI twice via RB6 */ + if (-1 == bar4_shift(pm8001_ha, RB6_ACCESS_REG)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + RB6_ACCESS_REG)); + return -1; + } + pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, + RB6_MAGIC_NUMBER_RST); + pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST); + /* wait for 100 ms */ + mdelay(100); + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) & + SCRATCH_PAD2_FWRDY_RST; + if (regVal != SCRATCH_PAD2_FWRDY_RST) { + regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:MSGU_SCRATCH_PAD1" + "=0x%x, MSGU_SCRATCH_PAD2=0x%x\n", + regVal1, regVal2)); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SCRATCH_PAD0 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SCRATCH_PAD3 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3))); + return -1; + } + } + return 0; +} + +/** + * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all + * the FW register status to the originated status. + * @pm8001_ha: our hba card information + * @signature: signature in host scratch pad0 register. 
+ */ +static int +pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature) +{ + u32 regVal, toggleVal; + u32 max_wait_count; + u32 regVal1, regVal2, regVal3; + + /* step1: Check FW is ready for soft reset */ + if (soft_reset_ready_check(pm8001_ha) != 0) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("FW is not ready\n")); + return -1; + } + + /* step 2: clear NMI status register on AAP1 and IOP, write the same + value to clear */ + /* map 0x60000 to BAR4(0x20), BAR2(win) */ + if (-1 == bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + MBIC_AAP1_ADDR_BASE)); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal)); + pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0); + /* map 0x70000 to BAR4(0x20), BAR2(win) */ + if (-1 == bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + MBIC_IOP_ADDR_BASE)); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", regVal)); + pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PCIE -Event Interrupt Enable = 0x%x\n", regVal)); + pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PCIE - Event Interrupt = 0x%x\n", regVal)); + pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PCIE -Error Interrupt Enable = 0x%x\n", regVal)); + pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PCIE - Error Interrupt = 0x%x\n", regVal)); + pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal); + + /* read the scratch pad 1 register bit 2 */ + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) + & SCRATCH_PAD1_RST; + toggleVal = regVal ^ SCRATCH_PAD1_RST; + + /* set signature in host scratch pad0 register to tell SPC that the + host performs the soft reset */ + pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature); + + /* read required registers for confirmming */ + /* map 0x0700000 to BAR4(0x20), BAR2(win) */ + if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_ADDR_BASE)); + return -1; + } + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x0(0x00007b88)-GSM Configuration and" + " Reset = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET))); + + /* step 3: host read GSM Configuration and Reset register */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET); + /* Put those bits to low */ + /* GSM XCBI offset = 0x70 0000 + 0x00 Bit 13 COM_SLV_SW_RSTB 1 + 0x00 Bit 12 QSSP_SW_RSTB 1 + 0x00 Bit 11 RAAE_SW_RSTB 1 + 0x00 Bit 9 RB_1_SW_RSTB 1 + 0x00 Bit 8 SM_SW_RSTB 1 + */ + regVal &= ~(0x00003b00); + /* host write GSM Configuration and Reset register */ + pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM " + "Configuration and Reset is 
set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET))); + + /* step 4: */ + /* disable GSM - Read Address Parity Check */ + regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x700038 - Read Address Parity Check " + "Enable = 0x%x\n", regVal1)); + pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable" + "is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK))); + + /* disable GSM - Write Address Parity Check */ + regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x700040 - Write Address Parity Check" + " Enable = 0x%x\n", regVal2)); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x700040 - Write Address Parity Check " + "Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK))); + + /* disable GSM - Write Data Parity Check */ + regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x300048 - Write Data Parity Check" + " Enable = 0x%x\n", regVal3)); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x300048 - Write Data Parity Check Enable" + "is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK))); + + /* step 5: delay 10 usec */ + udelay(10); + /* step 5-b: set GPIO-0 output control to tristate anyway */ + if (-1 == bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GPIO_ADDR_BASE)); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GPIO Output Control Register:" + " = 0x%x\n", regVal)); + /* set GPIO-0 output control to tri-state */ + regVal &= 0xFFFFFFFC; + pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal); + + /* Step 6: Reset the IOP and AAP1 */ + /* map 0x00000 to BAR4(0x20), BAR2(win) */ + if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SPC Shift Bar4 to 0x%x failed\n", + SPC_TOP_LEVEL_ADDR_BASE)); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Top Register before resetting IOP/AAP1" + ":= 0x%x\n", regVal)); + regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 7: Reset the BDMA/OSSP */ + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Top Register before resetting BDMA/OSSP" + ": = 0x%x\n", regVal)); + regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 8: delay 10 usec */ + udelay(10); + + /* step 9: bring the BDMA and OSSP out of reset */ + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Top Register before bringing up BDMA/OSSP" + ":= 0x%x\n", regVal)); + regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 10: delay 10 usec */ + udelay(10); + + /* step 11: reads and sets the GSM Configuration and Reset Register */ + /* map 0x0700000 to BAR4(0x20), BAR2(win) */ + if (-1 == 
bar4_shift(pm8001_ha, GSM_ADDR_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SPC Shift Bar4 to 0x%x failed\n", + GSM_ADDR_BASE)); + return -1; + } + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x0 (0x00007b88)-GSM Configuration and " + "Reset = 0x%x\n", pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET))); + regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET); + /* Put those bits to high */ + /* GSM XCBI offset = 0x70 0000 + 0x00 Bit 13 COM_SLV_SW_RSTB 1 + 0x00 Bit 12 QSSP_SW_RSTB 1 + 0x00 Bit 11 RAAE_SW_RSTB 1 + 0x00 Bit 9 RB_1_SW_RSTB 1 + 0x00 Bit 8 SM_SW_RSTB 1 + */ + regVal |= (GSM_CONFIG_RESET_VALUE); + pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM (0x00004088 ==> 0x00007b88) - GSM" + " Configuration and Reset is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET))); + + /* step 12: Restore GSM - Read Address Parity Check */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK); + /* just for debugging */ + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable" + " = 0x%x\n", regVal)); + pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x700038 - Read Address Parity" + " Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK))); + /* Restore GSM - Write Address Parity Check */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x700040 - Write Address Parity Check" + " Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK))); + /* Restore GSM - Write Data Parity Check */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GSM 0x700048 - Write Data Parity Check Enable" + "is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK))); + + /* step 13: bring the IOP and AAP1 out of reset */ + /* map 0x00000 to BAR4(0x20), BAR2(win) */ + if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + SPC_TOP_LEVEL_ADDR_BASE)); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 14: delay 10 usec - Normal Mode */ + udelay(10); + /* check Soft Reset Normal mode or Soft Reset HDA mode */ + if (signature == SPC_SOFT_RESET_SIGNATURE) { + /* step 15 (Normal Mode): wait until scratch pad1 register + bit 2 toggled */ + max_wait_count = 2 * 1000 * 1000;/* 2 sec */ + do { + udelay(1); + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & + SCRATCH_PAD1_RST; + } while ((regVal != toggleVal) && (--max_wait_count)); + + if (!max_wait_count) { + regVal = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_1); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT : ToggleVal 0x%x," + "MSGU_SCRATCH_PAD1 = 0x%x\n", + toggleVal, regVal)); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SCRATCH_PAD0 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_0))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SCRATCH_PAD2 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_2))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SCRATCH_PAD3 value = 0x%x\n", 
+ pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_3))); + return -1; + } + + /* step 16 (Normal) - Clear ODMR and ODCR */ + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + + /* step 17 (Normal Mode): wait for the FW and IOP to get + ready - 1 sec timeout */ + /* Wait for the SPC Configuration Table to be ready */ + if (check_fw_ready(pm8001_ha) == -1) { + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + /* return error if MPI Configuration Table not ready */ + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("FW not ready SCRATCH_PAD1" + " = 0x%x\n", regVal)); + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + /* return error if MPI Configuration Table not ready */ + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("FW not ready SCRATCH_PAD2" + " = 0x%x\n", regVal)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SCRATCH_PAD0 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_0))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SCRATCH_PAD3 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_3))); + return -1; + } + } + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SPC soft reset Complete\n")); + return 0; +} + +static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 i; + u32 regVal; + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip reset start\n")); + + /* do SPC chip reset. */ + regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET); + regVal &= ~(SPC_REG_RESET_DEVICE); + pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal); + + /* delay 10 usec */ + udelay(10); + + /* bring chip reset out of reset */ + regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET); + regVal |= SPC_REG_RESET_DEVICE; + pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal); + + /* delay 10 usec */ + udelay(10); + + /* wait for 20 msec until the firmware gets reloaded */ + i = 20; + do { + mdelay(1); + } while ((--i) != 0); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip reset finished\n")); +} + +/** + * pm8001_chip_iounmap - which maped when initilized. 
+ * @pm8001_ha: our hba card information + */ +static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) +{ + s8 bar, logical = 0; + for (bar = 0; bar < 6; bar++) { + /* + ** logical BARs for SPC: + ** bar 0 and 1 - logical BAR0 + ** bar 2 and 3 - logical BAR1 + ** bar4 - logical BAR2 + ** bar5 - logical BAR3 + ** Skip the appropriate assignments: + */ + if ((bar == 1) || (bar == 3)) + continue; + if (pm8001_ha->io_mem[logical].memvirtaddr) { + iounmap(pm8001_ha->io_mem[logical].memvirtaddr); + logical++; + } + } +} + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); +} + + /** + * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL); +} + +/** + * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha, + u32 int_vec_idx) +{ + u32 msi_index; + u32 value; + msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE; + msi_index += MSIX_TABLE_BASE; + pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE); + value = (1 << int_vec_idx); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value); + +} + +/** + * pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha, + u32 int_vec_idx) +{ + u32 msi_index; + msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE; + msi_index += MSIX_TABLE_BASE; + pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE); + +} +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +{ +#ifdef PM8001_USE_MSIX + pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); + return; +#endif + pm8001_chip_intx_interrupt_enable(pm8001_ha); + +} + +/** + * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +{ +#ifdef PM8001_USE_MSIX + pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); + return; +#endif + pm8001_chip_intx_interrupt_disable(pm8001_ha); + +} + +/** + * mpi_msg_free_get- get the free message buffer for transfer inbound queue. + * @circularQ: the inbound queue we want to transfer to HBA. + * @messageSize: the message size of this transfer, normally it is 64 bytes + * @messagePtr: the pointer to message. 
+ */ +static u32 mpi_msg_free_get(struct inbound_queue_table *circularQ, + u16 messageSize, void **messagePtr) +{ + u32 offset, consumer_index; + struct mpi_msg_hdr *msgHeader; + u8 bcCount = 1; /* only support single buffer */ + + /* Checks is the requested message size can be allocated in this queue*/ + if (messageSize > 64) { + *messagePtr = NULL; + return -1; + } + + /* Stores the new consumer index */ + consumer_index = pm8001_read_32(circularQ->ci_virt); + circularQ->consumer_index = cpu_to_le32(consumer_index); + if (((circularQ->producer_idx + bcCount) % 256) == + circularQ->consumer_index) { + *messagePtr = NULL; + return -1; + } + /* get memory IOMB buffer address */ + offset = circularQ->producer_idx * 64; + /* increment to next bcCount element */ + circularQ->producer_idx = (circularQ->producer_idx + bcCount) % 256; + /* Adds that distance to the base of the region virtual address plus + the message header size*/ + msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset); + *messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr); + return 0; +} + +/** + * mpi_build_cmd- build the message queue for transfer, update the PI to FW + * to tell the fw to get this message from IOMB. + * @pm8001_ha: our hba card information + * @circularQ: the inbound queue we want to transfer to HBA. + * @opCode: the operation code represents commands which LLDD and fw recognized. + * @payload: the command payload of each operation command. + */ +static u32 mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, + struct inbound_queue_table *circularQ, + u32 opCode, void *payload) +{ + u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; + u32 responseQueue = 0; + void *pMessage; + + if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("No free mpi buffer \n")); + return -1; + } + + /*Copy to the payload*/ + memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr))); + + /*Build the header*/ + Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) + | ((responseQueue & 0x3F) << 16) + | ((category & 0xF) << 12) | (opCode & 0xFFF)); + + pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header)); + /*Update the PI to the firmware*/ + pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, + circularQ->pi_offset, circularQ->producer_idx); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("after PI= %d CI= %d \n", circularQ->producer_idx, + circularQ->consumer_index)); + return 0; +} + +static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, u8 bc) +{ + u32 producer_index; + /* free the circular queue buffer elements associated with the message*/ + circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256; + /* update the CI of outbound queue */ + pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset, + circularQ->consumer_idx); + /* Update the producer index from SPC*/ + producer_index = pm8001_read_32(circularQ->pi_virt); + circularQ->producer_index = cpu_to_le32(producer_index); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk(" CI=%d PI=%d\n", circularQ->consumer_idx, + circularQ->producer_index)); + return 0; +} + +/** + * mpi_msg_consume- get the MPI message from outbound queue message table. + * @pm8001_ha: our hba card information + * @circularQ: the outbound queue table. + * @messagePtr1: the message contents of this outbound message. + * @pBC: the message size. 
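mpi_msg_free_get() and mpi_build_cmd() above implement a simple producer/consumer ring of 256 fixed 64-byte IOMBs: the queue is full when advancing the producer index would land on the consumer index, and each message carries a packed 32-bit header. The sketch below reproduces just the index check and the header layout (the opcode value is a placeholder; category 0x02 matches mpi_build_cmd()):

#include <stdint.h>
#include <stdio.h>

#define Q_DEPTH		256	/* elements per inbound queue */
#define IOMB_SIZE	64	/* bytes per element */

/* Full when advancing the producer would land on the consumer. */
static int q_full(uint32_t pi, uint32_t ci, uint32_t bc)
{
	return ((pi + bc) % Q_DEPTH) == ci;
}

/* Same packing as the Header word built in mpi_build_cmd(). */
static uint32_t iomb_header(uint32_t opcode, uint32_t category,
			    uint32_t resp_q, uint32_t bc, uint32_t hpriority)
{
	return (1u << 31) |			/* valid */
	       ((hpriority & 0x1) << 30) |	/* high priority */
	       ((bc & 0x1f) << 24) |		/* buffer count */
	       ((resp_q & 0x3f) << 16) |	/* response queue */
	       ((category & 0xf) << 12) |	/* command category */
	       (opcode & 0xfff);		/* operation code */
}

int main(void)
{
	printf("full=%d header=0x%08x\n",
	       q_full(5, 6, 1), iomb_header(0x01, 0x02, 0, 1, 0));
	return 0;
}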
+ */ +static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, + void **messagePtr1, u8 *pBC) +{ + struct mpi_msg_hdr *msgHeader; + __le32 msgHeader_tmp; + u32 header_tmp; + do { + /* If there are not-yet-delivered messages ... */ + if (circularQ->producer_index != circularQ->consumer_idx) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("process an IOMB\n")); + /*Get the pointer to the circular queue buffer element*/ + msgHeader = (struct mpi_msg_hdr *) + (circularQ->base_virt + + circularQ->consumer_idx * 64); + /* read header */ + header_tmp = pm8001_read_32(msgHeader); + msgHeader_tmp = cpu_to_le32(header_tmp); + if (0 != (msgHeader_tmp & 0x80000000)) { + if (OPC_OUB_SKIP_ENTRY != + (msgHeader_tmp & 0xfff)) { + *messagePtr1 = + ((u8 *)msgHeader) + + sizeof(struct mpi_msg_hdr); + *pBC = (u8)((msgHeader_tmp >> 24) & + 0x1f); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("mpi_msg_consume" + ": CI=%d PI=%d msgHeader=%x\n", + circularQ->consumer_idx, + circularQ->producer_index, + msgHeader_tmp)); + return MPI_IO_STATUS_SUCCESS; + } else { + u32 producer_index; + void *pi_virt = circularQ->pi_virt; + /* free the circular queue buffer + elements associated with the message*/ + circularQ->consumer_idx = + (circularQ->consumer_idx + + ((msgHeader_tmp >> 24) & 0x1f)) + % 256; + /* update the CI of outbound queue */ + pm8001_cw32(pm8001_ha, + circularQ->ci_pci_bar, + circularQ->ci_offset, + circularQ->consumer_idx); + /* Update the producer index from SPC */ + producer_index = + pm8001_read_32(pi_virt); + circularQ->producer_index = + cpu_to_le32(producer_index); + } + } else + return MPI_IO_STATUS_FAIL; + } + } while (circularQ->producer_index != circularQ->consumer_idx); + /* while we don't have any more not-yet-delivered message */ + /* report empty */ + return MPI_IO_STATUS_BUSY; +} + +static void pm8001_work_queue(struct work_struct *work) +{ + struct delayed_work *dw = container_of(work, struct delayed_work, work); + struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q); + struct pm8001_device *pm8001_dev; + struct domain_device *dev; + + switch (wq->handler) { + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dev = wq->data; + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_reset(dev); + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + pm8001_dev = wq->data; + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_reset(dev); + break; + case IO_DS_IN_ERROR: + pm8001_dev = wq->data; + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_reset(dev); + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dev = wq->data; + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_reset(dev); + break; + } + list_del(&wq->entry); + kfree(wq); +} + +static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, + int handler) +{ + struct pm8001_wq *wq; + int ret = 0; + + wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC); + if (wq) { + wq->pm8001_ha = pm8001_ha; + wq->data = data; + wq->handler = handler; + INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue); + list_add_tail(&wq->entry, &pm8001_ha->wq_list); + schedule_delayed_work(&wq->work_q, 0); + } else + ret = -ENOMEM; + + return ret; +} + +/** + * mpi_ssp_completion- process the event that FW response to the SSP request. + * @pm8001_ha: our hba card information + * @piomb: the message contents of this outbound message. 
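pm8001_handle_event() runs in completion context and cannot sleep, so it allocates a small descriptor with GFP_ATOMIC, links it on the adapter's work list and schedules delayed work; the handler then performs the I_T nexus reset in process context and frees the descriptor. A self-contained module-style sketch of the same pattern, with illustrative names rather than driver symbols:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_event {
	struct delayed_work work;
	int handler;			/* which recovery action to run */
};

static void demo_event_fn(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct demo_event *ev = container_of(dw, struct demo_event, work);

	pr_info("handling deferred event %d\n", ev->handler);
	kfree(ev);			/* descriptor freed by the handler */
}

static int demo_queue_event(int handler)
{
	/* atomic allocation: callers may be in interrupt context */
	struct demo_event *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return -ENOMEM;
	ev->handler = handler;
	INIT_DELAYED_WORK(&ev->work, demo_event_fn);
	schedule_delayed_work(&ev->work, 0);
	return 0;
}

static int __init demo_init(void)
{
	return demo_queue_event(1);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	flush_scheduled_work();		/* make sure no work is left pending */
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");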
+ * + * When FW has completed a ssp request for example a IO request, after it has + * filled the SG data with the data, it will trigger this event represent + * that he has finished the job,please check the coresponding buffer. + * So we will tell the caller who maybe waiting the result to tell upper layer + * that the task has been finished. + */ +static int +mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 param; + u32 tag; + struct ssp_completion_resp *psspPayload; + struct task_status_struct *ts; + struct ssp_response_iu *iu; + struct pm8001_device *pm8001_dev; + psspPayload = (struct ssp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psspPayload->status); + tag = le32_to_cpu(psspPayload->tag); + ccb = &pm8001_ha->ccb_info[tag]; + pm8001_dev = ccb->device; + param = le32_to_cpu(psspPayload->param); + + PM8001_IO_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_COMP\n")); + t = ccb->task; + + if (status) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return -1; + ts = &t->task_status; + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS" + ",param = %d \n", param)); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_GOOD; + } else { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + iu = &psspPayload->ssp_resp_iu; + sas_ssp_task_response(pm8001_ha->dev, t, iu); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag \n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_UNDERFLOW: + /* SSP Completion with error */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW" + ",param = %d \n", param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + 
ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" + "NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_DS_NON_OPERATIONAL); + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_TM_TAG_NOT_FOUND: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_TM_TAG_NOT_FOUND\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_SSP_EXT_IU_ZERO_LEN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + } + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("scsi_satus = %x \n ", + psspPayload->ssp_resp_iu.status)); + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with" + " io_status 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } + return 0; +} + +/*See the comments for mpi_ssp_completion */ +static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + unsigned long flags; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct ssp_event_resp *psspPayload = + (struct ssp_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psspPayload->event); + u32 tag = le32_to_cpu(psspPayload->tag); + u32 port_id = le32_to_cpu(psspPayload->port_id); + u32 dev_id = le32_to_cpu(psspPayload->device_id); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", event)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return -1; + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id = %x,device_id = %x\n", + port_id, dev_id)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");) + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT" + "_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case 
IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" + "NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n")); + return 0; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with" + " event 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } + return 0; +} + +/*See the comments for mpi_ssp_completion */ +static int +mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 param; + u32 status; + u32 tag; + struct sata_completion_resp *psataPayload; + struct task_status_struct *ts; + struct ata_task_resp *resp ; + u32 *sata_resp; + struct pm8001_device *pm8001_dev; + + psataPayload = (struct sata_completion_resp *)(piomb + 4); + status = le32_to_cpu(psataPayload->status); + tag = le32_to_cpu(psataPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psataPayload->param); + t = ccb->task; + ts = &t->task_status; + pm8001_dev = ccb->device; + if (status) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sata IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return -1; + + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_GOOD; + } else { + u8 len; + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SAS_PROTO_RESPONSE len = %d\n", + param)); + sata_resp = &psataPayload->sata_resp[0]; + resp = (struct ata_task_resp *)ts->buf; + if (t->ata_task.dma_xfer == 0 && + t->data_dir == PCI_DMA_FROMDEVICE) { + len = sizeof(struct pio_setup_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("PIO read len = %d\n", len)); + } else if (t->ata_task.use_ncq) { + len = sizeof(struct set_dev_bits_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("FPDMA len = %d\n", len)); + } else { + len = sizeof(struct dev_to_host_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("other len = %d\n", len)); + } + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + resp->frame_len = len; + memcpy(&resp->ending_fis[0], sata_resp, len); + ts->buf_valid_size = sizeof(*resp); + } else + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("response to large \n")); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag \n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + /* following cases are to do cases */ + case IO_UNDERFLOW: + /* SATA Completion with error */ + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_UNDERFLOW param = %d\n", param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = 
SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT" + "_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*in order to force CPU ordering*/ + t->task_done(t); + return 0; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + t->task_done(t); + return 0; + } + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" + "NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_STP_RESOURCES" + "_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto*/ + t->task_done(t); + return 0; + } + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; 
+ case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_XFER_ERROR_SATA_LINK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_NON_OPERATIONAL); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + t->task_done(t); + return 0; + } + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk(" IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_IN_ERROR); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + t->task_done(t); + return 0; + } + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p done with io_status 0x%x" + " resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto */ + t->task_done(t); + } + return 0; +} + +/*See the comments for mpi_ssp_completion */ +static int mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + unsigned long flags; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct sata_event_resp *psataPayload = + (struct sata_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psataPayload->event); + u32 tag = le32_to_cpu(psataPayload->tag); + u32 port_id = le32_to_cpu(psataPayload->port_id); + u32 dev_id = le32_to_cpu(psataPayload->device_id); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sata IO status 0x%x\n", event)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return -1; + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id = %x,device_id = %x\n", + port_id, dev_id)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT" + "_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + t->task_done(t); + return 0; + } + break; + case 
IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" + "NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_PEER_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + break; + case IO_XFER_PIO_SETUP_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p done with io_status 0x%x" + " resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } + return 0; +} + +/*See the comments for mpi_ssp_completion */ +static int +mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 param; + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 tag; + struct smp_completion_resp *psmpPayload; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + + psmpPayload = (struct smp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psmpPayload->status); + tag = le32_to_cpu(psmpPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psmpPayload->param); + t = ccb->task; + ts = &t->task_status; + pm8001_dev = ccb->device; + if (status) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("smp IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return -1; + + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_GOOD; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case IO_ERROR_HW_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_HW_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_BUSY; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_BUSY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_BUSY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; 
+ ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" + "NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_RX_FRAME: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_RX_FRAME\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_ERROR_INTERNAL_SMP_RESOURCE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + /* not allowed case. 
Therefore, return failed status */ + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with" + " io_status 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } + return 0; +} + +static void +mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct set_dev_state_resp *pPayload = + (struct set_dev_state_resp *)(piomb + 4); + u32 tag = le32_to_cpu(pPayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + struct pm8001_device *pm8001_dev = ccb->device; + u32 status = le32_to_cpu(pPayload->status); + u32 device_id = le32_to_cpu(pPayload->device_id); + u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS; + u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS; + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state " + "from 0x%x to 0x%x status = 0x%x!\n", + device_id, pds, nds, status)); + complete(pm8001_dev->setds_completion); + ccb->task = NULL; + ccb->ccb_tag = 0xFFFFFFFF; + pm8001_ccb_free(pm8001_ha, tag); +} + +static void +mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct get_nvm_data_resp *pPayload = + (struct get_nvm_data_resp *)(piomb + 4); + u32 tag = le32_to_cpu(pPayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + u32 dlen_status = le32_to_cpu(pPayload->dlen_status); + complete(pm8001_ha->nvmd_completion); + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set nvm data complete!\n")); + if ((dlen_status & NVMD_STAT) != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Set nvm data error!\n")); + return; + } + ccb->task = NULL; + ccb->ccb_tag = 0xFFFFFFFF; + pm8001_ccb_free(pm8001_ha, tag); +} + +static void +mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct fw_control_ex *fw_control_context; + struct get_nvm_data_resp *pPayload = + (struct get_nvm_data_resp *)(piomb + 4); + u32 tag = le32_to_cpu(pPayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + u32 dlen_status = le32_to_cpu(pPayload->dlen_status); + u32 ir_tds_bn_dps_das_nvm = + le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm); + void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; + fw_control_context = ccb->fw_control_context; + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n")); + if ((dlen_status & NVMD_STAT) != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Get nvm data error!\n")); + complete(pm8001_ha->nvmd_completion); + return; + } + + if (ir_tds_bn_dps_das_nvm & IPMode) { + /* indirect mode - IR bit set */ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Get NVMD success, IR=1\n")); + if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) { + if (ir_tds_bn_dps_das_nvm == 0x80a80200) { + memcpy(pm8001_ha->sas_addr, + ((u8 *)virt_addr + 4), + SAS_ADDR_SIZE); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Get SAS address" + " from VPD successfully!\n")); + } + } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM) + || ((ir_tds_bn_dps_das_nvm & 
NVMD_TYPE) == VPD_FLASH) ||
+			((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) {
+			;
+		} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP)
+			|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) {
+			;
+		} else {
+			/* Should not happen */
+			PM8001_MSG_DBG(pm8001_ha,
+				pm8001_printk("(IR=1)Wrong Device type 0x%x\n",
+				ir_tds_bn_dps_das_nvm));
+		}
+	} else /* direct mode */{
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
+			(dlen_status & NVMD_LEN) >> 24));
+	}
+	memcpy((void *)(fw_control_context->usrAddr),
+		(void *)(pm8001_ha->memoryMap.region[NVMD].virt_ptr),
+		fw_control_context->len);
+	complete(pm8001_ha->nvmd_completion);
+	ccb->task = NULL;
+	ccb->ccb_tag = 0xFFFFFFFF;
+	pm8001_ccb_free(pm8001_ha, tag);
+}
+
+static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct local_phy_ctl_resp *pPayload =
+		(struct local_phy_ctl_resp *)(piomb + 4);
+	u32 status = le32_to_cpu(pPayload->status);
+	u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
+	u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
+	if (status != 0) {
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("%x phy execute %x phy op failed!\n",
+			phy_id, phy_op));
+	} else
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("%x phy execute %x phy op success!\n",
+			phy_id, phy_op));
+	return 0;
+}
+
+/**
+ * pm8001_bytes_dmaed - one of the interface functions for communicating with libsas
+ * @pm8001_ha: our hba card information
+ * @i: which phy received the event.
+ *
+ * When the HBA driver receives the identify-done event, or the initial FIS
+ * received event (for SATA), it invokes this function to tell the sas layer
+ * that the sas topology has formed and the whole sas domain should be
+ * discovered; when a broadcast(change) primitive is received, the sas layer
+ * is told to discover only the changed domain rather than the whole domain.
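+ *
+ * For example, hw_event_sas_phy_up() below copies the received IDENTIFY
+ * frame into phy->frame_rcvd and then calls:
+ *
+ *	pm8001_bytes_dmaed(pm8001_ha, phy_id);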
+ */
+static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
+{
+	struct pm8001_phy *phy = &pm8001_ha->phy[i];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	struct sas_ha_struct *sas_ha;
+	if (!phy->phy_attached)
+		return;
+
+	sas_ha = pm8001_ha->sas;
+	if (sas_phy->phy) {
+		struct sas_phy *sphy = sas_phy->phy;
+		sphy->negotiated_linkrate = sas_phy->linkrate;
+		sphy->minimum_linkrate = phy->minimum_linkrate;
+		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+		sphy->maximum_linkrate = phy->maximum_linkrate;
+		sphy->maximum_linkrate_hw = phy->maximum_linkrate;
+	}
+
+	if (phy->phy_type & PORT_TYPE_SAS) {
+		struct sas_identify_frame *id;
+		id = (struct sas_identify_frame *)phy->frame_rcvd;
+		id->dev_type = phy->identify.device_type;
+		id->initiator_bits = SAS_PROTOCOL_ALL;
+		id->target_bits = phy->identify.target_port_protocols;
+	} else if (phy->phy_type & PORT_TYPE_SATA) {
+		/*Nothing*/
+	}
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("phy %d bytes dmaed.\n", i));
+
+	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+	pm8001_ha->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+}
+
+/* Get the link rate speed */
+static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
+{
+	struct sas_phy *sas_phy = phy->sas_phy.phy;
+
+	switch (link_rate) {
+	case PHY_SPEED_60:
+		phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
+		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
+		break;
+	case PHY_SPEED_30:
+		phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
+		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
+		break;
+	case PHY_SPEED_15:
+		phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
+		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
+		break;
+	}
+	sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
+	sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
+	sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+	sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
+	sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+}
+
+/**
+ * pm8001_get_attached_sas_addr - extract/generate attached SAS address
+ * @phy: pointer to the pm8001_phy that received the frame
+ * @sas_addr: pointer to buffer where the SAS address is to be written
+ *
+ * This function extracts the SAS address from a received IDENTIFY frame.
+ * If OOB is SATA, then a SAS address is generated from the HA tables.
+ *
+ * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
+ * buffer.
+ */
+static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
+	u8 *sas_addr)
+{
+	if (phy->sas_phy.frame_rcvd[0] == 0x34
+		&& phy->sas_phy.oob_mode == SATA_OOB_MODE) {
+		struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha;
+		/* FIS device-to-host */
+		u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr);
+		addr += phy->sas_phy.id;
+		*(__be64 *)sas_addr = cpu_to_be64(addr);
+	} else {
+		struct sas_identify_frame *idframe =
+			(void *) phy->sas_phy.frame_rcvd;
+		memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
+	}
+}
+
+/**
+ * pm8001_hw_event_ack_req - for PM8001, some events need to be acknowledged to the FW.
+ * @pm8001_ha: our hba card information
+ * @Qnum: the inbound queue number used to send the acknowledgement
+ * @SEA: source of event to ack
+ * @port_id: port id.
+ * @phyId: phy id.
+ * @param0: parameter 0.
+ * @param1: parameter 1.
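+ *
+ * For example, hw_event_phy_down() below acknowledges a PHY_DOWN event for an
+ * invalidated port with:
+ *
+ *	pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ *		port_id, phy_id, 0, 0);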
+ */ +static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) +{ + struct hw_event_ack_req payload; + u32 opc = OPC_INB_SAS_HW_EVENT_ACK; + + struct inbound_queue_table *circularQ; + + memset((u8 *)&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[Qnum]; + payload.tag = 1; + payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | + ((phyId & 0x0F) << 4) | (port_id & 0x0F)); + payload.param0 = cpu_to_le32(param0); + payload.param1 = cpu_to_le32(param1); + mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); +} + +static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op); + +/** + * hw_event_sas_phy_up -FW tells me a SAS phy up event. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_evt_status_phyid_portid = + le32_to_cpu(pPayload->lr_evt_status_phyid_portid); + u8 link_rate = + (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); + u8 phy_id = + (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + u8 deviceType = pPayload->sas_identify.dev_type; + + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SAS_PHY_UP \n")); + + switch (deviceType) { + case SAS_PHY_UNUSED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("device type no device.\n")); + break; + case SAS_END_DEVICE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n")); + pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + get_lrate_mode(phy, link_rate); + break; + case SAS_EDGE_EXPANDER_DEVICE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("expander device.\n")); + get_lrate_mode(phy, link_rate); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("fanout expander device.\n")); + get_lrate_mode(phy, link_rate); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unkown device type(%x)\n", deviceType)); + break; + } + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = deviceType; + phy->phy_attached = 1; + if (phy->identify.device_type == SAS_END_DEV) + phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != NO_DEVICE) + phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; + phy->sas_phy.oob_mode = SAS_OOB_MODE; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, &pPayload->sas_identify, + sizeof(struct sas_identify_frame)-4); + phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + if (pm8001_ha->flags == PM8001F_RUN_TIME) + mdelay(200);/*delay a moment to wait disk to spinup*/ + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_sata_phy_up -FW tells me a SATA phy up event. 
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 lr_evt_status_phyid_portid =
+		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+	u8 link_rate =
+		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+	u8 phy_id =
+		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	unsigned long flags;
+	get_lrate_mode(phy, link_rate);
+	phy->phy_type |= PORT_TYPE_SATA;
+	phy->phy_attached = 1;
+	phy->sas_phy.oob_mode = SATA_OOB_MODE;
+	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+		sizeof(struct dev_to_host_fis));
+	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+	phy->identify.device_type = SATA_DEV;
+	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+	pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down - notify libsas that a phy is down.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 lr_evt_status_phyid_portid =
+		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+	u8 phy_id =
+		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+
+	switch (portstate) {
+	case PORT_VALID:
+		break;
+	case PORT_INVALID:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" PortInvalid portID %d\n", port_id));
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" Last phy Down and port invalid\n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+			port_id, phy_id, 0, 0);
+		break;
+	case PORT_IN_RESET:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" PortInReset portID %d\n", port_id));
+		break;
+	case PORT_NOT_ESTABLISHED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+		break;
+	case PORT_LOSTCOMM:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" Last phy Down and port invalid\n"));
+		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+			port_id, phy_id, 0, 0);
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" phy Down and (default) = %x\n",
+				portstate));
+		break;
+	}
+}
+
+/**
+ * mpi_reg_resp - process the register device ID response.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ *
+ * When the sas layer finds a device it notifies the LLDD, and the driver then
+ * registers the domain device with the FW; this event returns the device ID
+ * that the FW has assigned. From then on, communication with the FW no longer
+ * uses the SAS address but the FW-assigned device ID.
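+ *
+ * On DEVREG_SUCCESS the FW-assigned ID is stored in pm8001_dev->device_id and
+ * is used for all later requests made to the FW for that device.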
+ */ +static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 status; + u32 device_id; + u32 htag; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct dev_reg_resp *registerRespPayload = + (struct dev_reg_resp *)(piomb + 4); + + htag = le32_to_cpu(registerRespPayload->tag); + ccb = &pm8001_ha->ccb_info[registerRespPayload->tag]; + pm8001_dev = ccb->device; + status = le32_to_cpu(registerRespPayload->status); + device_id = le32_to_cpu(registerRespPayload->device_id); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" register device is status = %d\n", status)); + switch (status) { + case DEVREG_SUCCESS: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_SUCCESS\n")); + pm8001_dev->device_id = device_id; + break; + case DEVREG_FAILURE_OUT_OF_RESOURCE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("DEVREG_FAILURE_OUT_OF_RESOURCE\n")); + break; + case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n")); + break; + case DEVREG_FAILURE_INVALID_PHY_ID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("DEVREG_FAILURE_INVALID_PHY_ID\n")); + break; + case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n")); + break; + case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n")); + break; + case DEVREG_FAILURE_PORT_NOT_VALID_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("DEVREG_FAILURE_PORT_NOT_VALID_STATE\n")); + break; + case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n")); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_UNSORPORTED\n")); + break; + } + complete(pm8001_dev->dcompletion); + ccb->task = NULL; + ccb->ccb_tag = 0xFFFFFFFF; + pm8001_ccb_free(pm8001_ha, htag); + return 0; +} + +static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 status; + u32 device_id; + struct dev_reg_resp *registerRespPayload = + (struct dev_reg_resp *)(piomb + 4); + + status = le32_to_cpu(registerRespPayload->status); + device_id = le32_to_cpu(registerRespPayload->device_id); + if (status != 0) + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" deregister device failed ,status = %x" + ", device_id = %x\n", status, device_id)); + return 0; +} + +static int +mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 status; + struct fw_control_ex fw_control_context; + struct fw_flash_Update_resp *ppayload = + (struct fw_flash_Update_resp *)(piomb + 4); + u32 tag = le32_to_cpu(ppayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + status = le32_to_cpu(ppayload->status); + memcpy(&fw_control_context, + ccb->fw_control_context, + sizeof(fw_control_context)); + switch (status) { + case FLASH_UPDATE_COMPLETE_PENDING_REBOOT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n")); + break; + case FLASH_UPDATE_IN_PROGRESS: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(": FLASH_UPDATE_IN_PROGRESS\n")); + break; + case FLASH_UPDATE_HDR_ERR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(": FLASH_UPDATE_HDR_ERR\n")); + break; + case FLASH_UPDATE_OFFSET_ERR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(": FLASH_UPDATE_OFFSET_ERR\n")); + break; + case FLASH_UPDATE_CRC_ERR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(": FLASH_UPDATE_CRC_ERR\n")); + 
break; + case FLASH_UPDATE_LENGTH_ERR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(": FLASH_UPDATE_LENGTH_ERR\n")); + break; + case FLASH_UPDATE_HW_ERR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(": FLASH_UPDATE_HW_ERR\n")); + break; + case FLASH_UPDATE_DNLD_NOT_SUPPORTED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n")); + break; + case FLASH_UPDATE_DISABLED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(": FLASH_UPDATE_DISABLED\n")); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("No matched status = %d\n", status)); + break; + } + ccb->fw_control_context->fw_control->retcode = status; + pci_free_consistent(pm8001_ha->pdev, + fw_control_context.len, + fw_control_context.virtAddr, + fw_control_context.phys_addr); + complete(pm8001_ha->nvmd_completion); + ccb->task = NULL; + ccb->ccb_tag = 0xFFFFFFFF; + pm8001_ccb_free(pm8001_ha, tag); + return 0; +} + +static int +mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + u32 status; + int i; + struct general_event_resp *pPayload = + (struct general_event_resp *)(piomb + 4); + status = le32_to_cpu(pPayload->status); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" status = 0x%x\n", status)); + for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++) + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("inb_IOMB_payload[0x%x] 0x%x, \n", i, + pPayload->inb_IOMB_payload[i])); + return 0; +} + +static int +mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status ; + u32 tag, scp; + struct task_status_struct *ts; + + struct task_abort_resp *pPayload = + (struct task_abort_resp *)(piomb + 4); + ccb = &pm8001_ha->ccb_info[pPayload->tag]; + t = ccb->task; + ts = &t->task_status; + + if (t == NULL) + return -1; + + status = le32_to_cpu(pPayload->status); + tag = le32_to_cpu(pPayload->tag); + scp = le32_to_cpu(pPayload->scp); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk(" status = 0x%x\n", status)); + if (status != 0) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task abort failed tag = 0x%x," + " scp= 0x%x\n", tag, scp)); + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_GOOD; + break; + case IO_NOT_VALID: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n")); + ts->resp = TMF_RESP_FUNC_FAILED; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, pPayload->tag); + mb(); + t->task_done(t); + return 0; +} + +/** + * mpi_hw_event -The hw event has come. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb) +{ + unsigned long flags; + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_evt_status_phyid_portid = + le32_to_cpu(pPayload->lr_evt_status_phyid_portid); + u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F); + u8 phy_id = + (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); + u16 eventType = + (u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8); + u8 status = + (u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24); + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("outbound queue HW event & event type : ")); + switch (eventType) { + case HW_EVENT_PHY_START_STATUS: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_START_STATUS" + " status = %x\n", status)); + if (status == 0) { + phy->phy_state = 1; + if (pm8001_ha->flags == PM8001F_RUN_TIME) + complete(phy->enable_completion); + } + break; + case HW_EVENT_SAS_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_START_STATUS \n")); + hw_event_sas_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_PHY_UP \n")); + hw_event_sata_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_PHY_STOP_STATUS: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_STOP_STATUS " + "status = %x\n", status)); + if (status == 0) + phy->phy_state = 0; + break; + case HW_EVENT_SATA_SPINUP_HOLD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD \n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); + break; + case HW_EVENT_PHY_DOWN: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_DOWN \n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); + phy->phy_attached = 0; + phy->phy_state = 0; + hw_event_phy_down(pm8001_ha, piomb); + break; + case HW_EVENT_PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_INVALID\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + /* the broadcast change primitive received, tell the LIBSAS this event + to revalidate the sas domain*/ + case HW_EVENT_BROADCAST_CHANGE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n")); + pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, + port_id, phy_id, 1, 0); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_PHY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_ERROR\n")); + sas_phy_disconnected(&phy->sas_phy); + phy->phy_attached = 0; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR); + break; + case HW_EVENT_BROADCAST_EXP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_EXP\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_LINK_ERR_INVALID_DWORD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n")); + 
pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_DISPARITY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n")); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_DISPARITY_ERROR, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_CODE_VIOLATION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n")); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_CODE_VIOLATION, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n")); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_MALFUNCTION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_MALFUNCTION\n")); + break; + case HW_EVENT_BROADCAST_SES: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_SES\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_INBOUND_CRC_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n")); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_INBOUND_CRC_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_HARD_RESET_RECEIVED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n")); + sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET); + break; + case HW_EVENT_ID_FRAME_TIMEOUT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED \n")); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_PHY_RESET_FAILED, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RESET_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO \n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVERY_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO \n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVER: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVER \n")); + break; + case HW_EVENT_PORT_RESET_COMPLETE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE \n")); + break; + case EVENT_BROADCAST_ASYNCH_EVENT: + PM8001_MSG_DBG(pm8001_ha, + 
pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n")); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Unknown event type = %x\n", eventType)); + break; + } + return 0; +} + +/** + * process_one_iomb - process one outbound Queue memory block + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 pHeader = (u32)*(u32 *)piomb; + u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:\n")); + + switch (opc) { + case OPC_OUB_ECHO: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO \n")); + break; + case OPC_OUB_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_HW_EVENT \n")); + mpi_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_COMP \n")); + mpi_ssp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SMP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_COMP \n")); + mpi_smp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_LOCAL_PHY_CNTRL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); + mpi_local_phy_ctl(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_REGIST: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_REGIST \n")); + mpi_reg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEREG_DEV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unresgister the deviece \n")); + mpi_dereg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEV_HANDLE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEV_HANDLE \n")); + break; + case OPC_OUB_SATA_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_COMP \n")); + mpi_sata_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_EVENT \n")); + mpi_sata_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_EVENT\n")); + mpi_ssp_event(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_HANDLE_ARRIV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n")); + /*This is for target*/ + break; + case OPC_OUB_SSP_RECV_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n")); + /*This is for target*/ + break; + case OPC_OUB_DEV_INFO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_INFO\n")); + break; + case OPC_OUB_FW_FLASH_UPDATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); + mpi_fw_flash_update_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GPIO_RESPONSE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_RESPONSE\n")); + break; + case OPC_OUB_GPIO_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_EVENT\n")); + break; + case OPC_OUB_GENERAL_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); + mpi_general_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); + mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); + mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SAS_DIAG_MODE_START_END: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n")); + break; + case OPC_OUB_SAS_DIAG_EXECUTE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n")); + 
break; + case OPC_OUB_GET_TIME_STAMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_TIME_STAMP\n")); + break; + case OPC_OUB_SAS_HW_EVENT_ACK: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n")); + break; + case OPC_OUB_PORT_CONTROL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_PORT_CONTROL\n")); + break; + case OPC_OUB_SMP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); + mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); + mpi_get_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); + mpi_set_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEVICE_HANDLE_REMOVAL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n")); + break; + case OPC_OUB_SET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); + mpi_set_dev_state_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n")); + break; + case OPC_OUB_SET_DEV_INFO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEV_INFO\n")); + break; + case OPC_OUB_SAS_RE_INITIALIZE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n")); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n", + opc)); + break; + } +} + +static int process_oq(struct pm8001_hba_info *pm8001_ha) +{ + struct outbound_queue_table *circularQ; + void *pMsg1 = NULL; + u8 bc = 0; + u32 ret = MPI_IO_STATUS_FAIL, processedMsgCount = 0; + + circularQ = &pm8001_ha->outbnd_q_tbl[0]; + do { + ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + if (MPI_IO_STATUS_SUCCESS == ret) { + /* process the outbound message */ + process_one_iomb(pm8001_ha, (void *)((u8 *)pMsg1 - 4)); + /* free the message from the outbound circular buffer */ + mpi_msg_free_set(pm8001_ha, circularQ, bc); + processedMsgCount++; + } + if (MPI_IO_STATUS_BUSY == ret) { + u32 producer_idx; + /* Update the producer index from SPC */ + producer_idx = pm8001_read_32(circularQ->pi_virt); + circularQ->producer_index = cpu_to_le32(producer_idx); + if (circularQ->producer_index == + circularQ->consumer_idx) + /* OQ is empty */ + break; + } + } while (100 > processedMsgCount);/*end message processing if hit the + count*/ + return ret; +} + +/* PCI_DMA_... to our direction translation. */ +static const u8 data_dir_flags[] = { + [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */ + [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */ + [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ + [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ +}; +static void +pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) +{ + int i; + struct scatterlist *sg; + struct pm8001_prd *buf_prd = prd; + + for_each_sg(scatter, sg, nr, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); + buf_prd->im_len.e = 0; + buf_prd++; + } +} + +static void build_smp_cmd(u32 deviceID, u32 hTag, struct smp_req *psmp_cmd) +{ + psmp_cmd->tag = cpu_to_le32(hTag); + psmp_cmd->device_id = cpu_to_le32(deviceID); + psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); +} + +/** + * pm8001_chip_smp_req - send a SMP task to FW + * @pm8001_ha: our hba card information. 
+ * @ccb: the ccb information this request used. + */ +static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + int elem, rc; + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len; + struct smp_req smp_cmd; + u32 opc; + struct inbound_queue_table *circularQ; + + memset(&smp_cmd, 0, sizeof(smp_cmd)); + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + opc = OPC_INB_SMP_REQUEST; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); + mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd); + return 0; + +err_out_2: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +err_out: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + return rc; +} + +/** + * pm8001_chip_ssp_io_req - send a SSP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
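+ *
+ * Builds an OPC_INB_SSPINIIOSTART IOMB: fills the SSP IU with the LUN, CDB
+ * and task attributes, then attaches either a single inline data buffer or
+ * an extended SGL (esgl) depending on task->num_scatter, and posts the
+ * command to inbound queue 0.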
+ */ +static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct ssp_ini_io_start_req ssp_cmd; + u32 tag = ccb->ccb_tag; + __le64 phys_addr; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for + SAS 1.1 compatible TLR*/ + ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); + ssp_cmd.tag = cpu_to_le32(tag); + if (task->ssp_task.enable_first_burst) + ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); + phys_addr = cpu_to_le64(ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0])); + ssp_cmd.addr_low = lower_32_bits(phys_addr); + ssp_cmd.addr_high = upper_32_bits(phys_addr); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + __le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter)); + ssp_cmd.addr_low = lower_32_bits(dma_addr); + ssp_cmd.addr_high = upper_32_bits(dma_addr); + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.addr_low = 0; + ssp_cmd.addr_high = 0; + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } + mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); + return 0; +} + +static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + u32 tag = ccb->ccb_tag; + struct sata_start_req sata_cmd; + u32 hdr_tag, ncg_tag = 0; + __le64 phys_addr; + u32 ATAP = 0x0; + u32 dir; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + if (task->data_dir == PCI_DMA_NONE) { + ATAP = 0x04; /* no data*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data \n")); + } else if (likely(!task->ata_task.device_control_reg_update)) { + if (task->ata_task.dma_xfer) { + ATAP = 0x06; /* DMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA \n")); + } else { + ATAP = 0x05; /* PIO*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO \n")); + } + if (task->ata_task.use_ncq && + dev->sata_dev.command_set != ATAPI_COMMAND_SET) { + ATAP = 0x07; /* FPDMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA \n")); + } + } + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) + ncg_tag = cpu_to_le32(hdr_tag); + dir = data_dir_flags[task->data_dir] << 8; + sata_cmd.tag = cpu_to_le32(tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.ncqtag_atap_dir_m = + cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir); + sata_cmd.sata_fis = task->ata_task.fis; + if (likely(!task->ata_task.device_control_reg_update)) + 
sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ + sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); + phys_addr = cpu_to_le64(ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0])); + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + __le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter)); + sata_cmd.addr_low = lower_32_bits(dma_addr); + sata_cmd.addr_high = upper_32_bits(dma_addr); + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + sata_cmd.addr_low = 0; + sata_cmd.addr_high = 0; + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } + mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); + return 0; +} + +/** + * pm8001_chip_phy_start_req - start phy via PHY_START COMMAND + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to start up. + */ +static int +pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) +{ + struct phy_start_req payload; + struct inbound_queue_table *circularQ; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTART; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + /* + ** [0:7] PHY Identifier + ** [8:11] link rate 1.5G, 3G, 6G + ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b both + ** [14] 0b disable spin up hold; 1b enable spin up hold + */ + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | + LINKRATE_30 | LINKRATE_60 | phy_id); + payload.sas_identify.dev_type = SAS_END_DEV; + payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; + memcpy(payload.sas_identify.sas_addr, + pm8001_ha->sas_addr, SAS_ADDR_SIZE); + payload.sas_identify.phy_id = phy_id; + mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + return 0; +} + +/** + * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to start up. + */ +static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, + u8 phy_id) +{ + struct phy_stop_req payload; + struct inbound_queue_table *circularQ; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTOP; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + payload.phy_id = cpu_to_le32(phy_id); + mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + return 0; +} + +/** + * see comments on mpi_reg_resp. 
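+ *
+ * Builds an OPC_INB_REG_DEV IOMB that registers @pm8001_dev (its SAS
+ * address, negotiated link rate and attached phy/port) with the SPC
+ * firmware; the handle assigned by the firmware is returned in the
+ * registration response handled by mpi_reg_resp().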
+ */ +static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag) +{ + struct reg_dev_req payload; + u32 opc; + u32 stp_sspsmp_sata = 0x4; + struct inbound_queue_table *circularQ; + u32 linkrate, phy_id; + u32 rc, tag = 0xdeadbeef; + struct pm8001_ccb_info *ccb; + u8 retryFlag = 0x1; + u16 firstBurstSize = 0; + u16 ITNT = 2000; + struct domain_device *dev = pm8001_dev->sas_device; + struct domain_device *parent_dev = dev->parent; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + ccb = &pm8001_ha->ccb_info[tag]; + ccb->device = pm8001_dev; + ccb->ccb_tag = tag; + payload.tag = cpu_to_le32(tag); + if (flag == 1) + stp_sspsmp_sata = 0x02; /*direct attached sata */ + else { + if (pm8001_dev->dev_type == SATA_DEV) + stp_sspsmp_sata = 0x00; /* stp*/ + else if (pm8001_dev->dev_type == SAS_END_DEV || + pm8001_dev->dev_type == EDGE_DEV || + pm8001_dev->dev_type == FANOUT_DEV) + stp_sspsmp_sata = 0x01; /*ssp or smp*/ + } + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) + phy_id = parent_dev->ex_dev.ex_phy->phy_id; + else + phy_id = pm8001_dev->attached_phy; + opc = OPC_INB_REG_DEV; + linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? + pm8001_dev->sas_device->linkrate : dev->port->linkrate; + payload.phyid_portid = + cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) | + ((phy_id & 0x0F) << 4)); + payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) | + ((linkrate & 0x0F) * 0x1000000) | + ((stp_sspsmp_sata & 0x03) * 0x10000000)); + payload.firstburstsize_ITNexustimeout = + cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); + memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr, + SAS_ADDR_SIZE); + mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return 0; +} + +/** + * see comments on mpi_reg_resp. + */ +static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, + u32 device_id) +{ + struct dereg_dev_req payload; + u32 opc = OPC_INB_DEREG_DEV_HANDLE; + struct inbound_queue_table *circularQ; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset((u8 *)&payload, 0, sizeof(payload)); + payload.tag = 1; + payload.device_id = cpu_to_le32(device_id); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unregister device device_id = %d\n", device_id)); + mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return 0; +} + +/** + * pm8001_chip_phy_ctl_req - support the local phy operation + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to operate + * @phy_op: + */ +static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op) +{ + struct local_phy_ctl_req payload; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_LOCAL_PHY_CONTROL; + memset((u8 *)&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = 1; + payload.phyop_phyid = + cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); + mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return 0; +} + +static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; +#ifdef PM8001_USE_MSIX + return 1; +#endif + value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); + if (value) + return 1; + return 0; + +} + +/** + * pm8001_chip_isr - PM8001 isr handler. + * @pm8001_ha: our hba card information. + * @irq: irq number. + * @stat: stat. 
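+ *
+ * Interrupts are masked while process_oq() drains the outbound queue and
+ * are re-enabled once the pending IOMBs have been consumed.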
+ */
+static void
+pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
+{
+	pm8001_chip_interrupt_disable(pm8001_ha);
+	process_oq(pm8001_ha);
+	pm8001_chip_interrupt_enable(pm8001_ha);
+}
+
+static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
+	u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag)
+{
+	struct task_abort_req task_abort;
+	struct inbound_queue_table *circularQ;
+
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	memset(&task_abort, 0, sizeof(task_abort));
+	if (ABORT_SINGLE == (flag & ABORT_MASK)) {
+		task_abort.abort_all = 0;
+		task_abort.device_id = cpu_to_le32(dev_id);
+		task_abort.tag_to_abort = cpu_to_le32(task_tag);
+		task_abort.tag = cpu_to_le32(cmd_tag);
+	} else if (ABORT_ALL == (flag & ABORT_MASK)) {
+		task_abort.abort_all = cpu_to_le32(1);
+		task_abort.device_id = cpu_to_le32(dev_id);
+		task_abort.tag = cpu_to_le32(cmd_tag);
+	}
+	mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
+	return 0;
+}
+
+/**
+ * pm8001_chip_abort_task - abort a SAS task when an error or exception happened.
+ * @task: the task we want to abort.
+ * @flag: the abort flag.
+ */
+static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
+{
+	u32 opc, device_id;
+	int rc = TMF_RESP_FUNC_FAILED;
+	PM8001_IO_DBG(pm8001_ha, pm8001_printk("Abort tag[%x]", task_tag));
+	if (pm8001_dev->dev_type == SAS_END_DEV)
+		opc = OPC_INB_SSP_ABORT;
+	else if (pm8001_dev->dev_type == SATA_DEV)
+		opc = OPC_INB_SATA_ABORT;
+	else
+		opc = OPC_INB_SMP_ABORT;/* SMP */
+	device_id = pm8001_dev->device_id;
+	rc = send_task_abort(pm8001_ha, opc, device_id, flag,
+		task_tag, cmd_tag);
+	if (rc != TMF_RESP_FUNC_COMPLETE)
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc));
+	return rc;
+}
+
+/**
+ * pm8001_chip_ssp_tm_req - build the task management command.
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information.
+ * @tmf: task management function.
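+ *
+ * Builds an OPC_INB_SSPINITMSTART IOMB carrying the TMF code and the tag
+ * of the task to be managed, then posts it to inbound queue 0.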
+ */ +static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + u32 opc = OPC_INB_SSPINITMSTART; + struct inbound_queue_table *circularQ; + struct ssp_ini_tm_start_req sspTMCmd; + + memset(&sspTMCmd, 0, sizeof(sspTMCmd)); + sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id); + sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed); + sspTMCmd.tmf = cpu_to_le32(tmf->tmf); + sspTMCmd.ds_ads_m = cpu_to_le32(1 << 2); + memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); + sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); + return 0; +} + +static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, + void *payload) +{ + u32 opc = OPC_INB_GET_NVMD_DATA; + u32 nvmd_type; + u32 rc; + u32 tag; + struct pm8001_ccb_info *ccb; + struct inbound_queue_table *circularQ; + struct get_nvm_data_req nvmd_req; + struct fw_control_ex *fw_control_context; + struct pm8001_ioctl_payload *ioctl_payload = payload; + + nvmd_type = ioctl_payload->minor_function; + fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; + fw_control_context->len = ioctl_payload->length; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&nvmd_req, 0, sizeof(nvmd_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + ccb = &pm8001_ha->ccb_info[tag]; + ccb->ccb_tag = tag; + ccb->fw_control_context = fw_control_context; + nvmd_req.tag = cpu_to_le32(tag); + + switch (nvmd_type) { + case TWI_DEVICE: { + u32 twi_addr, twi_page_size; + twi_addr = 0xa8; + twi_page_size = 2; + + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 | + twi_page_size << 8 | TWI_DEVICE); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case C_SEEPROM: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case VPD_FLASH: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case EXPAN_ROM: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + default: + break; + } + mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + return 0; +} + +static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, + void *payload) +{ + u32 opc = OPC_INB_SET_NVMD_DATA; + u32 nvmd_type; + u32 rc; + u32 tag; + struct pm8001_ccb_info *ccb; + struct inbound_queue_table *circularQ; + struct 
set_nvm_data_req nvmd_req; + struct fw_control_ex *fw_control_context; + struct pm8001_ioctl_payload *ioctl_payload = payload; + + nvmd_type = ioctl_payload->minor_function; + fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, + ioctl_payload->func_specific, + ioctl_payload->length); + memset(&nvmd_req, 0, sizeof(nvmd_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + ccb = &pm8001_ha->ccb_info[tag]; + ccb->fw_control_context = fw_control_context; + ccb->ccb_tag = tag; + nvmd_req.tag = cpu_to_le32(tag); + switch (nvmd_type) { + case TWI_DEVICE: { + u32 twi_addr, twi_page_size; + twi_addr = 0xa8; + twi_page_size = 2; + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 | + twi_page_size << 8 | TWI_DEVICE); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case C_SEEPROM: + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + case VPD_FLASH: + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + case EXPAN_ROM: + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + default: + break; + } + mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + return 0; +} + +/** + * pm8001_chip_fw_flash_update_build - support the firmware update operation + * @pm8001_ha: our hba card information. 
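+ * @tag: CCB tag identifying this flash update request.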
+ * @fw_flash_updata_info: firmware flash update param + */ +static int +pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, + void *fw_flash_updata_info, u32 tag) +{ + struct fw_flash_Update_req payload; + struct fw_flash_updata_info *info; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_FW_FLASH_UPDATE; + + memset((u8 *)&payload, 0, sizeof(struct fw_flash_Update_req)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + info = fw_flash_updata_info; + payload.tag = cpu_to_le32(tag); + payload.cur_image_len = cpu_to_le32(info->cur_image_len); + payload.cur_image_offset = cpu_to_le32(info->cur_image_offset); + payload.total_image_len = cpu_to_le32(info->total_image_len); + payload.len = info->sgl.im_len.len ; + payload.sgl_addr_lo = lower_32_bits(info->sgl.addr); + payload.sgl_addr_hi = upper_32_bits(info->sgl.addr); + mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return 0; +} + +static int +pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, + void *payload) +{ + struct fw_flash_updata_info flash_update_info; + struct fw_control_info *fw_control; + struct fw_control_ex *fw_control_context; + u32 rc; + u32 tag; + struct pm8001_ccb_info *ccb; + void *buffer = NULL; + dma_addr_t phys_addr; + u32 phys_addr_hi; + u32 phys_addr_lo; + struct pm8001_ioctl_payload *ioctl_payload = payload; + + fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; + if (fw_control->len != 0) { + if (pm8001_mem_alloc(pm8001_ha->pdev, + (void **)&buffer, + &phys_addr, + &phys_addr_hi, + &phys_addr_lo, + fw_control->len, 0) != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Mem alloc failure\n")); + return -ENOMEM; + } + } + memset((void *)buffer, 0, fw_control->len); + memcpy((void *)buffer, fw_control->buffer, fw_control->len); + flash_update_info.sgl.addr = cpu_to_le64(phys_addr); + flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); + flash_update_info.sgl.im_len.e = 0; + flash_update_info.cur_image_offset = fw_control->offset; + flash_update_info.cur_image_len = fw_control->len; + flash_update_info.total_image_len = fw_control->size; + fw_control_context->fw_control = fw_control; + fw_control_context->virtAddr = buffer; + fw_control_context->len = fw_control->len; + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + ccb = &pm8001_ha->ccb_info[tag]; + ccb->fw_control_context = fw_control_context; + ccb->ccb_tag = tag; + pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, tag); + return 0; +} + +static int +pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state) +{ + struct set_dev_state_req payload; + struct inbound_queue_table *circularQ; + struct pm8001_ccb_info *ccb; + u32 rc; + u32 tag; + u32 opc = OPC_INB_SET_DEVICE_STATE; + memset((u8 *)&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return -1; + ccb = &pm8001_ha->ccb_info[tag]; + ccb->ccb_tag = tag; + ccb->device = pm8001_dev; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + payload.device_id = cpu_to_le32(pm8001_dev->device_id); + payload.nds = cpu_to_le32(state); + mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return 0; + +} + +const struct pm8001_dispatch pm8001_8001_dispatch = { + .name = "pmc8001", + .chip_init = pm8001_chip_init, + .chip_soft_rst = pm8001_chip_soft_rst, + .chip_rst = pm8001_hw_chip_rst, + .chip_iounmap = pm8001_chip_iounmap, + 
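+	/* interrupt handling entry points */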
.isr = pm8001_chip_isr, + .is_our_interupt = pm8001_chip_is_our_interupt, + .isr_process_oq = process_oq, + .interrupt_enable = pm8001_chip_interrupt_enable, + .interrupt_disable = pm8001_chip_interrupt_disable, + .make_prd = pm8001_chip_make_sg, + .smp_req = pm8001_chip_smp_req, + .ssp_io_req = pm8001_chip_ssp_io_req, + .sata_req = pm8001_chip_sata_req, + .phy_start_req = pm8001_chip_phy_start_req, + .phy_stop_req = pm8001_chip_phy_stop_req, + .reg_dev_req = pm8001_chip_reg_dev_req, + .dereg_dev_req = pm8001_chip_dereg_dev_req, + .phy_ctl_req = pm8001_chip_phy_ctl_req, + .task_abort = pm8001_chip_abort_task, + .ssp_tm_req = pm8001_chip_ssp_tm_req, + .get_nvmd_req = pm8001_chip_get_nvmd_req, + .set_nvmd_req = pm8001_chip_set_nvmd_req, + .fw_flash_update_req = pm8001_chip_fw_flash_update_req, + .set_dev_state_req = pm8001_chip_set_dev_state_req, +}; + diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h new file mode 100644 index 000000000000..3690a2ba0eb2 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -0,0 +1,1011 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ +#ifndef _PMC8001_REG_H_ +#define _PMC8001_REG_H_ + +#include +#include + + +/* for Request Opcode of IOMB */ +#define OPC_INB_ECHO 1 /* 0x000 */ +#define OPC_INB_PHYSTART 4 /* 0x004 */ +#define OPC_INB_PHYSTOP 5 /* 0x005 */ +#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */ +#define OPC_INB_SSPINITMSTART 7 /* 0x007 */ +#define OPC_INB_SSPINIEXTIOSTART 8 /* 0x008 */ +#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */ +#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */ +#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */ +#define OPC_INB_SSPINIEDCIOSTART 12 /* 0x00C */ +#define OPC_INB_SSPINIEXTEDCIOSTART 13 /* 0x00D */ +#define OPC_INB_SSPTGTEDCIOSTART 14 /* 0x00E */ +#define OPC_INB_SSP_ABORT 15 /* 0x00F */ +#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */ +#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */ +#define OPC_INB_SMP_REQUEST 18 /* 0x012 */ +/* SMP_RESPONSE is removed */ +#define OPC_INB_SMP_RESPONSE 19 /* 0x013 */ +#define OPC_INB_SMP_ABORT 20 /* 0x014 */ +#define OPC_INB_REG_DEV 22 /* 0x016 */ +#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */ +#define OPC_INB_SATA_ABORT 24 /* 0x018 */ +#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */ +#define OPC_INB_GET_DEV_INFO 26 /* 0x01A */ +#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */ +#define OPC_INB_GPIO 34 /* 0x022 */ +#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */ +#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */ +#define OPC_INB_SAS_HW_EVENT_ACK 37 /* 0x025 */ +#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */ +#define OPC_INB_PORT_CONTROL 39 /* 0x027 */ +#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */ +#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */ +#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */ +#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */ +#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */ +#define OPC_INB_SAS_RE_INITIALIZE 45 /* 0x02D */ + +/* for Response Opcode of IOMB */ +#define OPC_OUB_ECHO 1 /* 0x001 */ +#define OPC_OUB_HW_EVENT 4 /* 0x004 */ +#define OPC_OUB_SSP_COMP 5 /* 0x005 */ +#define OPC_OUB_SMP_COMP 6 /* 0x006 */ +#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */ +#define OPC_OUB_DEV_REGIST 10 /* 0x00A */ +#define OPC_OUB_DEREG_DEV 11 /* 0x00B */ +#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */ +#define OPC_OUB_SATA_COMP 13 /* 0x00D */ +#define OPC_OUB_SATA_EVENT 14 /* 0x00E */ +#define OPC_OUB_SSP_EVENT 15 /* 0x00F */ +#define OPC_OUB_DEV_HANDLE_ARRIV 16 /* 0x010 */ +/* SMP_RECEIVED Notification is removed */ +#define OPC_OUB_SMP_RECV_EVENT 17 /* 0x011 */ +#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */ +#define OPC_OUB_DEV_INFO 19 /* 0x013 */ +#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */ +#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */ +#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */ +#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */ +#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */ +#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */ +#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */ +#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */ +#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */ +#define OPC_OUB_SAS_HW_EVENT_ACK 31 /* 0x01F */ +#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */ +#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */ +#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */ +#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */ +#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */ +#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */ +#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */ +#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */ +#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */ +#define OPC_OUB_SAS_RE_INITIALIZE 41 /* 0x029 */ + +/* for phy start*/ +#define SPINHOLD_DISABLE (0x00 << 14) +#define 
SPINHOLD_ENABLE (0x01 << 14) +#define LINKMODE_SAS (0x01 << 12) +#define LINKMODE_DSATA (0x02 << 12) +#define LINKMODE_AUTO (0x03 << 12) +#define LINKRATE_15 (0x01 << 8) +#define LINKRATE_30 (0x02 << 8) +#define LINKRATE_60 (0x04 << 8) + +struct mpi_msg_hdr{ + __le32 header; /* Bits [11:0] - Message operation code */ + /* Bits [15:12] - Message Category */ + /* Bits [21:16] - Outboundqueue ID for the + operation completion message */ + /* Bits [23:22] - Reserved */ + /* Bits [28:24] - Buffer Count, indicates how + many buffer are allocated for the massage */ + /* Bits [30:29] - Reserved */ + /* Bits [31] - Message Valid bit */ +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of PHY Start Command + * use to describe enable the phy (64 bytes) + */ +struct phy_start_req { + __le32 tag; + __le32 ase_sh_lm_slr_phyid; + struct sas_identify_frame sas_identify; + u32 reserved[5]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of PHY Start Command + * use to disable the phy (64 bytes) + */ +struct phy_stop_req { + __le32 tag; + __le32 phy_id; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/* set device bits fis - device to host */ +struct set_dev_bits_fis { + u8 fis_type; /* 0xA1*/ + u8 n_i_pmport; + /* b7 : n Bit. Notification bit. If set device needs attention. */ + /* b6 : i Bit. Interrupt Bit */ + /* b5-b4: reserved2 */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u32 _r_a; +} __attribute__ ((packed)); +/* PIO setup FIS - device to host */ +struct pio_setup_fis { + u8 fis_type; /* 0x5f */ + u8 i_d_pmPort; + /* b7 : reserved */ + /* b6 : i bit. Interrupt bit */ + /* b5 : d bit. data transfer direction. set to 1 for device to host + xfer */ + /* b4 : reserved */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + u8 sector_count; + u8 sector_count_exp; + u8 _r_b; + u8 e_status; + u8 _r_c[2]; + u8 transfer_count; +} __attribute__ ((packed)); + +/* + * brief the data structure of SATA Completion Response + * use to discribe the sata task response (64 bytes) + */ +struct sata_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u32 sata_resp[12]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of SAS HW Event Notification + * use to alert the host about the hardware event(64 bytes) + */ +struct hw_event_resp { + __le32 lr_evt_status_phyid_portid; + __le32 evt_param; + __le32 npip_portstate; + struct sas_identify_frame sas_identify; + struct dev_to_host_fis sata_fis; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of REGISTER DEVICE Command + * use to describe MPI REGISTER DEVICE Command (64 bytes) + */ + +struct reg_dev_req { + __le32 tag; + __le32 phyid_portid; + __le32 dtype_dlr_retry; + __le32 firstburstsize_ITNexustimeout; + u32 sas_addr_hi; + u32 sas_addr_low; + __le32 upper_device_id; + u32 reserved[8]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of DEREGISTER DEVICE Command + * use to request spc to remove all internal resources associated + * with the device id (64 bytes) + */ + +struct dereg_dev_req { + __le32 tag; + __le32 device_id; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of DEVICE_REGISTRATION Response + * use to notify the completion of the device registration (64 bytes) + */ + +struct dev_reg_resp { + __le32 tag; + __le32 status; + __le32 device_id; 
+ u32 reserved[12]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of Local PHY Control Command + * use to issue PHY CONTROL to local phy (64 bytes) + */ +struct local_phy_ctl_req { + __le32 tag; + __le32 phyop_phyid; + u32 reserved1[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Local Phy Control Response + * use to describe MPI Local Phy Control Response (64 bytes) + */ +struct local_phy_ctl_resp { + __le32 tag; + __le32 phyop_phyid; + __le32 status; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + + +#define OP_BITS 0x0000FF00 +#define ID_BITS 0x0000000F + +/* + * brief the data structure of PORT Control Command + * use to control port properties (64 bytes) + */ + +struct port_ctl_req { + __le32 tag; + __le32 portop_portid; + __le32 param0; + __le32 param1; + u32 reserved1[11]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of HW Event Ack Command + * use to acknowledge receive HW event (64 bytes) + */ + +struct hw_event_ack_req { + __le32 tag; + __le32 sea_phyid_portid; + __le32 param0; + __le32 param1; + u32 reserved1[11]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of SSP Completion Response + * use to indicate a SSP Completion (n bytes) + */ +struct ssp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 ssptag_rescv_rescpad; + struct ssp_response_iu ssp_resp_iu; + __le32 residual_count; +} __attribute__((packed, aligned(4))); + + +#define SSP_RESCV_BIT 0x00010000 + +/* + * brief the data structure of SATA EVNET esponse + * use to indicate a SATA Completion (64 bytes) + */ + +struct sata_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP EVNET esponse + * use to indicate a SSP Completion (64 bytes) + */ + +struct ssp_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of General Event Notification Response + * use to describe MPI General Event Notification Response (64 bytes) + */ +struct general_event_resp { + __le32 status; + __le32 inb_IOMB_payload[14]; +} __attribute__((packed, aligned(4))); + + +#define GENERAL_EVENT_PAYLOAD 14 +#define OPCODE_BITS 0x00000fff + +/* + * brief the data structure of SMP Request Command + * use to describe MPI SMP REQUEST Command (64 bytes) + */ +struct smp_req { + __le32 tag; + __le32 device_id; + __le32 len_ip_ir; + /* Bits [0] - Indirect response */ + /* Bits [1] - Indirect Payload */ + /* Bits [15:2] - Reserved */ + /* Bits [23:16] - direct payload Len */ + /* Bits [31:24] - Reserved */ + u8 smp_req16[16]; + union { + u8 smp_req[32]; + struct { + __le64 long_req_addr;/* sg dma address, LE */ + __le32 long_req_size;/* LE */ + u32 _r_a; + __le64 long_resp_addr;/* sg dma address, LE */ + __le32 long_resp_size;/* LE */ + u32 _r_b; + } long_smp_req;/* sequencer extension */ + }; +} __attribute__((packed, aligned(4))); +/* + * brief the data structure of SMP Completion Response + * use to describe MPI SMP Completion Response (64 bytes) + */ +struct smp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 _r_a[12]; +} __attribute__((packed, aligned(4))); + +/* + *brief the data structure of SSP SMP SATA Abort Command + * use to describe MPI SSP SMP & SATA Abort Command (64 bytes) + */ +struct task_abort_req { + 
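+	/* tag identifies this abort IOMB itself; tag_to_abort is the tag of
+	 * the I/O to be aborted and is not used when abort_all is set */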
__le32 tag; + __le32 device_id; + __le32 tag_to_abort; + __le32 abort_all; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/* These flags used for SSP SMP & SATA Abort */ +#define ABORT_MASK 0x3 +#define ABORT_SINGLE 0x0 +#define ABORT_ALL 0x1 + +/** + * brief the data structure of SSP SATA SMP Abort Response + * use to describe SSP SMP & SATA Abort Response ( 64 bytes) + */ +struct task_abort_resp { + __le32 tag; + __le32 status; + __le32 scp; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SAS Diagnostic Start/End Command + * use to describe MPI SAS Diagnostic Start/End Command (64 bytes) + */ +struct sas_diag_start_end_req { + __le32 tag; + __le32 operation_phyid; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SAS Diagnostic Execute Command + * use to describe MPI SAS Diagnostic Execute Command (64 bytes) + */ +struct sas_diag_execute_req{ + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 pat1_pat2; + __le32 threshold; + __le32 codepat_errmsk; + __le32 pmon; + __le32 pERF1CTL; + u32 reserved[8]; +} __attribute__((packed, aligned(4))); + + +#define SAS_DIAG_PARAM_BYTES 24 + +/* + * brief the data structure of Set Device State Command + * use to describe MPI Set Device State Command (64 bytes) + */ +struct set_dev_state_req { + __le32 tag; + __le32 device_id; + __le32 nds; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of SATA Start Command + * use to describe MPI SATA IO Start Command (64 bytes) + */ + +struct sata_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 ncqtag_atap_dir_m; + struct host_to_dev_fis sata_fis; + u32 reserved1; + u32 reserved2; + u32 addr_low; + u32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI TM Start Command + * use to describe MPI SSP INI TM Start Command (64 bytes) + */ +struct ssp_ini_tm_start_req { + __le32 tag; + __le32 device_id; + __le32 relate_tag; + __le32 tmf; + u8 lun[8]; + __le32 ds_ads_m; + u32 reserved[8]; +} __attribute__((packed, aligned(4))); + + +struct ssp_info_unit { + u8 lun[8];/* SCSI Logical Unit Number */ + u8 reserved1;/* reserved */ + u8 efb_prio_attr; + /* B7 : enabledFirstBurst */ + /* B6-3 : taskPriority */ + /* B2-0 : taskAttribute */ + u8 reserved2; /* reserved */ + u8 additional_cdb_len; + /* B7-2 : additional_cdb_len */ + /* B1-0 : reserved */ + u8 cdb[16];/* The SCSI CDB up to 16 bytes length */ +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SSP INI IO Start Command + * use to describe MPI SSP INI IO Start Command (64 bytes) + */ +struct ssp_ini_io_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dir_m_tlr; + struct ssp_info_unit ssp_iu; + __le32 addr_low; + __le32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Firmware download + * use to describe MPI FW DOWNLOAD Command (64 bytes) + */ +struct fw_flash_Update_req { + __le32 tag; + __le32 cur_image_offset; + __le32 cur_image_len; + __le32 total_image_len; + u32 reserved0[7]; + __le32 sgl_addr_lo; + __le32 sgl_addr_hi; + __le32 len; + __le32 ext_reserved; +} __attribute__((packed, aligned(4))); + + +#define FWFLASH_IOMB_RESERVED_LEN 0x07 +/** + * brief the data structure of FW_FLASH_UPDATE Response + * use to describe MPI FW_FLASH_UPDATE Response (64 bytes) + * + */ 
+struct fw_flash_Update_resp { + dma_addr_t tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Get NVM Data Command + * use to get data from NVM in HBA(64 bytes) + */ +struct get_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1; +} __attribute__((packed, aligned(4))); + + +struct set_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1; +} __attribute__((packed, aligned(4))); + + +#define TWI_DEVICE 0x0 +#define C_SEEPROM 0x1 +#define VPD_FLASH 0x4 +#define AAP1_RDUMP 0x5 +#define IOP_RDUMP 0x6 +#define EXPAN_ROM 0x7 + +#define IPMode 0x80000000 +#define NVMD_TYPE 0x0000000F +#define NVMD_STAT 0x0000FFFF +#define NVMD_LEN 0xFF000000 +/** + * brief the data structure of Get NVMD Data Response + * use to describe MPI Get NVMD Data Response (64 bytes) + */ +struct get_nvm_data_resp { + __le32 tag; + __le32 ir_tda_bn_dps_das_nvm; + __le32 dlen_status; + __le32 nvm_data[12]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SAS Diagnostic Start/End Response + * use to describe MPI SAS Diagnostic Start/End Response (64 bytes) + * + */ +struct sas_diag_start_end_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SAS Diagnostic Execute Response + * use to describe MPI SAS Diagnostic Execute Response (64 bytes) + * + */ +struct sas_diag_execute_resp { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 Status; + __le32 ReportData; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Set Device State Response + * use to describe MPI Set Device State Response (64 bytes) + * + */ +struct set_dev_state_resp { + __le32 tag; + __le32 status; + __le32 device_id; + __le32 pds_nds; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + + +#define NDS_BITS 0x0F +#define PDS_BITS 0xF0 + +/* + * HW Events type + */ + +#define HW_EVENT_RESET_START 0x01 +#define HW_EVENT_CHIP_RESET_COMPLETE 0x02 +#define HW_EVENT_PHY_STOP_STATUS 0x03 +#define HW_EVENT_SAS_PHY_UP 0x04 +#define HW_EVENT_SATA_PHY_UP 0x05 +#define HW_EVENT_SATA_SPINUP_HOLD 0x06 +#define HW_EVENT_PHY_DOWN 0x07 +#define HW_EVENT_PORT_INVALID 0x08 +#define HW_EVENT_BROADCAST_CHANGE 0x09 +#define HW_EVENT_PHY_ERROR 0x0A +#define HW_EVENT_BROADCAST_SES 0x0B +#define HW_EVENT_INBOUND_CRC_ERROR 0x0C +#define HW_EVENT_HARD_RESET_RECEIVED 0x0D +#define HW_EVENT_MALFUNCTION 0x0E +#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F +#define HW_EVENT_BROADCAST_EXP 0x10 +#define HW_EVENT_PHY_START_STATUS 0x11 +#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 +#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 +#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 +#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 +#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 +#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 +#define HW_EVENT_PORT_RECOVER 0x18 +#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 +#define HW_EVENT_PORT_RESET_COMPLETE 0x20 +#define EVENT_BROADCAST_ASYNCH_EVENT 0x21 + +/* port state */ +#define PORT_NOT_ESTABLISHED 0x00 +#define PORT_VALID 0x01 +#define PORT_LOSTCOMM 0x02 +#define PORT_IN_RESET 0x04 +#define PORT_INVALID 0x08 + +/* + * SSP/SMP/SATA IO Completion Status values + */ + +#define IO_SUCCESS 
0x00 +#define IO_ABORTED 0x01 +#define IO_OVERFLOW 0x02 +#define IO_UNDERFLOW 0x03 +#define IO_FAILED 0x04 +#define IO_ABORT_RESET 0x05 +#define IO_NOT_VALID 0x06 +#define IO_NO_DEVICE 0x07 +#define IO_ILLEGAL_PARAMETER 0x08 +#define IO_LINK_FAILURE 0x09 +#define IO_PROG_ERROR 0x0A +#define IO_EDC_IN_ERROR 0x0B +#define IO_EDC_OUT_ERROR 0x0C +#define IO_ERROR_HW_TIMEOUT 0x0D +#define IO_XFER_ERROR_BREAK 0x0E +#define IO_XFER_ERROR_PHY_NOT_READY 0x0F +#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 +#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 +#define IO_OPEN_CNX_ERROR_BREAK 0x12 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 +#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 +#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 +#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 +#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 +#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 +#define IO_XFER_ERROR_NAK_RECEIVED 0x19 +#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A +#define IO_XFER_ERROR_PEER_ABORTED 0x1B +#define IO_XFER_ERROR_RX_FRAME 0x1C +#define IO_XFER_ERROR_DMA 0x1D +#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E +#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F +#define IO_XFER_ERROR_SATA 0x20 +#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 +#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 +#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 +#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 +#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 +#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 +#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 +#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 + +#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 +#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 +#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 + +#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 +#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 +#define IO_XFER_CMD_FRAME_ISSUED 0x36 +#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 +#define IO_PORT_IN_RESET 0x38 +#define IO_DS_NON_OPERATIONAL 0x39 +#define IO_DS_IN_RECOVERY 0x3A +#define IO_TM_TAG_NOT_FOUND 0x3B +#define IO_XFER_PIO_SETUP_ERROR 0x3C +#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D +#define IO_DS_IN_ERROR 0x3E +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F +#define IO_ABORT_IN_PROGRESS 0x40 +#define IO_ABORT_DELAYED 0x41 +#define IO_INVALID_LENGTH 0x42 + +/* WARNING: This error code must always be the last number. 
+ * If you add error code, modify this code also + * It is used as an index + */ +#define IO_ERROR_UNKNOWN_GENERIC 0x43 + +/* MSGU CONFIGURATION TABLE*/ + +#define SPC_MSGU_CFG_TABLE_UPDATE 0x01/* Inbound doorbell bit0 */ +#define SPC_MSGU_CFG_TABLE_RESET 0x02/* Inbound doorbell bit1 */ +#define SPC_MSGU_CFG_TABLE_FREEZE 0x04/* Inbound doorbell bit2 */ +#define SPC_MSGU_CFG_TABLE_UNFREEZE 0x08/* Inbound doorbell bit4 */ +#define MSGU_IBDB_SET 0x04 +#define MSGU_HOST_INT_STATUS 0x08 +#define MSGU_HOST_INT_MASK 0x0C +#define MSGU_IOPIB_INT_STATUS 0x18 +#define MSGU_IOPIB_INT_MASK 0x1C +#define MSGU_IBDB_CLEAR 0x20/* RevB - Host not use */ +#define MSGU_MSGU_CONTROL 0x24 +#define MSGU_ODR 0x3C/* RevB */ +#define MSGU_ODCR 0x40/* RevB */ +#define MSGU_SCRATCH_PAD_0 0x44 +#define MSGU_SCRATCH_PAD_1 0x48 +#define MSGU_SCRATCH_PAD_2 0x4C +#define MSGU_SCRATCH_PAD_3 0x50 +#define MSGU_HOST_SCRATCH_PAD_0 0x54 +#define MSGU_HOST_SCRATCH_PAD_1 0x58 +#define MSGU_HOST_SCRATCH_PAD_2 0x5C +#define MSGU_HOST_SCRATCH_PAD_3 0x60 +#define MSGU_HOST_SCRATCH_PAD_4 0x64 +#define MSGU_HOST_SCRATCH_PAD_5 0x68 +#define MSGU_HOST_SCRATCH_PAD_6 0x6C +#define MSGU_HOST_SCRATCH_PAD_7 0x70 +#define MSGU_ODMR 0x74/* RevB */ + +/* bit definition for ODMR register */ +#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all + interrupt vector */ +#define ODMR_CLEAR_ALL 0/* clear all + interrupt vector */ +/* bit definition for ODCR register */ +#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all + interrupt vector*/ +/* MSIX Interupts */ +#define MSIX_TABLE_OFFSET 0x2000 +#define MSIX_TABLE_ELEMENT_SIZE 0x10 +#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC +#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + MSIX_INTERRUPT_CONTROL_OFFSET) +#define MSIX_INTERRUPT_DISABLE 0x1 +#define MSIX_INTERRUPT_ENABLE 0x0 + + +/* state definition for Scratch Pad1 register */ +#define SCRATCH_PAD1_POR 0x00 /* power on reset state */ +#define SCRATCH_PAD1_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD1_ERR 0x02 /* error state */ +#define SCRATCH_PAD1_RDY 0x03 /* ready state */ +#define SCRATCH_PAD1_RST 0x04 /* soft reset toggle flag */ +#define SCRATCH_PAD1_AAP1RDY_RST 0x08 /* AAP1 ready for soft reset */ +#define SCRATCH_PAD1_STATE_MASK 0xFFFFFFF0 /* ScratchPad1 + Mask, bit1-0 State, bit2 Soft Reset, bit3 FW RDY for Soft Reset */ +#define SCRATCH_PAD1_RESERVED 0x000003F8 /* Scratch Pad1 + Reserved bit 3 to 9 */ + + /* state definition for Scratch Pad2 register */ +#define SCRATCH_PAD2_POR 0x00 /* power on state */ +#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD2_ERR 0x02 /* error state */ +#define SCRATCH_PAD2_RDY 0x03 /* ready state */ +#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW ready for soft reset flag*/ +#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ +#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 + Mask, bit1-0 State */ +#define SCRATCH_PAD2_RESERVED 0x000003FC /* Scratch Pad1 + Reserved bit 2 to 9 */ + +#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ +#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ + +/* main configuration offset - byte offset */ +#define MAIN_SIGNATURE_OFFSET 0x00/* DWORD 0x00 */ +#define MAIN_INTERFACE_REVISION 0x04/* DWORD 0x01 */ +#define MAIN_FW_REVISION 0x08/* DWORD 0x02 */ +#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C/* DWORD 0x03 */ +#define MAIN_MAX_SGL_OFFSET 0x10/* DWORD 0x04 */ +#define MAIN_CNTRL_CAP_OFFSET 0x14/* DWORD 0x05 */ +#define MAIN_GST_OFFSET 0x18/* DWORD 0x06 */ +#define MAIN_IBQ_OFFSET 0x1C/* DWORD 0x07 */ +#define 
MAIN_OBQ_OFFSET 0x20/* DWORD 0x08 */ +#define MAIN_IQNPPD_HPPD_OFFSET 0x24/* DWORD 0x09 */ +#define MAIN_OB_HW_EVENT_PID03_OFFSET 0x28/* DWORD 0x0A */ +#define MAIN_OB_HW_EVENT_PID47_OFFSET 0x2C/* DWORD 0x0B */ +#define MAIN_OB_NCQ_EVENT_PID03_OFFSET 0x30/* DWORD 0x0C */ +#define MAIN_OB_NCQ_EVENT_PID47_OFFSET 0x34/* DWORD 0x0D */ +#define MAIN_TITNX_EVENT_PID03_OFFSET 0x38/* DWORD 0x0E */ +#define MAIN_TITNX_EVENT_PID47_OFFSET 0x3C/* DWORD 0x0F */ +#define MAIN_OB_SSP_EVENT_PID03_OFFSET 0x40/* DWORD 0x10 */ +#define MAIN_OB_SSP_EVENT_PID47_OFFSET 0x44/* DWORD 0x11 */ +#define MAIN_OB_SMP_EVENT_PID03_OFFSET 0x48/* DWORD 0x12 */ +#define MAIN_OB_SMP_EVENT_PID47_OFFSET 0x4C/* DWORD 0x13 */ +#define MAIN_EVENT_LOG_ADDR_HI 0x50/* DWORD 0x14 */ +#define MAIN_EVENT_LOG_ADDR_LO 0x54/* DWORD 0x15 */ +#define MAIN_EVENT_LOG_BUFF_SIZE 0x58/* DWORD 0x16 */ +#define MAIN_EVENT_LOG_OPTION 0x5C/* DWORD 0x17 */ +#define MAIN_IOP_EVENT_LOG_ADDR_HI 0x60/* DWORD 0x18 */ +#define MAIN_IOP_EVENT_LOG_ADDR_LO 0x64/* DWORD 0x19 */ +#define MAIN_IOP_EVENT_LOG_BUFF_SIZE 0x68/* DWORD 0x1A */ +#define MAIN_IOP_EVENT_LOG_OPTION 0x6C/* DWORD 0x1B */ +#define MAIN_FATAL_ERROR_INTERRUPT 0x70/* DWORD 0x1C */ +#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74/* DWORD 0x1D */ +#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78/* DWORD 0x1E */ +#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C/* DWORD 0x1F */ +#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80/* DWORD 0x20 */ +#define MAIN_HDA_FLAGS_OFFSET 0x84/* DWORD 0x21 */ +#define MAIN_ANALOG_SETUP_OFFSET 0x88/* DWORD 0x22 */ + +/* Gereral Status Table offset - byte offset */ +#define GST_GSTLEN_MPIS_OFFSET 0x00 +#define GST_IQ_FREEZE_STATE0_OFFSET 0x04 +#define GST_IQ_FREEZE_STATE1_OFFSET 0x08 +#define GST_MSGUTCNT_OFFSET 0x0C +#define GST_IOPTCNT_OFFSET 0x10 +#define GST_PHYSTATE_OFFSET 0x18 +#define GST_PHYSTATE0_OFFSET 0x18 +#define GST_PHYSTATE1_OFFSET 0x1C +#define GST_PHYSTATE2_OFFSET 0x20 +#define GST_PHYSTATE3_OFFSET 0x24 +#define GST_PHYSTATE4_OFFSET 0x28 +#define GST_PHYSTATE5_OFFSET 0x2C +#define GST_PHYSTATE6_OFFSET 0x30 +#define GST_PHYSTATE7_OFFSET 0x34 +#define GST_RERRINFO_OFFSET 0x44 + +/* General Status Table - MPI state */ +#define GST_MPI_STATE_UNINIT 0x00 +#define GST_MPI_STATE_INIT 0x01 +#define GST_MPI_STATE_TERMINATION 0x02 +#define GST_MPI_STATE_ERROR 0x03 +#define GST_MPI_STATE_MASK 0x07 + +#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 +#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 +/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ +#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 +#define PCIE_EVENT_INTERRUPT 0x003044 +#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 +#define PCIE_ERROR_INTERRUPT 0x00304C +/* signature defintion for host scratch pad0 register */ +#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd +/* Signature for Soft Reset */ + +/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ +#define SPC_REG_RESET 0x000000/* reset register */ + +/* bit difination for SPC_RESET register */ +#define SPC_REG_RESET_OSSP 0x00000001 +#define SPC_REG_RESET_RAAE 0x00000002 +#define SPC_REG_RESET_PCS_SPBC 0x00000004 +#define SPC_REG_RESET_PCS_IOP_SS 0x00000008 +#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 +#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 +#define SPC_REG_RESET_PCS_LM 0x00000040 +#define SPC_REG_RESET_PCS 0x00000080 +#define SPC_REG_RESET_GSM 0x00000100 +#define SPC_REG_RESET_DDR2 0x00010000 +#define SPC_REG_RESET_BDMA_CORE 0x00020000 +#define SPC_REG_RESET_BDMA_SXCBI 0x00040000 +#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 +#define 
SPC_REG_RESET_PCIE_PWR 0x00100000 +#define SPC_REG_RESET_PCIE_SFT 0x00200000 +#define SPC_REG_RESET_PCS_SXCBI 0x00400000 +#define SPC_REG_RESET_LMS_SXCBI 0x00800000 +#define SPC_REG_RESET_PMIC_SXCBI 0x01000000 +#define SPC_REG_RESET_PMIC_CORE 0x02000000 +#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 +#define SPC_REG_RESET_DEVICE 0x80000000 + +/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ +#define SPC_IBW_AXI_TRANSLATION_LOW 0x003258 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +/* Dynamic map through Bar4 - 0x00700000 */ +#define GSM_CONFIG_RESET 0x00000000 +#define RAM_ECC_DB_ERR 0x00000018 +#define GSM_READ_ADDR_PARITY_INDIC 0x00000058 +#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 +#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 +#define GSM_READ_ADDR_PARITY_CHECK 0x00000038 +#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 +#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 + +#define RB6_ACCESS_REG 0x6A0000 +#define HDAC_EXEC_CMD 0x0002 +#define HDA_C_PA 0xcb +#define HDA_SEQ_ID_BITS 0x00ff0000 +#define HDA_GSM_OFFSET_BITS 0x00FFFFFF +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +#define SPC_TOP_LEVEL_ADDR_BASE 0x000000 +#define GSM_CONFIG_RESET_VALUE 0x00003b00 +#define GPIO_ADDR_BASE 0x00090000 +#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c + +/* RB6 offset */ +#define SPC_RB6_OFFSET 0x80C0 +/* Magic number of soft reset for RB6 */ +#define RB6_MAGIC_NUMBER_RST 0x1234 + +/* Device Register status */ +#define DEVREG_SUCCESS 0x00 +#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 +#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 +#define DEVREG_FAILURE_INVALID_PHY_ID 0x03 +#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 +#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 +#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 +#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + +#endif + diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c new file mode 100644 index 000000000000..811b5d36d5f0 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -0,0 +1,888 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#include "pm8001_sas.h" +#include "pm8001_chips.h" + +static struct scsi_transport_template *pm8001_stt; + +static const struct pm8001_chip_info pm8001_chips[] = { + [chip_8001] = { 8, &pm8001_8001_dispatch,}, +}; +static int pm8001_id; + +LIST_HEAD(hba_list); + +/** + * The main structure which LLDD must register for scsi core. + */ +static struct scsi_host_template pm8001_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = pm8001_slave_configure, + .slave_destroy = sas_slave_destroy, + .scan_finished = pm8001_scan_finished, + .scan_start = pm8001_scan_start, + .change_queue_depth = sas_change_queue_depth, + .change_queue_type = sas_change_queue_type, + .bios_param = sas_bios_param, + .can_queue = 1, + .cmd_per_lun = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .slave_alloc = pm8001_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, + .shost_attrs = pm8001_host_attrs, +}; + +/** + * Sas layer call this function to execute specific task. + */ +static struct sas_domain_function_template pm8001_transport_ops = { + .lldd_dev_found = pm8001_dev_found, + .lldd_dev_gone = pm8001_dev_gone, + + .lldd_execute_task = pm8001_queue_command, + .lldd_control_phy = pm8001_phy_control, + + .lldd_abort_task = pm8001_abort_task, + .lldd_abort_task_set = pm8001_abort_task_set, + .lldd_clear_aca = pm8001_clear_aca, + .lldd_clear_task_set = pm8001_clear_task_set, + .lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset, + .lldd_lu_reset = pm8001_lu_reset, + .lldd_query_task = pm8001_query_task, +}; + +/** + *pm8001_phy_init - initiate our adapter phys + *@pm8001_ha: our hba structure. + *@phy_id: phy id. + */ +static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, + int phy_id) +{ + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + phy->phy_state = 0; + phy->pm8001_ha = pm8001_ha; + sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0; + sas_phy->class = SAS; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->type = PHY_TYPE_PHYSICAL; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + sas_phy->id = phy_id; + sas_phy->sas_addr = &pm8001_ha->sas_addr[0]; + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; + sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata; + sas_phy->lldd_phy = phy; +} + +/** + *pm8001_free - free hba + *@pm8001_ha: our hba structure. 
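+ *
+ * Frees every DMA-consistent region in memoryMap, unmaps the chip
+ * registers, drops the Scsi_Host reference, cancels any queued delayed
+ * work and frees the tag bitmap allocated in pm8001_alloc().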
+ * + */ +static void pm8001_free(struct pm8001_hba_info *pm8001_ha) +{ + int i; + struct pm8001_wq *wq; + + if (!pm8001_ha) + return; + + for (i = 0; i < USI_MAX_MEMCNT; i++) { + if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) { + pci_free_consistent(pm8001_ha->pdev, + pm8001_ha->memoryMap.region[i].element_size, + pm8001_ha->memoryMap.region[i].virt_ptr, + pm8001_ha->memoryMap.region[i].phys_addr); + } + } + PM8001_CHIP_DISP->chip_iounmap(pm8001_ha); + if (pm8001_ha->shost) + scsi_host_put(pm8001_ha->shost); + list_for_each_entry(wq, &pm8001_ha->wq_list, entry) + cancel_delayed_work(&wq->work_q); + kfree(pm8001_ha->tags); + kfree(pm8001_ha); +} + +#ifdef PM8001_USE_TASKLET +static void pm8001_tasklet(unsigned long opaque) +{ + struct pm8001_hba_info *pm8001_ha; + pm8001_ha = (struct pm8001_hba_info *)opaque;; + if (unlikely(!pm8001_ha)) + BUG_ON(1); + PM8001_CHIP_DISP->isr(pm8001_ha); +} +#endif + + + /** + * pm8001_interrupt - when HBA originate a interrupt,we should invoke this + * dispatcher to handle each case. + * @irq: irq number. + * @opaque: the passed general host adapter struct + */ +static irqreturn_t pm8001_interrupt(int irq, void *opaque) +{ + struct pm8001_hba_info *pm8001_ha; + irqreturn_t ret = IRQ_HANDLED; + struct sas_ha_struct *sha = opaque; + pm8001_ha = sha->lldd_ha; + if (unlikely(!pm8001_ha)) + return IRQ_NONE; + if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) + return IRQ_NONE; +#ifdef PM8001_USE_TASKLET + tasklet_schedule(&pm8001_ha->tasklet); +#else + ret = PM8001_CHIP_DISP->isr(pm8001_ha); +#endif + return ret; +} + +/** + * pm8001_alloc - initiate our hba structure and 6 DMAs area. + * @pm8001_ha:our hba structure. + * + */ +static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha) +{ + int i; + spin_lock_init(&pm8001_ha->lock); + for (i = 0; i < pm8001_ha->chip->n_phy; i++) + pm8001_phy_init(pm8001_ha, i); + + pm8001_ha->tags = kmalloc(sizeof(*pm8001_ha->tags)*PM8001_MAX_DEVICES, + GFP_KERNEL); + + /* MPI Memory region 1 for AAP Event Log for fw */ + pm8001_ha->memoryMap.region[AAP1].num_elements = 1; + pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[AAP1].alignment = 32; + + /* MPI Memory region 2 for IOP Event Log for fw */ + pm8001_ha->memoryMap.region[IOP].num_elements = 1; + pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[IOP].alignment = 32; + + /* MPI Memory region 3 for consumer Index of inbound queues */ + pm8001_ha->memoryMap.region[CI].num_elements = 1; + pm8001_ha->memoryMap.region[CI].element_size = 4; + pm8001_ha->memoryMap.region[CI].total_len = 4; + pm8001_ha->memoryMap.region[CI].alignment = 4; + + /* MPI Memory region 4 for producer Index of outbound queues */ + pm8001_ha->memoryMap.region[PI].num_elements = 1; + pm8001_ha->memoryMap.region[PI].element_size = 4; + pm8001_ha->memoryMap.region[PI].total_len = 4; + pm8001_ha->memoryMap.region[PI].alignment = 4; + + /* MPI Memory region 5 inbound queues */ + pm8001_ha->memoryMap.region[IB].num_elements = 256; + pm8001_ha->memoryMap.region[IB].element_size = 64; + pm8001_ha->memoryMap.region[IB].total_len = 256 * 64; + pm8001_ha->memoryMap.region[IB].alignment = 64; + + /* MPI Memory region 6 inbound queues */ + pm8001_ha->memoryMap.region[OB].num_elements = 256; + pm8001_ha->memoryMap.region[OB].element_size = 64; + 
pm8001_ha->memoryMap.region[OB].total_len = 256 * 64; + pm8001_ha->memoryMap.region[OB].alignment = 64; + + /* Memory region write DMA*/ + pm8001_ha->memoryMap.region[NVMD].num_elements = 1; + pm8001_ha->memoryMap.region[NVMD].element_size = 4096; + pm8001_ha->memoryMap.region[NVMD].total_len = 4096; + /* Memory region for devices*/ + pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1; + pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES * + sizeof(struct pm8001_device); + pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES * + sizeof(struct pm8001_device); + + /* Memory region for ccb_info*/ + pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1; + pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB * + sizeof(struct pm8001_ccb_info); + pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB * + sizeof(struct pm8001_ccb_info); + + for (i = 0; i < USI_MAX_MEMCNT; i++) { + if (pm8001_mem_alloc(pm8001_ha->pdev, + &pm8001_ha->memoryMap.region[i].virt_ptr, + &pm8001_ha->memoryMap.region[i].phys_addr, + &pm8001_ha->memoryMap.region[i].phys_addr_hi, + &pm8001_ha->memoryMap.region[i].phys_addr_lo, + pm8001_ha->memoryMap.region[i].total_len, + pm8001_ha->memoryMap.region[i].alignment) != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Mem%d alloc failed\n", + i)); + goto err_out; + } + } + + pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr; + for (i = 0; i < PM8001_MAX_DEVICES; i++) { + pm8001_ha->devices[i].dev_type = NO_DEVICE; + pm8001_ha->devices[i].id = i; + pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; + pm8001_ha->devices[i].running_req = 0; + } + pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr; + for (i = 0; i < PM8001_MAX_CCB; i++) { + pm8001_ha->ccb_info[i].ccb_dma_handle = + pm8001_ha->memoryMap.region[CCB_MEM].phys_addr + + i * sizeof(struct pm8001_ccb_info); + ++pm8001_ha->tags_num; + } + pm8001_ha->flags = PM8001F_INIT_TIME; + /* Initialize tags */ + pm8001_tag_init(pm8001_ha); + return 0; +err_out: + return 1; +} + +/** + * pm8001_ioremap - remap the pci high physical address to kernal virtual + * address so that we can access them. + * @pm8001_ha:our hba structure. 
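+ *
+ * Physical BARs 1 and 3 are skipped in the loop below; they are
+ * presumably the upper halves of 64-bit BARs 0 and 2, so the six PCI
+ * BARs collapse into four logical windows (io_mem[0..3]).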
+ */ +static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) +{ + u32 bar; + u32 logicalBar = 0; + struct pci_dev *pdev; + + pdev = pm8001_ha->pdev; + /* map pci mem (PMC pci base 0-3)*/ + for (bar = 0; bar < 6; bar++) { + /* + ** logical BARs for SPC: + ** bar 0 and 1 - logical BAR0 + ** bar 2 and 3 - logical BAR1 + ** bar4 - logical BAR2 + ** bar5 - logical BAR3 + ** Skip the appropriate assignments: + */ + if ((bar == 1) || (bar == 3)) + continue; + if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { + pm8001_ha->io_mem[logicalBar].membase = + pci_resource_start(pdev, bar); + pm8001_ha->io_mem[logicalBar].membase &= + (u32)PCI_BASE_ADDRESS_MEM_MASK; + pm8001_ha->io_mem[logicalBar].memsize = + pci_resource_len(pdev, bar); + pm8001_ha->io_mem[logicalBar].memvirtaddr = + ioremap(pm8001_ha->io_mem[logicalBar].membase, + pm8001_ha->io_mem[logicalBar].memsize); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PCI: bar %d, logicalBar %d " + "virt_addr=%lx,len=%d\n", bar, logicalBar, + (unsigned long) + pm8001_ha->io_mem[logicalBar].memvirtaddr, + pm8001_ha->io_mem[logicalBar].memsize)); + } else { + pm8001_ha->io_mem[logicalBar].membase = 0; + pm8001_ha->io_mem[logicalBar].memsize = 0; + pm8001_ha->io_mem[logicalBar].memvirtaddr = 0; + } + logicalBar++; + } + return 0; +} + +/** + * pm8001_pci_alloc - initialize our ha card structure + * @pdev: pci device. + * @ent: ent + * @shost: scsi host struct which has been initialized before. + */ +static struct pm8001_hba_info *__devinit +pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost) +{ + struct pm8001_hba_info *pm8001_ha; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + + pm8001_ha = sha->lldd_ha; + if (!pm8001_ha) + return NULL; + + pm8001_ha->pdev = pdev; + pm8001_ha->dev = &pdev->dev; + pm8001_ha->chip_id = chip_id; + pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; + pm8001_ha->irq = pdev->irq; + pm8001_ha->sas = sha; + pm8001_ha->shost = shost; + pm8001_ha->id = pm8001_id++; + INIT_LIST_HEAD(&pm8001_ha->wq_list); + pm8001_ha->logging_level = 0x01; + sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); +#ifdef PM8001_USE_TASKLET + tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, + (unsigned long)pm8001_ha); +#endif + pm8001_ioremap(pm8001_ha); + if (!pm8001_alloc(pm8001_ha)) + return pm8001_ha; + pm8001_free(pm8001_ha); + return NULL; +} + +/** + * pci_go_44 - pm8001 specified, its DMA is 44 bit rather than 64 bit + * @pdev: pci device. + */ +static int pci_go_44(struct pci_dev *pdev) +{ + int rc; + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44)); + if (rc) { + rc = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "44-bit DMA enable failed\n"); + return rc; + } + } + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit DMA enable failed\n"); + return rc; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit consistent DMA enable failed\n"); + return rc; + } + } + return rc; +} + +/** + * pm8001_prep_sas_ha_init - allocate memory in general hba struct && init them. + * @shost: scsi host which has been allocated outside. + * @chip_info: our ha struct. 
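+ *
+ * Returns 0 on success, or -1 when the phy/port pointer arrays or the
+ * pm8001_hba_info structure cannot be allocated.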
+ */ +static int __devinit pm8001_prep_sas_ha_init(struct Scsi_Host * shost, + const struct pm8001_chip_info *chip_info) +{ + int phy_nr, port_nr; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + phy_nr = chip_info->n_phy; + port_nr = phy_nr; + memset(sha, 0x00, sizeof(*sha)); + arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); + if (!arr_phy) + goto exit; + arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); + if (!arr_port) + goto exit_free2; + + sha->sas_phy = arr_phy; + sha->sas_port = arr_port; + sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL); + if (!sha->lldd_ha) + goto exit_free1; + + shost->transportt = pm8001_stt; + shost->max_id = PM8001_MAX_DEVICES; + shost->max_lun = 8; + shost->max_channel = 0; + shost->unique_id = pm8001_id; + shost->max_cmd_len = 16; + shost->can_queue = PM8001_CAN_QUEUE; + shost->cmd_per_lun = 32; + return 0; +exit_free1: + kfree(arr_port); +exit_free2: + kfree(arr_phy); +exit: + return -1; +} + +/** + * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas + * @shost: scsi host which has been allocated outside + * @chip_info: our ha struct. + */ +static void __devinit pm8001_post_sas_ha_init(struct Scsi_Host *shost, + const struct pm8001_chip_info *chip_info) +{ + int i = 0; + struct pm8001_hba_info *pm8001_ha; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + pm8001_ha = sha->lldd_ha; + for (i = 0; i < chip_info->n_phy; i++) { + sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy; + sha->sas_port[i] = &pm8001_ha->port[i].sas_port; + } + sha->sas_ha_name = DRV_NAME; + sha->dev = pm8001_ha->dev; + + sha->lldd_module = THIS_MODULE; + sha->sas_addr = &pm8001_ha->sas_addr[0]; + sha->num_phys = chip_info->n_phy; + sha->lldd_max_execute_num = 1; + sha->lldd_queue_size = PM8001_CAN_QUEUE; + sha->core.shost = shost; +} + +/** + * pm8001_init_sas_add - initialize sas address + * @chip_info: our ha struct. + * + * Currently we just set the fixed SAS address to our HBA,for manufacture, + * it should read from the EEPROM + */ +static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) +{ + u8 i; +#ifdef PM8001_READ_VPD + DECLARE_COMPLETION_ONSTACK(completion); + pm8001_ha->nvmd_completion = &completion; + PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0); + wait_for_completion(&completion); + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, + SAS_ADDR_SIZE); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("phy %d sas_addr = %x \n", i, + (u64)pm8001_ha->phy[i].dev_sas_addr)); + } +#else + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL; + pm8001_ha->phy[i].dev_sas_addr = + cpu_to_be64((u64) + (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr)); + } + memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr, + SAS_ADDR_SIZE); +#endif +} + +#ifdef PM8001_USE_MSIX +/** + * pm8001_setup_msix - enable MSI-X interrupt + * @chip_info: our ha struct. 
+ * @irq_handler: irq_handler + */ +static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, + irq_handler_t irq_handler) +{ + u32 i = 0, j = 0; + u32 number_of_intr = 1; + int flag = 0; + u32 max_entry; + int rc; + max_entry = sizeof(pm8001_ha->msix_entries) / + sizeof(pm8001_ha->msix_entries[0]); + flag |= IRQF_DISABLED; + for (i = 0; i < max_entry ; i++) + pm8001_ha->msix_entries[i].entry = i; + rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, + number_of_intr); + pm8001_ha->number_of_intr = number_of_intr; + if (!rc) { + for (i = 0; i < number_of_intr; i++) { + if (request_irq(pm8001_ha->msix_entries[i].vector, + irq_handler, flag, DRV_NAME, + SHOST_TO_SAS_HA(pm8001_ha->shost))) { + for (j = 0; j < i; j++) + free_irq( + pm8001_ha->msix_entries[j].vector, + SHOST_TO_SAS_HA(pm8001_ha->shost)); + pci_disable_msix(pm8001_ha->pdev); + break; + } + } + } + return rc; +} +#endif + +/** + * pm8001_request_irq - register interrupt + * @chip_info: our ha struct. + */ +static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) +{ + struct pci_dev *pdev; + irq_handler_t irq_handler = pm8001_interrupt; + u32 rc; + + pdev = pm8001_ha->pdev; + +#ifdef PM8001_USE_MSIX + if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) + return pm8001_setup_msix(pm8001_ha, irq_handler); + else + goto intx; +#endif + +intx: + /* intialize the INT-X interrupt */ + rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, + SHOST_TO_SAS_HA(pm8001_ha->shost)); + return rc; +} + +/** + * pm8001_pci_probe - probe supported device + * @pdev: pci device which kernel has been prepared for. + * @ent: pci device id + * + * This function is the main initialization function, when register a new + * pci driver it is invoked, all struct an hardware initilization should be done + * here, also, register interrupt + */ +static int __devinit pm8001_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + unsigned int rc; + u32 pci_reg; + struct pm8001_hba_info *pm8001_ha; + struct Scsi_Host *shost = NULL; + const struct pm8001_chip_info *chip; + + dev_printk(KERN_INFO, &pdev->dev, + "pm8001: driver version %s\n", DRV_VERSION); + rc = pci_enable_device(pdev); + if (rc) + goto err_out_enable; + pci_set_master(pdev); + /* + * Enable pci slot busmaster by setting pci command register. + * This is required by FW for Cyclone card. 
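+	 * The value 0x157 sets I/O space, memory space and bus master
+	 * enable in PCI_COMMAND, plus (presumably also for the firmware's
+	 * benefit) memory write-invalidate, parity error response and
+	 * SERR# enable.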
+ */ + + pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg); + pci_reg |= 0x157; + pci_write_config_dword(pdev, PCI_COMMAND, pci_reg); + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_disable; + rc = pci_go_44(pdev); + if (rc) + goto err_out_regions; + + shost = scsi_host_alloc(&pm8001_sht, sizeof(void *)); + if (!shost) { + rc = -ENOMEM; + goto err_out_regions; + } + chip = &pm8001_chips[ent->driver_data]; + SHOST_TO_SAS_HA(shost) = + kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); + if (!SHOST_TO_SAS_HA(shost)) { + rc = -ENOMEM; + goto err_out_free_host; + } + + rc = pm8001_prep_sas_ha_init(shost, chip); + if (rc) { + rc = -ENOMEM; + goto err_out_free; + } + pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); + pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost); + if (!pm8001_ha) { + rc = -ENOMEM; + goto err_out_free; + } + list_add_tail(&pm8001_ha->list, &hba_list); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); + if (rc) + goto err_out_ha_free; + + rc = scsi_add_host(shost, &pdev->dev); + if (rc) + goto err_out_ha_free; + rc = pm8001_request_irq(pm8001_ha); + if (rc) + goto err_out_shost; + + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); + pm8001_init_sas_add(pm8001_ha); + pm8001_post_sas_ha_init(shost, chip); + rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); + if (rc) + goto err_out_shost; + scsi_scan_host(pm8001_ha->shost); + return 0; + +err_out_shost: + scsi_remove_host(pm8001_ha->shost); +err_out_ha_free: + pm8001_free(pm8001_ha); +err_out_free: + kfree(SHOST_TO_SAS_HA(shost)); +err_out_free_host: + kfree(shost); +err_out_regions: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out_enable: + return rc; +} + +static void __devexit pm8001_pci_remove(struct pci_dev *pdev) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct pm8001_hba_info *pm8001_ha; + int i; + pm8001_ha = sha->lldd_ha; + pci_set_drvdata(pdev, NULL); + sas_unregister_ha(sha); + sas_remove_host(pm8001_ha->shost); + list_del(&pm8001_ha->list); + scsi_remove_host(pm8001_ha->shost); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + +#ifdef PM8001_USE_MSIX + for (i = 0; i < pm8001_ha->number_of_intr; i++) + synchronize_irq(pm8001_ha->msix_entries[i].vector); + for (i = 0; i < pm8001_ha->number_of_intr; i++) + free_irq(pm8001_ha->msix_entries[i].vector, sha); + pci_disable_msix(pdev); +#else + free_irq(pm8001_ha->irq, sha); +#endif +#ifdef PM8001_USE_TASKLET + tasklet_kill(&pm8001_ha->tasklet); +#endif + pm8001_free(pm8001_ha); + kfree(sha->sas_phy); + kfree(sha->sas_port); + kfree(sha); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +/** + * pm8001_pci_suspend - power management suspend main entry point + * @pdev: PCI device struct + * @state: PM state change to (usually PCI_D3) + * + * Returns 0 success, anything else error. 
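+ *
+ * Blocks new requests, soft-resets the chip, frees the MSI-X/INTx
+ * interrupts and kills the tasklet, then saves PCI state and moves the
+ * device to the chosen low-power state.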
+ */ +static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct pm8001_hba_info *pm8001_ha; + int i , pos; + u32 device_state; + pm8001_ha = sha->lldd_ha; + flush_scheduled_work(); + scsi_block_requests(pm8001_ha->shost); + pos = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pos == 0) { + printk(KERN_ERR " PCI PM not supported\n"); + return -ENODEV; + } + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); +#ifdef PM8001_USE_MSIX + for (i = 0; i < pm8001_ha->number_of_intr; i++) + synchronize_irq(pm8001_ha->msix_entries[i].vector); + for (i = 0; i < pm8001_ha->number_of_intr; i++) + free_irq(pm8001_ha->msix_entries[i].vector, sha); + pci_disable_msix(pdev); +#else + free_irq(pm8001_ha->irq, sha); +#endif +#ifdef PM8001_USE_TASKLET + tasklet_kill(&pm8001_ha->tasklet); +#endif + device_state = pci_choose_state(pdev, state); + pm8001_printk("pdev=0x%p, slot=%s, entering " + "operating state [D%d]\n", pdev, + pm8001_ha->name, device_state); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, device_state); + return 0; +} + +/** + * pm8001_pci_resume - power management resume main entry point + * @pdev: PCI device struct + * + * Returns 0 success, anything else error. + */ +static int pm8001_pci_resume(struct pci_dev *pdev) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct pm8001_hba_info *pm8001_ha; + int rc; + u32 device_state; + pm8001_ha = sha->lldd_ha; + device_state = pdev->current_state; + + pm8001_printk("pdev=0x%p, slot=%s, resuming from previous " + "operating state [D%d]\n", pdev, pm8001_ha->name, device_state); + + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + rc = pci_enable_device(pdev); + if (rc) { + pm8001_printk("slot=%s Enable device failed during resume\n", + pm8001_ha->name); + goto err_out_enable; + } + + pci_set_master(pdev); + rc = pci_go_44(pdev); + if (rc) + goto err_out_disable; + + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); + if (rc) + goto err_out_disable; + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); + rc = pm8001_request_irq(pm8001_ha); + if (rc) + goto err_out_disable; + #ifdef PM8001_USE_TASKLET + tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, + (unsigned long)pm8001_ha); + #endif + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); + scsi_unblock_requests(pm8001_ha->shost); + return 0; + +err_out_disable: + scsi_remove_host(pm8001_ha->shost); + pci_disable_device(pdev); +err_out_enable: + return rc; +} + +static struct pci_device_id __devinitdata pm8001_pci_table[] = { + { + PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 + }, + { + PCI_DEVICE(0x117c, 0x0042), + .driver_data = chip_8001 + }, + {} /* terminate list */ +}; + +static struct pci_driver pm8001_pci_driver = { + .name = DRV_NAME, + .id_table = pm8001_pci_table, + .probe = pm8001_pci_probe, + .remove = __devexit_p(pm8001_pci_remove), + .suspend = pm8001_pci_suspend, + .resume = pm8001_pci_resume, +}; + +/** + * pm8001_init - initialize scsi transport template + */ +static int __init pm8001_init(void) +{ + int rc; + pm8001_id = 0; + pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops); + if (!pm8001_stt) + return -ENOMEM; + rc = pci_register_driver(&pm8001_pci_driver); + if (rc) + goto err_out; + return 0; +err_out: + sas_release_transport(pm8001_stt); + return rc; +} + +static void __exit pm8001_exit(void) 
+{ + pci_unregister_driver(&pm8001_pci_driver); + sas_release_transport(pm8001_stt); +} + +module_init(pm8001_init); +module_exit(pm8001_exit); + +MODULE_AUTHOR("Jack Wang "); +MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, pm8001_pci_table); + diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c new file mode 100644 index 000000000000..7bf30fa6963a --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -0,0 +1,1104 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#include "pm8001_sas.h" + +/** + * pm8001_find_tag - from sas task to find out tag that belongs to this task + * @task: the task sent to the LLDD + * @tag: the found tag associated with the task + */ +static int pm8001_find_tag(struct sas_task *task, u32 *tag) +{ + if (task->lldd_task) { + struct pm8001_ccb_info *ccb; + ccb = task->lldd_task; + *tag = ccb->ccb_tag; + return 1; + } + return 0; +} + +/** + * pm8001_tag_clear - clear the tags bitmap + * @pm8001_ha: our hba struct + * @tag: the found tag associated with the task + */ +static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag) +{ + void *bitmap = pm8001_ha->tags; + clear_bit(tag, bitmap); +} + +static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) +{ + pm8001_tag_clear(pm8001_ha, tag); +} + +static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag) +{ + void *bitmap = pm8001_ha->tags; + set_bit(tag, bitmap); +} + +/** + * pm8001_tag_alloc - allocate a empty tag for task used. 
+ * @pm8001_ha: our hba struct + * @tag_out: the found empty tag . + */ +inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) +{ + unsigned int index, tag; + void *bitmap = pm8001_ha->tags; + + index = find_first_zero_bit(bitmap, pm8001_ha->tags_num); + tag = index; + if (tag >= pm8001_ha->tags_num) + return -SAS_QUEUE_FULL; + pm8001_tag_set(pm8001_ha, tag); + *tag_out = tag; + return 0; +} + +void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha) +{ + int i; + for (i = 0; i < pm8001_ha->tags_num; ++i) + pm8001_tag_clear(pm8001_ha, i); +} + + /** + * pm8001_mem_alloc - allocate memory for pm8001. + * @pdev: pci device. + * @virt_addr: the allocated virtual address + * @pphys_addr_hi: the physical address high byte address. + * @pphys_addr_lo: the physical address low byte address. + * @mem_size: memory size. + */ +int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, + dma_addr_t *pphys_addr, u32 *pphys_addr_hi, + u32 *pphys_addr_lo, u32 mem_size, u32 align) +{ + caddr_t mem_virt_alloc; + dma_addr_t mem_dma_handle; + u64 phys_align; + u64 align_offset = 0; + if (align) + align_offset = (dma_addr_t)align - 1; + mem_virt_alloc = + pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle); + if (!mem_virt_alloc) { + pm8001_printk("memory allocation error\n"); + return -1; + } + memset((void *)mem_virt_alloc, 0, mem_size+align); + *pphys_addr = mem_dma_handle; + phys_align = (*pphys_addr + align_offset) & ~align_offset; + *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr; + *pphys_addr_hi = upper_32_bits(phys_align); + *pphys_addr_lo = lower_32_bits(phys_align); + return 0; +} +/** + * pm8001_find_ha_by_dev - from domain device which come from sas layer to + * find out our hba struct. + * @dev: the domain device which from sas layer. + */ +static +struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev) +{ + struct sas_ha_struct *sha = dev->port->ha; + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + return pm8001_ha; +} + +/** + * pm8001_phy_control - this function should be registered to + * sas_domain_function_template to provide libsas used, note: this is just + * control the HBA phy rather than other expander phy if you want control + * other phy, you should use SMP command. + * @sas_phy: which phy in HBA phys. + * @func: the operation. + * @funcdata: always NULL. 
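+ *
+ * Returns 0 on success or -EOPNOTSUPP for an unsupported phy operation.
+ * Note that the function may sleep: it waits for phy-start completions
+ * and msleep()s before returning.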
+ */ +int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + int rc = 0, phy_id = sas_phy->id; + struct pm8001_hba_info *pm8001_ha = NULL; + struct sas_phy_linkrates *rates; + DECLARE_COMPLETION_ONSTACK(completion); + pm8001_ha = sas_phy->ha->lldd_ha; + pm8001_ha->phy[phy_id].enable_completion = &completion; + switch (func) { + case PHY_FUNC_SET_LINK_RATE: + rates = funcdata; + if (rates->minimum_linkrate) { + pm8001_ha->phy[phy_id].minimum_linkrate = + rates->minimum_linkrate; + } + if (rates->maximum_linkrate) { + pm8001_ha->phy[phy_id].maximum_linkrate = + rates->maximum_linkrate; + } + if (pm8001_ha->phy[phy_id].phy_state == 0) { + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); + wait_for_completion(&completion); + } + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_LINK_RESET); + break; + case PHY_FUNC_HARD_RESET: + if (pm8001_ha->phy[phy_id].phy_state == 0) { + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); + wait_for_completion(&completion); + } + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_HARD_RESET); + break; + case PHY_FUNC_LINK_RESET: + if (pm8001_ha->phy[phy_id].phy_state == 0) { + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); + wait_for_completion(&completion); + } + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_LINK_RESET); + break; + case PHY_FUNC_RELEASE_SPINUP_HOLD: + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_LINK_RESET); + break; + case PHY_FUNC_DISABLE: + PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id); + break; + default: + rc = -EOPNOTSUPP; + } + msleep(300); + return rc; +} + +int pm8001_slave_alloc(struct scsi_device *scsi_dev) +{ + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); + if (dev_is_sata(dev)) { + /* We don't need to rescan targets + * if REPORT_LUNS request is failed + */ + if (scsi_dev->lun > 0) + return -ENXIO; + scsi_dev->tagged_supported = 1; + } + return sas_slave_alloc(scsi_dev); +} + +/** + * pm8001_scan_start - we should enable all HBA phys by sending the phy_start + * command to HBA. + * @shost: the scsi host data. 
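+ *
+ * The matching pm8001_scan_finished() below gives the phy-up events
+ * roughly one second (HZ jiffies) to arrive before flushing the
+ * discovery work and declaring the scan complete.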
+ */ +void pm8001_scan_start(struct Scsi_Host *shost) +{ + int i; + struct pm8001_hba_info *pm8001_ha; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + pm8001_ha = sha->lldd_ha; + for (i = 0; i < pm8001_ha->chip->n_phy; ++i) + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); +} + +int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + /* give the phy enabling interrupt event time to come in (1s + * is empirically about all it takes) */ + if (time < HZ) + return 0; + /* Wait for discovery to finish */ + scsi_flush_work(shost); + return 1; +} + +/** + * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to smp task + */ +static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb); +} + +u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag) +{ + struct ata_queued_cmd *qc = task->uldd_task; + if (qc) { + if (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ) { + *tag = qc->tag; + return 1; + } + } + return 0; +} + +/** + * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to sata task + */ +static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb); +} + +/** + * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to TM + * @tmf: the task management IU + */ +static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) +{ + return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf); +} + +/** + * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to ssp task + */ +static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb); +} +int pm8001_slave_configure(struct scsi_device *sdev) +{ + struct domain_device *dev = sdev_to_domain_dev(sdev); + int ret = sas_slave_configure(sdev); + if (ret) + return ret; + if (dev_is_sata(dev)) { + #ifdef PM8001_DISABLE_NCQ + struct ata_port *ap = dev->sata_dev.ap; + struct ata_device *adev = ap->link.device; + adev->flags |= ATA_DFLAG_NCQ_OFF; + scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); + #endif + } + return 0; +} +/** + * pm8001_task_exec -execute the task which come from upper level, send the + * command or data to DMA area and then increase CI,for queuecommand(ssp), + * it is from upper layer and for smp command,it is from libsas, + * for ata command it is from libata. + * @task: the task to be execute. + * @num: if can_queue great than 1, the task can be queued up. for SMP task, + * we always execute one one time. + * @gfp_flags: gfp_flags. + * @is tmf: if it is task management task. 
+ * @tmf: the task management IU + */ +#define DEV_IS_GONE(pm8001_dev) \ + ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) +static int pm8001_task_exec(struct sas_task *task, const int num, + gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) +{ + struct domain_device *dev = task->dev; + struct pm8001_hba_info *pm8001_ha; + struct pm8001_device *pm8001_dev; + struct sas_task *t = task; + struct pm8001_ccb_info *ccb; + u32 tag = 0xdeadbeef, rc, n_elem = 0; + u32 n = num; + unsigned long flags = 0; + + if (!dev->port) { + struct task_status_struct *tsm = &t->task_status; + tsm->resp = SAS_TASK_UNDELIVERED; + tsm->stat = SAS_PHY_DOWN; + if (dev->dev_type != SATA_DEV) + t->task_done(t); + return 0; + } + pm8001_ha = pm8001_find_ha_by_dev(task->dev); + PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n ")); + spin_lock_irqsave(&pm8001_ha->lock, flags); + do { + dev = t->dev; + pm8001_dev = dev->lldd_dev; + if (DEV_IS_GONE(pm8001_dev)) { + if (pm8001_dev) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("device %d not ready.\n", + pm8001_dev->device_id)); + } else { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("device %016llx not " + "ready.\n", SAS_ADDR(dev->sas_addr))); + } + rc = SAS_PHY_DOWN; + goto out_done; + } + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + goto err_out; + ccb = &pm8001_ha->ccb_info[tag]; + + if (!sas_protocol_ata(t->task_proto)) { + if (t->num_scatter) { + n_elem = dma_map_sg(pm8001_ha->dev, + t->scatter, + t->num_scatter, + t->data_dir); + if (!n_elem) { + rc = -ENOMEM; + goto err_out; + } + } + } else { + n_elem = t->num_scatter; + } + + t->lldd_task = NULL; + ccb->n_elem = n_elem; + ccb->ccb_tag = tag; + ccb->task = t; + switch (t->task_proto) { + case SAS_PROTOCOL_SMP: + rc = pm8001_task_prep_smp(pm8001_ha, ccb); + break; + case SAS_PROTOCOL_SSP: + if (is_tmf) + rc = pm8001_task_prep_ssp_tm(pm8001_ha, + ccb, tmf); + else + rc = pm8001_task_prep_ssp(pm8001_ha, ccb); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + rc = pm8001_task_prep_ata(pm8001_ha, ccb); + break; + default: + dev_printk(KERN_ERR, pm8001_ha->dev, + "unknown sas_task proto: 0x%x\n", + t->task_proto); + rc = -EINVAL; + break; + } + + if (rc) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("rc is %x\n", rc)); + goto err_out_tag; + } + t->lldd_task = ccb; + /* TODO: select normal or high priority */ + spin_lock(&t->task_state_lock); + t->task_state_flags |= SAS_TASK_AT_INITIATOR; + spin_unlock(&t->task_state_lock); + pm8001_dev->running_req++; + if (n > 1) + t = list_entry(t->list.next, struct sas_task, list); + } while (--n); + rc = 0; + goto out_done; + +err_out_tag: + pm8001_tag_free(pm8001_ha, tag); +err_out: + dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc); + if (!sas_protocol_ata(t->task_proto)) + if (n_elem) + dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem, + t->data_dir); +out_done: + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return rc; +} + +/** + * pm8001_queue_command - register for upper layer used, all IO commands sent + * to HBA are from this interface. + * @task: the task to be execute. + * @num: if can_queue great than 1, the task can be queued up. 
for SMP task, + * we always execute one one time + * @gfp_flags: gfp_flags + */ +int pm8001_queue_command(struct sas_task *task, const int num, + gfp_t gfp_flags) +{ + return pm8001_task_exec(task, num, gfp_flags, 0, NULL); +} + +void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx) +{ + pm8001_tag_clear(pm8001_ha, ccb_idx); +} + +/** + * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb. + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to ssp task + * @task: the task to be free. + * @ccb_idx: ccb index. + */ +void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, + struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx) +{ + if (!ccb->task) + return; + if (!sas_protocol_ata(task->task_proto)) + if (ccb->n_elem) + dma_unmap_sg(pm8001_ha->dev, task->scatter, + task->num_scatter, task->data_dir); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); + dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SSP: + default: + /* do nothing */ + break; + } + task->lldd_task = NULL; + ccb->task = NULL; + ccb->ccb_tag = 0xFFFFFFFF; + pm8001_ccb_free(pm8001_ha, ccb_idx); +} + + /** + * pm8001_alloc_dev - find the empty pm8001_device structure, allocate and + * return it for use. + * @pm8001_ha: our hba card information + */ +struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) +{ + u32 dev; + for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { + if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { + pm8001_ha->devices[dev].id = dev; + return &pm8001_ha->devices[dev]; + } + } + if (dev == PM8001_MAX_DEVICES) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("max support %d devices, ignore ..\n", + PM8001_MAX_DEVICES)); + } + return NULL; +} + +static void pm8001_free_dev(struct pm8001_device *pm8001_dev) +{ + u32 id = pm8001_dev->id; + memset(pm8001_dev, 0, sizeof(*pm8001_dev)); + pm8001_dev->id = id; + pm8001_dev->dev_type = NO_DEVICE; + pm8001_dev->device_id = PM8001_MAX_DEVICES; + pm8001_dev->sas_device = NULL; +} + +/** + * pm8001_dev_found_notify - when libsas find a sas domain device, it should + * tell the LLDD that device is found, and then LLDD register this device to + * HBA FW by the command "OPC_INB_REG_DEV", after that the HBA will assign + * a device ID(according to device's sas address) and returned it to LLDD.from + * now on, we communicate with HBA FW with the device ID which HBA assigned + * rather than sas address. it is the neccessary step for our HBA but it is + * the optional for other HBA driver. + * @dev: the device structure which sas layer used. 
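+ *
+ * Returns 0 once the device has been registered with the firmware, or
+ * -1 when no free pm8001_device slot is available or, for an expander
+ * attached device, no parent phy with a matching SAS address is found.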
+ */ +static int pm8001_dev_found_notify(struct domain_device *dev) +{ + unsigned long flags = 0; + int res = 0; + struct pm8001_hba_info *pm8001_ha = NULL; + struct domain_device *parent_dev = dev->parent; + struct pm8001_device *pm8001_device; + DECLARE_COMPLETION_ONSTACK(completion); + u32 flag = 0; + pm8001_ha = pm8001_find_ha_by_dev(dev); + spin_lock_irqsave(&pm8001_ha->lock, flags); + + pm8001_device = pm8001_alloc_dev(pm8001_ha); + pm8001_device->sas_device = dev; + if (!pm8001_device) { + res = -1; + goto found_out; + } + dev->lldd_dev = pm8001_device; + pm8001_device->dev_type = dev->dev_type; + pm8001_device->dcompletion = &completion; + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { + int phy_id; + struct ex_phy *phy; + for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys; + phy_id++) { + phy = &parent_dev->ex_dev.ex_phy[phy_id]; + if (SAS_ADDR(phy->attached_sas_addr) + == SAS_ADDR(dev->sas_addr)) { + pm8001_device->attached_phy = phy_id; + break; + } + } + if (phy_id == parent_dev->ex_dev.num_phys) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Error: no attached dev:%016llx" + " at ex:%016llx.\n", SAS_ADDR(dev->sas_addr), + SAS_ADDR(parent_dev->sas_addr))); + res = -1; + } + } else { + if (dev->dev_type == SATA_DEV) { + pm8001_device->attached_phy = + dev->rphy->identify.phy_identifier; + flag = 1; /* directly sata*/ + } + } /*register this device to HBA*/ + PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device \n")); + PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + wait_for_completion(&completion); + if (dev->dev_type == SAS_END_DEV) + msleep(50); + pm8001_ha->flags = PM8001F_RUN_TIME ; + return 0; +found_out: + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return res; +} + +int pm8001_dev_found(struct domain_device *dev) +{ + return pm8001_dev_found_notify(dev); +} + +/** + * pm8001_alloc_task - allocate a task structure for TMF + */ +static struct sas_task *pm8001_alloc_task(void) +{ + struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL); + if (task) { + INIT_LIST_HEAD(&task->list); + spin_lock_init(&task->task_state_lock); + task->task_state_flags = SAS_TASK_STATE_PENDING; + init_timer(&task->timer); + init_completion(&task->completion); + } + return task; +} + +static void pm8001_free_task(struct sas_task *task) +{ + if (task) { + BUG_ON(!list_empty(&task->list)); + kfree(task); + } +} + +static void pm8001_task_done(struct sas_task *task) +{ + if (!del_timer(&task->timer)) + return; + complete(&task->completion); +} + +static void pm8001_tmf_timedout(unsigned long data) +{ + struct sas_task *task = (struct sas_task *)data; + + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + complete(&task->completion); +} + +#define PM8001_TASK_TIMEOUT 20 +/** + * pm8001_exec_internal_tmf_task - when errors or exception happened, we may + * want to do something, for example abort issued task which result in this + * execption, this is by calling this function, note it is also with the task + * execute interface. + * @dev: the wanted device. + * @tmf: which task management wanted to be take. + * @para_len: para_len. + * @parameter: ssp task parameter. 
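+ *
+ * The TMF is retried up to three times. Returns TMF_RESP_FUNC_COMPLETE
+ * on success, the residual byte count on a data underrun, -EMSGSIZE on
+ * a data overrun, or a negative/TMF failure code otherwise.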
+ */ +static int pm8001_exec_internal_tmf_task(struct domain_device *dev, + void *parameter, u32 para_len, struct pm8001_tmf_task *tmf) +{ + int res, retry; + struct sas_task *task = NULL; + struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + + for (retry = 0; retry < 3; retry++) { + task = pm8001_alloc_task(); + if (!task) + return -ENOMEM; + + task->dev = dev; + task->task_proto = dev->tproto; + memcpy(&task->ssp_task, parameter, para_len); + task->task_done = pm8001_task_done; + task->timer.data = (unsigned long)task; + task->timer.function = pm8001_tmf_timedout; + task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; + add_timer(&task->timer); + + res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf); + + if (res) { + del_timer(&task->timer); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Executing internal task " + "failed\n")); + goto ex_err; + } + wait_for_completion(&task->completion); + res = -TMF_RESP_FUNC_FAILED; + /* Even TMF timed out, return direct. */ + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TMF task[%x]timeout.\n", + tmf->tmf)); + goto ex_err; + } + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAM_GOOD) { + res = TMF_RESP_FUNC_COMPLETE; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_UNDERRUN) { + /* no error, but return the number of bytes of + * underrun */ + res = task->task_status.residual; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_OVERRUN) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Blocked task error.\n")); + res = -EMSGSIZE; + break; + } else { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk(" Task to dev %016llx response: 0x%x" + "status 0x%x\n", + SAS_ADDR(dev->sas_addr), + task->task_status.resp, + task->task_status.stat)); + pm8001_free_task(task); + task = NULL; + } + } +ex_err: + BUG_ON(retry == 3 && task != NULL); + if (task != NULL) + pm8001_free_task(task); + return res; +} + +static int +pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag, + u32 task_tag) +{ + int res, retry; + u32 rc, ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + + for (retry = 0; retry < 3; retry++) { + task = pm8001_alloc_task(); + if (!task) + return -ENOMEM; + + task->dev = dev; + task->task_proto = dev->tproto; + task->task_done = pm8001_task_done; + task->timer.data = (unsigned long)task; + task->timer.function = pm8001_tmf_timedout; + task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; + add_timer(&task->timer); + + rc = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (rc) + return rc; + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + res = PM8001_CHIP_DISP->task_abort(pm8001_ha, + pm8001_dev, flag, task_tag, ccb_tag); + + if (res) { + del_timer(&task->timer); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Executing internal task " + "failed\n")); + goto ex_err; + } + wait_for_completion(&task->completion); + res = TMF_RESP_FUNC_FAILED; + /* Even TMF timed out, return direct. 
*/ + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TMF task timeout.\n")); + goto ex_err; + } + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAM_GOOD) { + res = TMF_RESP_FUNC_COMPLETE; + break; + + } else { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk(" Task to dev %016llx response: " + "0x%x status 0x%x\n", + SAS_ADDR(dev->sas_addr), + task->task_status.resp, + task->task_status.stat)); + pm8001_free_task(task); + task = NULL; + } + } +ex_err: + BUG_ON(retry == 3 && task != NULL); + if (task != NULL) + pm8001_free_task(task); + return res; +} + +/** + * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify" + * @dev: the device structure which sas layer used. + */ +static void pm8001_dev_gone_notify(struct domain_device *dev) +{ + unsigned long flags = 0; + u32 tag; + struct pm8001_hba_info *pm8001_ha; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + u32 device_id = pm8001_dev->device_id; + pm8001_ha = pm8001_find_ha_by_dev(dev); + spin_lock_irqsave(&pm8001_ha->lock, flags); + pm8001_tag_alloc(pm8001_ha, &tag); + if (pm8001_dev) { + PM8001_DISC_DBG(pm8001_ha, + pm8001_printk("found dev[%d:%x] is gone.\n", + pm8001_dev->device_id, pm8001_dev->dev_type)); + if (pm8001_dev->running_req) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + spin_lock_irqsave(&pm8001_ha->lock, flags); + } + PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); + pm8001_free_dev(pm8001_dev); + } else { + PM8001_DISC_DBG(pm8001_ha, + pm8001_printk("Found dev has gone.\n")); + } + dev->lldd_dev = NULL; + spin_unlock_irqrestore(&pm8001_ha->lock, flags); +} + +void pm8001_dev_gone(struct domain_device *dev) +{ + pm8001_dev_gone_notify(dev); +} + +static int pm8001_issue_ssp_tmf(struct domain_device *dev, + u8 *lun, struct pm8001_tmf_task *tmf) +{ + struct sas_ssp_task ssp_task; + if (!(dev->tproto & SAS_PROTOCOL_SSP)) + return TMF_RESP_FUNC_ESUPP; + + strncpy((u8 *)&ssp_task.LUN, lun, 8); + return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task), + tmf); +} + +/** + * Standard mandates link reset for ATA (type 0) and hard reset for + * SSP (type 1) , only for RECOVERY + */ +int pm8001_I_T_nexus_reset(struct domain_device *dev) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev; + struct pm8001_hba_info *pm8001_ha; + struct sas_phy *phy; + if (!dev || !dev->lldd_dev) + return -1; + + pm8001_dev = dev->lldd_dev; + pm8001_ha = pm8001_find_ha_by_dev(dev); + phy = sas_find_local_phy(dev); + + if (dev_is_sata(dev)) { + DECLARE_COMPLETION_ONSTACK(completion_setstate); + rc = sas_phy_reset(phy, 1); + msleep(2000); + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + pm8001_dev->setds_completion = &completion_setstate; + rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, 0x01); + wait_for_completion(&completion_setstate); + } else{ + rc = sas_phy_reset(phy, 1); + msleep(2000); + } + PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc)); + return rc; +} + +/* mandatory SAM-3, the task reset the specified LUN*/ +int pm8001_lu_reset(struct domain_device *dev, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_tmf_task tmf_task; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + if 
(dev_is_sata(dev)) { + struct sas_phy *phy = sas_find_local_phy(dev); + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + rc = sas_phy_reset(phy, 1); + rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, 0x01); + msleep(2000); + } else { + tmf_task.tmf = TMF_LU_RESET; + rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); + } + /* If this fails, fall through to I_T nexus reset */ + PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc)); + return rc; +} + +/* optional SAM-3 */ +int pm8001_query_task(struct sas_task *task) +{ + u32 tag = 0xdeadbeef; + int i = 0; + struct scsi_lun lun; + struct pm8001_tmf_task tmf_task; + int rc = TMF_RESP_FUNC_FAILED; + if (unlikely(!task || !task->lldd_task || !task->dev)) + return rc; + + if (task->task_proto & SAS_PROTOCOL_SSP) { + struct scsi_cmnd *cmnd = task->uldd_task; + struct domain_device *dev = task->dev; + struct pm8001_hba_info *pm8001_ha = + pm8001_find_ha_by_dev(dev); + + int_to_scsilun(cmnd->device->lun, &lun); + rc = pm8001_find_tag(task, &tag); + if (rc == 0) { + rc = TMF_RESP_FUNC_FAILED; + return rc; + } + PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:[")); + for (i = 0; i < 16; i++) + printk(KERN_INFO "%02x ", cmnd->cmnd[i]); + printk(KERN_INFO "]\n"); + tmf_task.tmf = TMF_QUERY_TASK; + tmf_task.tag_of_task_to_be_managed = tag; + + rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); + switch (rc) { + /* The task is still in the LUN, release it then */ + case TMF_RESP_FUNC_SUCC: + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("The task is still in Lun \n")); + /* The task is not in the LUN or failed, reset the phy */ + case TMF_RESP_FUNC_FAILED: + case TMF_RESP_FUNC_COMPLETE: + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("The task is not in Lun or failed," + " reset the phy \n")); + break; + } + } + pm8001_printk(":rc= %d\n", rc); + return rc; +} + +/* mandatory SAM-3, still need to free task/ccb info, abort the specified task */ +int pm8001_abort_task(struct sas_task *task) +{ + unsigned long flags; + u32 tag = 0xdeadbeef; + u32 device_id; + struct domain_device *dev ; + struct pm8001_hba_info *pm8001_ha = NULL; + struct pm8001_ccb_info *ccb; + struct scsi_lun lun; + struct pm8001_device *pm8001_dev; + struct pm8001_tmf_task tmf_task; + int rc = TMF_RESP_FUNC_FAILED; + if (unlikely(!task || !task->lldd_task || !task->dev)) + return rc; + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + rc = TMF_RESP_FUNC_COMPLETE; + goto out; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + if (task->task_proto & SAS_PROTOCOL_SSP) { + struct scsi_cmnd *cmnd = task->uldd_task; + dev = task->dev; + ccb = task->lldd_task; + pm8001_dev = dev->lldd_dev; + pm8001_ha = pm8001_find_ha_by_dev(dev); + int_to_scsilun(cmnd->device->lun, &lun); + rc = pm8001_find_tag(task, &tag); + if (rc == 0) { + printk(KERN_INFO "No such tag in %s\n", __func__); + rc = TMF_RESP_FUNC_FAILED; + return rc; + } + device_id = pm8001_dev->device_id; + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("abort io to device_id = %d\n", device_id)); + tmf_task.tmf = TMF_ABORT_TASK; + tmf_task.tag_of_task_to_be_managed = tag; + rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, + pm8001_dev->sas_device, 0, tag); + } else if (task->task_proto & SAS_PROTOCOL_SATA || + task->task_proto & SAS_PROTOCOL_STP) { + dev = task->dev; + pm8001_dev = dev->lldd_dev; +
pm8001_ha = pm8001_find_ha_by_dev(dev); + rc = pm8001_find_tag(task, &tag); + if (rc == 0) { + printk(KERN_INFO "No such tag in %s\n", __func__); + rc = TMF_RESP_FUNC_FAILED; + return rc; + } + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, + pm8001_dev->sas_device, 0, tag); + } else if (task->task_proto & SAS_PROTOCOL_SMP) { + /* SMP */ + dev = task->dev; + pm8001_dev = dev->lldd_dev; + pm8001_ha = pm8001_find_ha_by_dev(dev); + rc = pm8001_find_tag(task, &tag); + if (rc == 0) { + printk(KERN_INFO "No such tag in %s\n", __func__); + rc = TMF_RESP_FUNC_FAILED; + return rc; + } + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, + pm8001_dev->sas_device, 0, tag); + + } +out: + if (rc != TMF_RESP_FUNC_COMPLETE) + pm8001_printk("rc= %d\n", rc); + return rc; +} + +int pm8001_abort_task_set(struct domain_device *dev, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_tmf_task tmf_task; + + tmf_task.tmf = TMF_ABORT_TASK_SET; + rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); + return rc; +} + +int pm8001_clear_aca(struct domain_device *dev, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_tmf_task tmf_task; + + tmf_task.tmf = TMF_CLEAR_ACA; + rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); + + return rc; +} + +int pm8001_clear_task_set(struct domain_device *dev, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_tmf_task tmf_task; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("I_T_L_Q clear task set[%x]\n", + pm8001_dev->device_id)); + tmf_task.tmf = TMF_CLEAR_TASK_SET; + rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); + return rc; +} + diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h new file mode 100644 index 000000000000..14c676bbb533 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -0,0 +1,480 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#ifndef _PM8001_SAS_H_ +#define _PM8001_SAS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pm8001_defs.h" + +#define DRV_NAME "pm8001" +#define DRV_VERSION "0.1.36" +#define PM8001_FAIL_LOGGING 0x01 /* libsas EH function logging */ +#define PM8001_INIT_LOGGING 0x02 /* driver init logging */ +#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ +#define PM8001_IO_LOGGING 0x08 /* I/O path logging */ +#define PM8001_EH_LOGGING 0x10 /* Error message logging */ +#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ +#define PM8001_MSG_LOGGING 0x40 /* misc message logging */ +#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\ + __func__, __LINE__, ## arg) +#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \ +do { \ + if (unlikely(HBA->logging_level & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ +} while (0); + +#define PM8001_EH_DBG(HBA, CMD) \ + PM8001_CHECK_LOGGING(HBA, PM8001_EH_LOGGING, CMD) + +#define PM8001_INIT_DBG(HBA, CMD) \ + PM8001_CHECK_LOGGING(HBA, PM8001_INIT_LOGGING, CMD) + +#define PM8001_DISC_DBG(HBA, CMD) \ + PM8001_CHECK_LOGGING(HBA, PM8001_DISC_LOGGING, CMD) + +#define PM8001_IO_DBG(HBA, CMD) \ + PM8001_CHECK_LOGGING(HBA, PM8001_IO_LOGGING, CMD) + +#define PM8001_FAIL_DBG(HBA, CMD) \ + PM8001_CHECK_LOGGING(HBA, PM8001_FAIL_LOGGING, CMD) + +#define PM8001_IOCTL_DBG(HBA, CMD) \ + PM8001_CHECK_LOGGING(HBA, PM8001_IOCTL_LOGGING, CMD) + +#define PM8001_MSG_DBG(HBA, CMD) \ + PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD) + + +#define PM8001_USE_TASKLET +#define PM8001_USE_MSIX + + +#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) + +#define PM8001_NAME_LENGTH 32/* generic length of strings */ +extern struct list_head hba_list; +extern const struct pm8001_dispatch pm8001_8001_dispatch; + +struct pm8001_hba_info; +struct pm8001_ccb_info; +struct pm8001_device; +struct pm8001_tmf_task; +struct pm8001_dispatch { + char *name; + int (*chip_init)(struct pm8001_hba_info *pm8001_ha); + int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature); + void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); + int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); + void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); + void (*isr)(struct pm8001_hba_info *pm8001_ha); + u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha); + int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha); + void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha); + void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha); + void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); + int (*smp_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*ssp_io_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*sata_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*phy_start_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id); + 
int (*phy_stop_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id); + int (*reg_dev_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag); + int (*dereg_dev_req)(struct pm8001_hba_info *pm8001_ha, u32 device_id); + int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha, + u32 phy_id, u32 phy_op); + int (*task_abort)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, + u32 cmd_tag); + int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf); + int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload); + int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload); + int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha, + void *payload); + int (*set_dev_state_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state); + int (*sas_diag_start_end_req)(struct pm8001_hba_info *pm8001_ha, + u32 state); + int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha, + u32 state); +}; + +struct pm8001_chip_info { + u32 n_phy; + const struct pm8001_dispatch *dispatch; +}; +#define PM8001_CHIP_DISP (pm8001_ha->chip->dispatch) + +struct pm8001_port { + struct asd_sas_port sas_port; +}; + +struct pm8001_phy { + struct pm8001_hba_info *pm8001_ha; + struct pm8001_port *port; + struct asd_sas_phy sas_phy; + struct sas_identify identify; + struct scsi_device *sdev; + u64 dev_sas_addr; + u32 phy_type; + struct completion *enable_completion; + u32 frame_rcvd_size; + u8 frame_rcvd[32]; + u8 phy_attached; + u8 phy_state; + enum sas_linkrate minimum_linkrate; + enum sas_linkrate maximum_linkrate; +}; + +struct pm8001_device { + enum sas_dev_type dev_type; + struct domain_device *sas_device; + u32 attached_phy; + u32 id; + struct completion *dcompletion; + struct completion *setds_completion; + u32 device_id; + u32 running_req; +}; + +struct pm8001_prd_imt { + __le32 len; + __le32 e; +}; + +struct pm8001_prd { + __le64 addr; /* 64-bit buffer address */ + struct pm8001_prd_imt im_len; /* 64-bit length */ +} __attribute__ ((packed)); +/* + * CCB(Command Control Block) + */ +struct pm8001_ccb_info { + struct list_head entry; + struct sas_task *task; + u32 n_elem; + u32 ccb_tag; + dma_addr_t ccb_dma_handle; + struct pm8001_device *device; + struct pm8001_prd buf_prd[PM8001_MAX_DMA_SG]; + struct fw_control_ex *fw_control_context; +}; + +struct mpi_mem { + void *virt_ptr; + dma_addr_t phys_addr; + u32 phys_addr_hi; + u32 phys_addr_lo; + u32 total_len; + u32 num_elements; + u32 element_size; + u32 alignment; +}; + +struct mpi_mem_req { + /* The number of element in the mpiMemory array */ + u32 count; + /* The array of structures that define memroy regions*/ + struct mpi_mem region[USI_MAX_MEMCNT]; +}; + +struct main_cfg_table { + u32 signature; + u32 interface_rev; + u32 firmware_rev; + u32 max_out_io; + u32 max_sgl; + u32 ctrl_cap_flag; + u32 gst_offset; + u32 inbound_queue_offset; + u32 outbound_queue_offset; + u32 inbound_q_nppd_hppd; + u32 outbound_hw_event_pid0_3; + u32 outbound_hw_event_pid4_7; + u32 outbound_ncq_event_pid0_3; + u32 outbound_ncq_event_pid4_7; + u32 outbound_tgt_ITNexus_event_pid0_3; + u32 outbound_tgt_ITNexus_event_pid4_7; + u32 outbound_tgt_ssp_event_pid0_3; + u32 outbound_tgt_ssp_event_pid4_7; + u32 outbound_tgt_smp_event_pid0_3; + u32 outbound_tgt_smp_event_pid4_7; + u32 upper_event_log_addr; + u32 lower_event_log_addr; + u32 event_log_size; + u32 event_log_option; + u32 upper_iop_event_log_addr; + u32 
lower_iop_event_log_addr; + u32 iop_event_log_size; + u32 iop_event_log_option; + u32 fatal_err_interrupt; + u32 fatal_err_dump_offset0; + u32 fatal_err_dump_length0; + u32 fatal_err_dump_offset1; + u32 fatal_err_dump_length1; + u32 hda_mode_flag; + u32 anolog_setup_table_offset; +}; +struct general_status_table { + u32 gst_len_mpistate; + u32 iq_freeze_state0; + u32 iq_freeze_state1; + u32 msgu_tcnt; + u32 iop_tcnt; + u32 reserved; + u32 phy_state[8]; + u32 reserved1; + u32 reserved2; + u32 reserved3; + u32 recover_err_info[8]; +}; +struct inbound_queue_table { + u32 element_pri_size_cnt; + u32 upper_base_addr; + u32 lower_base_addr; + u32 ci_upper_base_addr; + u32 ci_lower_base_addr; + u32 pi_pci_bar; + u32 pi_offset; + u32 total_length; + void *base_virt; + void *ci_virt; + u32 reserved; + __le32 consumer_index; + u32 producer_idx; +}; +struct outbound_queue_table { + u32 element_size_cnt; + u32 upper_base_addr; + u32 lower_base_addr; + void *base_virt; + u32 pi_upper_base_addr; + u32 pi_lower_base_addr; + u32 ci_pci_bar; + u32 ci_offset; + u32 total_length; + void *pi_virt; + u32 interrup_vec_cnt_delay; + u32 dinterrup_to_pci_offset; + __le32 producer_index; + u32 consumer_idx; +}; +struct pm8001_hba_memspace { + void __iomem *memvirtaddr; + u64 membase; + u32 memsize; +}; +struct pm8001_hba_info { + char name[PM8001_NAME_LENGTH]; + struct list_head list; + unsigned long flags; + spinlock_t lock;/* host-wide lock */ + struct pci_dev *pdev;/* our device */ + struct device *dev; + struct pm8001_hba_memspace io_mem[6]; + struct mpi_mem_req memoryMap; + void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ + void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ + void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ + void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/ + void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/ + struct main_cfg_table main_cfg_tbl; + struct general_status_table gs_tbl; + struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM]; + struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; + u8 sas_addr[SAS_ADDR_SIZE]; + struct sas_ha_struct *sas;/* SCSI/SAS glue */ + struct Scsi_Host *shost; + u32 chip_id; + const struct pm8001_chip_info *chip; + struct completion *nvmd_completion; + int tags_num; + unsigned long *tags; + struct pm8001_phy phy[PM8001_MAX_PHYS]; + struct pm8001_port port[PM8001_MAX_PHYS]; + u32 id; + u32 irq; + struct pm8001_device *devices; + struct pm8001_ccb_info *ccb_info; +#ifdef PM8001_USE_MSIX + struct msix_entry msix_entries[16];/*for msi-x interrupt*/ + int number_of_intr;/*will be used in remove()*/ +#endif +#ifdef PM8001_USE_TASKLET + struct tasklet_struct tasklet; +#endif + struct list_head wq_list; + u32 logging_level; + u32 fw_status; + const struct firmware *fw_image; +}; + +struct pm8001_wq { + struct delayed_work work_q; + struct pm8001_hba_info *pm8001_ha; + void *data; + int handler; + struct list_head entry; +}; + +struct pm8001_fw_image_header { + u8 vender_id[8]; + u8 product_id; + u8 hardware_rev; + u8 dest_partition; + u8 reserved; + u8 fw_rev[4]; + __be32 image_length; + __be32 image_crc; + __be32 startup_entry; +} __attribute__((packed, aligned(4))); + +/* define task management IU */ +struct pm8001_tmf_task { + u8 tmf; + u32 tag_of_task_to_be_managed; +}; +/** + * FW Flash Update status values + */ +#define FLASH_UPDATE_COMPLETE_PENDING_REBOOT 0x00 +#define FLASH_UPDATE_IN_PROGRESS 0x01 +#define FLASH_UPDATE_HDR_ERR 0x02 +#define FLASH_UPDATE_OFFSET_ERR 0x03 
+#define FLASH_UPDATE_CRC_ERR 0x04 +#define FLASH_UPDATE_LENGTH_ERR 0x05 +#define FLASH_UPDATE_HW_ERR 0x06 +#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 +#define FLASH_UPDATE_DISABLED 0x11 + +/** + * brief param structure for firmware flash update. + */ +struct fw_flash_updata_info { + u32 cur_image_offset; + u32 cur_image_len; + u32 total_image_len; + struct pm8001_prd sgl; +}; + +struct fw_control_info { + u32 retcode;/*ret code (status)*/ + u32 phase;/*ret code phase*/ + u32 phaseCmplt;/*percent complete for the current + update phase */ + u32 version;/*Hex encoded firmware version number*/ + u32 offset;/*Used for downloading firmware */ + u32 len; /*len of buffer*/ + u32 size;/* Used in OS VPD and Trace get size + operations.*/ + u32 reserved;/* padding required for 64 bit + alignment */ + u8 buffer[1];/* Start of buffer */ +}; +struct fw_control_ex { + struct fw_control_info *fw_control; + void *buffer;/* keep buffer pointer to be + freed when the responce comes*/ + void *virtAddr;/* keep virtual address of the data */ + void *usrAddr;/* keep virtual address of the + user data */ + dma_addr_t phys_addr; + u32 len; /* len of buffer */ + void *payload; /* pointer to IOCTL Payload */ + u8 inProgress;/*if 1 - the IOCTL request is in + progress */ + void *param1; + void *param2; + void *param3; +}; + +/******************** function prototype *********************/ +int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out); +void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha); +u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag); +void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx); +void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, + struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx); +int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata); +int pm8001_slave_alloc(struct scsi_device *scsi_dev); +int pm8001_slave_configure(struct scsi_device *sdev); +void pm8001_scan_start(struct Scsi_Host *shost); +int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time); +int pm8001_queue_command(struct sas_task *task, const int num, + gfp_t gfp_flags); +int pm8001_abort_task(struct sas_task *task); +int pm8001_abort_task_set(struct domain_device *dev, u8 *lun); +int pm8001_clear_aca(struct domain_device *dev, u8 *lun); +int pm8001_clear_task_set(struct domain_device *dev, u8 *lun); +int pm8001_dev_found(struct domain_device *dev); +void pm8001_dev_gone(struct domain_device *dev); +int pm8001_lu_reset(struct domain_device *dev, u8 *lun); +int pm8001_I_T_nexus_reset(struct domain_device *dev); +int pm8001_query_task(struct sas_task *task); +int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, + dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, + u32 mem_size, u32 align); + + +/* ctl shared API */ +extern struct device_attribute *pm8001_host_attrs[]; + +#endif + diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index b0f0f3851cd4..161fadb291d1 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1586,6 +1586,8 @@ #define PCI_VENDOR_ID_COMPEX 0x11f6 #define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112 +#define PCI_VENDOR_ID_PMC_Sierra 0x11f8 + #define PCI_VENDOR_ID_RP 0x11fe #define PCI_DEVICE_ID_RP32INTF 0x0001 #define PCI_DEVICE_ID_RP8INTF 0x0002 -- cgit v1.2.3-59-g8ed1b From e881a172dac4d9ea3b2a1540041d872963c269bd Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 15 Oct 2009 17:46:39 -0700 Subject: [SCSI] modify change_queue_depth to take in 
reason why it is being called This patch modifies scsi_host_template->change_queue_depth so that it takes an argument indicating why it is being called. This will be used so that if a LLD needs to do some extra processing when handling queue fulls or later ramp ups, it can do so. This is a simple port of the drivers setting a change_queue_depth callback. In the patch I just have these LLDs adjust the queue depth if the user was requesting it. Signed-off-by: Mike Christie [Vasu.Dev: v2 Also converted pmcraid_change_queue_depth and then verified all modules compile using "make allmodconfig" for any new build warnings on X86_64. Updated original description after combing two original patches from Mike to make this patch git bisectable.] Signed-off-by: Vasu Dev [jejb: fixed up 53c700] Signed-off-by: James Bottomley --- drivers/ata/libata-scsi.c | 7 ++++++- drivers/ata/sata_nv.c | 2 +- drivers/message/fusion/mptscsih.c | 9 +++++++-- drivers/message/fusion/mptscsih.h | 3 ++- drivers/s390/scsi/zfcp_scsi.c | 6 +++++- drivers/scsi/3w-9xxx.c | 6 +++++- drivers/scsi/3w-xxxx.c | 6 +++++- drivers/scsi/53c700.c | 7 +++++-- drivers/scsi/aacraid/linit.c | 6 +++++- drivers/scsi/arcmsr/arcmsr_hba.c | 5 ++++- drivers/scsi/hptiop.c | 5 ++++- drivers/scsi/ibmvscsi/ibmvfc.c | 7 ++++++- drivers/scsi/ibmvscsi/ibmvscsi.c | 7 ++++++- drivers/scsi/ipr.c | 7 ++++++- drivers/scsi/libfc/fc_fcp.c | 5 ++++- drivers/scsi/libiscsi.c | 5 ++++- drivers/scsi/libsas/sas_scsi_host.c | 6 +++++- drivers/scsi/megaraid/megaraid_mbox.c | 7 ++++++- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 10 +++++++--- drivers/scsi/pmcraid.c | 7 ++++++- drivers/scsi/qla2xxx/qla_os.c | 7 +++++-- drivers/scsi/scsi_sysfs.c | 3 ++- include/linux/libata.h | 2 +- include/scsi/libfc.h | 2 +- include/scsi/libiscsi.h | 3 ++- include/scsi/libsas.h | 3 ++- include/scsi/scsi_host.h | 8 +++++++- 27 files changed, 119 insertions(+), 32 deletions(-) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index b4ee28dec521..5d52c2fcd076 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1208,6 +1208,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev) * ata_scsi_change_queue_depth - SCSI callback for queue depth config * @sdev: SCSI device to configure queue depth for * @queue_depth: new queue depth + * @reason: calling context * * This is libata standard hostt->change_queue_depth callback. * SCSI will call into this callback when user tries to set queue @@ -1219,12 +1220,16 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev) * RETURNS: * Newly configured queue depth. 
*/ -int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) +int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth, + int reason) { struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev; unsigned long flags; + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (queue_depth < 1 || queue_depth == sdev->queue_depth) return sdev->queue_depth; diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 1eb4e020eb5c..0c82d335c55d 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -1975,7 +1975,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev) ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); if (strncmp(model_num, "Maxtor", 6) == 0) { - ata_scsi_change_queue_depth(sdev, 1); + ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT); ata_dev_printk(dev, KERN_NOTICE, "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth); } diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index f68ec48a881e..57752751712b 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -2351,11 +2351,12 @@ mptscsih_slave_destroy(struct scsi_device *sdev) * mptscsih_change_queue_depth - This function will set a devices queue depth * @sdev: per scsi_device pointer * @qdepth: requested queue depth + * @reason: calling context * * Adding support for new 'change_queue_depth' api. */ int -mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) { MPT_SCSI_HOST *hd = shost_priv(sdev->host); VirtTarget *vtarget; @@ -2367,6 +2368,9 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth) starget = scsi_target(sdev); vtarget = starget->hostdata; + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (ioc->bus_type == SPI) { if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)) max_depth = 1; @@ -2433,7 +2437,8 @@ mptscsih_slave_configure(struct scsi_device *sdev) ioc->name, vtarget->negoFlags, vtarget->maxOffset, vtarget->minSyncFactor)); - mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH); + mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH, + SCSI_QDEPTH_DEFAULT); dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "tagged %d, simple %d, ordered %d\n", ioc->name,sdev->tagged_supported, sdev->simple_tags, diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index e0b33e04a33b..45a5ff3eff61 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -128,7 +128,8 @@ extern int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_F extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); -extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth); +extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, + int reason); extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); extern struct device_attribute *mptscsih_host_attrs[]; diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 0e1a34627a2e..ad1154701729 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -29,8 +29,12 @@ char 
*zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) return fcp_sns_info_ptr; } -static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth) +static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, + int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); return sdev->queue_depth; } diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 36c21b19e5d7..2d16d49fd3cd 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -186,8 +186,12 @@ static ssize_t twa_show_stats(struct device *dev, } /* End twa_show_stats() */ /* This function will set a devices queue depth */ -static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth) +static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth, + int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (queue_depth > TW_Q_LENGTH-2) queue_depth = TW_Q_LENGTH-2; scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index faa0fcfed71e..d224294c38fb 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -521,8 +521,12 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr, } /* End tw_show_stats() */ /* This function will set a devices queue depth */ -static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth) +static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth, + int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (queue_depth > TW_Q_LENGTH-2) queue_depth = TW_Q_LENGTH-2; scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index f5a9addb7050..6c60a8060c58 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -175,7 +175,7 @@ STATIC void NCR_700_chip_reset(struct Scsi_Host *host); STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt); STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); -static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); +static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason); static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth); STATIC struct device_attribute *NCR_700_dev_attrs[]; @@ -2082,8 +2082,11 @@ NCR_700_slave_destroy(struct scsi_device *SDp) } static int -NCR_700_change_queue_depth(struct scsi_device *SDp, int depth) +NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (depth > NCR_700_MAX_TAGS) depth = NCR_700_MAX_TAGS; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 9b97c3e016fe..e9373a2d14fa 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -472,8 +472,12 @@ static int aac_slave_configure(struct scsi_device *sdev) * total capacity and the queue depth supported by the target device. 
*/ -static int aac_change_queue_depth(struct scsi_device *sdev, int depth) +static int aac_change_queue_depth(struct scsi_device *sdev, int depth, + int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && (sdev_channel(sdev) == CONTAINER_CHANNEL)) { struct scsi_device * dev; diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 80aac01b5a6f..47d5d19f8c92 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -98,8 +98,11 @@ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, - int queue_depth) + int queue_depth, int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (queue_depth > ARCMSR_MAX_CMD_PERLUN) queue_depth = ARCMSR_MAX_CMD_PERLUN; scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index a0e7e711ff9d..901a3daeb36b 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -861,10 +861,13 @@ static int hptiop_reset(struct scsi_cmnd *scp) } static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, - int queue_depth) + int queue_depth, int reason) { struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata; + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (queue_depth > hba->max_requests) queue_depth = hba->max_requests; scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index bc9beb8c587c..87b536a97cb4 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2764,12 +2764,17 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) * ibmvfc_change_queue_depth - Change the device's queue depth * @sdev: scsi device struct * @qdepth: depth to set + * @reason: calling context * * Return value: * actual depth set **/ -static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth) +static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth, + int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (qdepth > IBMVFC_MAX_CMDS_PER_LUN) qdepth = IBMVFC_MAX_CMDS_PER_LUN; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index d9b0e9d31983..e475b7957c2d 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1637,12 +1637,17 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) * ibmvscsi_change_queue_depth - Change the device's queue depth * @sdev: scsi device struct * @qdepth: depth to set + * @reason: calling context * * Return value: * actual depth set **/ -static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) +static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth, + int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN) qdepth = IBMVSCSI_MAX_CMDS_PER_LUN; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 5f045505a1f4..d40d5c79fff1 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -3367,16 +3367,21 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; * ipr_change_queue_depth - Change the device's queue depth * @sdev: scsi device struct * 
@qdepth: depth to set + * @reason: calling context * * Return value: * actual depth set **/ -static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) +static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth, + int reason) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags = 0; + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index a67f53a5026c..beaab818d8de 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -2064,8 +2064,11 @@ int fc_slave_alloc(struct scsi_device *sdev) } EXPORT_SYMBOL(fc_slave_alloc); -int fc_change_queue_depth(struct scsi_device *sdev, int qdepth) +int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); return sdev->queue_depth; } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index f1a4246f890c..67d0f3fc8ac0 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1643,8 +1643,11 @@ fault: } EXPORT_SYMBOL_GPL(iscsi_queuecommand); -int iscsi_change_queue_depth(struct scsi_device *sdev, int depth) +int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); return sdev->queue_depth; } diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 1c558d3bce18..14b13196b22d 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -820,10 +820,14 @@ void sas_slave_destroy(struct scsi_device *scsi_dev) ata_port_disable(dev->sata_dev.ap); } -int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth) +int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth, + int reason) { int res = min(new_depth, SAS_MAX_QD); + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (scsi_dev->tagged_supported) scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), res); diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 234f0b7eb21c..fd181c2a8ae4 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -335,12 +335,17 @@ static struct device_attribute *megaraid_sdev_attrs[] = { * megaraid_change_queue_depth - Change the device's queue depth * @sdev: scsi device struct * @qdepth: depth to set + * @reason: calling context * * Return value: * actual depth set */ -static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth) +static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth, + int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (qdepth > MBOX_MAX_SCSI_CMDS) qdepth = MBOX_MAX_SCSI_CMDS; scsi_adjust_queue_depth(sdev, 0, qdepth); diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 8dc682f00fd2..55ee014a7e08 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -1099,11 +1099,12 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc, * _scsih_change_queue_depth - setting device queue depth * @sdev: scsi device 
struct * @qdepth: requested queue depth + * @reason: calling context * * Returns queue depth. */ static int -_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) { struct Scsi_Host *shost = sdev->host; int max_depth; @@ -1114,6 +1115,9 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) struct _sas_device *sas_device; unsigned long flags; + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + max_depth = shost->can_queue; /* limit max device queue for SATA to 32 */ @@ -1569,7 +1573,7 @@ _scsih_slave_configure(struct scsi_device *sdev) r_level, raid_device->handle, (unsigned long long)raid_device->wwid, raid_device->num_pds, ds); - _scsih_change_queue_depth(sdev, qdepth); + _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); return 0; } @@ -1615,7 +1619,7 @@ _scsih_slave_configure(struct scsi_device *sdev) _scsih_display_sata_capabilities(ioc, sas_device, sdev); } - _scsih_change_queue_depth(sdev, qdepth); + _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); if (ssp_target) sas_read_port_mode_page(sdev); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index f7c70e2a8224..86d158ee3572 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -278,12 +278,17 @@ static void pmcraid_slave_destroy(struct scsi_device *scsi_dev) * pmcraid_change_queue_depth - Change the device's queue depth * @scsi_dev: scsi device struct * @depth: depth to set + * @reason: calling context * * Return value * actual depth set */ -static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth) +static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth, + int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (depth > PMCRAID_MAX_CMD_PER_LUN) depth = PMCRAID_MAX_CMD_PER_LUN; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ecf2a40d70be..d69744a62fe4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -138,7 +138,7 @@ static int qla2xxx_eh_target_reset(struct scsi_cmnd *); static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); static int qla2xxx_eh_host_reset(struct scsi_cmnd *); -static int qla2x00_change_queue_depth(struct scsi_device *, int); +static int qla2x00_change_queue_depth(struct scsi_device *, int, int); static int qla2x00_change_queue_type(struct scsi_device *, int); struct scsi_host_template qla2xxx_driver_template = { @@ -1235,8 +1235,11 @@ qla2xxx_slave_destroy(struct scsi_device *sdev) } static int -qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth) +qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) { + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); return sdev->queue_depth; } diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 5c7eb63a19d1..a48782866b22 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -766,7 +766,8 @@ sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr, if (depth < 1) return -EINVAL; - retval = sht->change_queue_depth(sdev, depth); + retval = sht->change_queue_depth(sdev, depth, + SCSI_QDEPTH_DEFAULT); if (retval < 0) return retval; diff --git a/include/linux/libata.h b/include/linux/libata.h index 87698640c091..85df383fd4bd 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1023,7 +1023,7 @@ extern int 
ata_std_bios_param(struct scsi_device *sdev, extern int ata_scsi_slave_config(struct scsi_device *sdev); extern void ata_scsi_slave_destroy(struct scsi_device *sdev); extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, - int queue_depth); + int queue_depth, int reason); extern struct ata_device *ata_dev_pair(struct ata_device *adev); extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 1662d73d85a7..9617f9365e45 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -919,7 +919,7 @@ int fc_slave_alloc(struct scsi_device *sdev); /* * Adjust the queue depth. */ -int fc_change_queue_depth(struct scsi_device *sdev, int qdepth); +int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason); /* * Change the tag type. diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index a72edd4eceec..2db2bc26b1e9 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -333,7 +333,8 @@ struct iscsi_host { /* * scsi host template */ -extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth); +extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, + int reason); extern int iscsi_eh_abort(struct scsi_cmnd *sc); extern int iscsi_eh_target_reset(struct scsi_cmnd *sc); extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index e78d3b62d8ec..9eaa3f05f954 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -634,7 +634,8 @@ extern int sas_target_alloc(struct scsi_target *); extern int sas_slave_alloc(struct scsi_device *); extern int sas_slave_configure(struct scsi_device *); extern void sas_slave_destroy(struct scsi_device *); -extern int sas_change_queue_depth(struct scsi_device *, int new_depth); +extern int sas_change_queue_depth(struct scsi_device *, int new_depth, + int reason); extern int sas_change_queue_type(struct scsi_device *, int qt); extern int sas_bios_param(struct scsi_device *, struct block_device *, diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 6e728b176904..603054d8f40c 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -43,6 +43,12 @@ struct blk_queue_tags; #define DISABLE_CLUSTERING 0 #define ENABLE_CLUSTERING 1 +enum { + SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */ + SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */ + SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshhold event */ +}; + struct scsi_host_template { struct module *module; const char *name; @@ -294,7 +300,7 @@ struct scsi_host_template { * * Status: OPTIONAL */ - int (* change_queue_depth)(struct scsi_device *, int); + int (* change_queue_depth)(struct scsi_device *, int, int); /* * Fill in this function to allow the changing of tag types -- cgit v1.2.3-59-g8ed1b From 42a6a91833f1e0f5ee5b5ef98e9f00167b615f46 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 15 Oct 2009 17:46:44 -0700 Subject: [SCSI] scsi error: have scsi-ml call change_queue_depth to handle QUEUE_FULL This has scsi-ml call the change_queue_depth functions when we get a QUEUE_FULL. It will only change the queue depth if change_queue_depth is set because the LLD may have to modify some internal resources, so I thought this would be the safest route. Signed-off-by: Mike Christie -v2 Limits change_queue_depth to only all luns of target by adding channel check while iterating for all luns of Scsi_Host. 
This is same as currently qla2xxx FC HBA does on QUEUE_FULL event. Signed-off-by: Vasu Dev Signed-off-by: James Bottomley --- drivers/scsi/scsi_error.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 1b0060b791e8..7b1e20fee906 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -331,6 +331,28 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) } } +static void scsi_handle_queue_full(struct scsi_device *sdev) +{ + struct scsi_host_template *sht = sdev->host->hostt; + struct scsi_device *tmp_sdev; + + if (!sht->change_queue_depth) + return; + + shost_for_each_device(tmp_sdev, sdev->host) { + if (tmp_sdev->channel != sdev->channel || + tmp_sdev->id != sdev->id) + continue; + /* + * We do not know the number of commands that were at + * the device when we got the queue full so we start + * from the highest possible value and work our way down. + */ + sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1, + SCSI_QDEPTH_QFULL); + } +} + /** * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD. * @scmd: SCSI cmd to examine. @@ -387,8 +409,10 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) * let issuer deal with this, it could be just fine */ return SUCCESS; - case BUSY: case QUEUE_FULL: + scsi_handle_queue_full(scmd->device); + /* fall through */ + case BUSY: default: return FAILED; } @@ -1387,6 +1411,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) */ switch (status_byte(scmd->result)) { case QUEUE_FULL: + scsi_handle_queue_full(scmd->device); /* * the case of trying to send too many commands to a * tagged queueing device. -- cgit v1.2.3-59-g8ed1b From 5c20848a096fb1880ded99816be79d78ca1cd696 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 15 Oct 2009 17:46:50 -0700 Subject: [SCSI] libfc: convert to scsi_track_queue_full This converts the libfc using scsi_track_queue_full to track the queue full from the change_queue_depth callback. 
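 For readers following the series, the shape this conversion (and the zfcp one later in the series) gives a driver's callback is a reason-aware ->change_queue_depth(). A minimal sketch of that shape, using a hypothetical "example" driver name rather than code from any of these patches; the helpers called are the existing scsi-ml ones already used in the diffs above:

#include <linux/errno.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

static int example_change_queue_depth(struct scsi_device *sdev, int qdepth,
				      int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		/* user/sysfs request: apply the requested depth as-is */
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;
	case SCSI_QDEPTH_QFULL:
		/* QUEUE_FULL seen by scsi-ml: let it track and step down */
		scsi_track_queue_full(sdev, qdepth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}
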
Signed-off-by: Mike Christie Signed-off-by: Vasu Dev Acked-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index beaab818d8de..c0dc8e151c65 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1815,21 +1815,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) sc_cmd->result = DID_OK << 16; if (fsp->scsi_resid) CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; - } else if (fsp->cdb_status == QUEUE_FULL) { - struct scsi_device *tmp_sdev; - struct scsi_device *sdev = sc_cmd->device; - - shost_for_each_device(tmp_sdev, sdev->host) { - if (tmp_sdev->id != sdev->id) - continue; - - if (tmp_sdev->queue_depth > 1) { - scsi_track_queue_full(tmp_sdev, - tmp_sdev-> - queue_depth - 1); - } - } - sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; } else { /* * transport level I/O was ok but scsi @@ -2066,10 +2051,16 @@ EXPORT_SYMBOL(fc_slave_alloc); int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) { - if (reason != SCSI_QDEPTH_DEFAULT) + switch (reason) { + case SCSI_QDEPTH_DEFAULT: + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + break; + case SCSI_QDEPTH_QFULL: + scsi_track_queue_full(sdev, qdepth); + break; + default: return -EOPNOTSUPP; - - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + } return sdev->queue_depth; } EXPORT_SYMBOL(fc_change_queue_depth); -- cgit v1.2.3-59-g8ed1b From 14caf44c69184ed72d46a2f883311daf27a4192f Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Thu, 15 Oct 2009 17:46:55 -0700 Subject: [SCSI] fcoe, libfc: fix an libfc issue with queue ramp down in libfc The cmd_per_lun value is used by scsi-ml as fall back lowest queue_depth value but in case of libfc cmd_per_lun is set to same value as max queue_depth = 32. So this patch reduces cmd_per_lun value to 3 and configures each lun with default max queue_depth 32 in fc_slave_alloc. 
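 To make the fallback concrete: when repeated QUEUE_FULLs drive the tracked depth low enough, scsi-ml gives up on tagged queueing and uses the host's cmd_per_lun as the floor. Roughly, as a paraphrase of the existing scsi_track_queue_full() behavior the message refers to (the threshold shown is illustrative, not taken from this patch):

	/* inside scsi_track_queue_full(), once the depth has collapsed */
	if (sdev->last_queue_full_depth < 8) {
		/* drop back to untagged; cmd_per_lun becomes the floor */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

With cmd_per_lun left at 32 that floor equals the maximum, so the reduction to 3 below restores a meaningful low-water mark while fc_slave_alloc still starts every LUN at FC_FCP_DFLT_QUEUE_DEPTH (32, per the message above).
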
Signed-off-by: Vasu Dev Acked-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 2 +- drivers/scsi/libfc/fc_fcp.c | 14 ++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index aef29afb6e71..4efbc17a7d7f 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -137,7 +137,7 @@ static struct scsi_host_template fcoe_shost_template = { .change_queue_depth = fc_change_queue_depth, .change_queue_type = fc_change_queue_type, .this_id = -1, - .cmd_per_lun = 32, + .cmd_per_lun = 3, .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = SG_ALL, diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index c0dc8e151c65..48de805eb193 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -2033,18 +2033,16 @@ EXPORT_SYMBOL(fc_eh_host_reset); int fc_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - int queue_depth; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; - if (sdev->tagged_supported) { - if (sdev->host->hostt->cmd_per_lun) - queue_depth = sdev->host->hostt->cmd_per_lun; - else - queue_depth = FC_FCP_DFLT_QUEUE_DEPTH; - scsi_activate_tcq(sdev, queue_depth); - } + if (sdev->tagged_supported) + scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH); + else + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), + FC_FCP_DFLT_QUEUE_DEPTH); + return 0; } EXPORT_SYMBOL(fc_slave_alloc); -- cgit v1.2.3-59-g8ed1b From 4a84067dbfce436b81779e585bf712b02ceee552 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Thu, 22 Oct 2009 15:46:33 -0700 Subject: [SCSI] add queue_depth ramp up code Current FC HBA queue_depth ramp up code depends on last queue full time. The sdev already has last_queue_full_time field to track last queue full time but stored value is truncated by last four bits. So this patch updates last_queue_full_time without truncating last 4 bits to store full value and then updates its only current usages in scsi_track_queue_full to ignore last four bits to keep current usages same while also use this field in added ramp up code. Adds scsi_handle_queue_ramp_up to ramp up queue_depth on successful completion of IO. The scsi_handle_queue_ramp_up will do ramp up on all luns of a target, just same as ramp down done on all luns on a target. The ramp up is skipped in case the change_queue_depth is not supported by LLD or already reached to added max_queue_depth. Updates added max_queue_depth on every new update to default queue_depth value. The ramp up is also skipped if lapsed time since either last queue ramp up or down is less than LLD specified queue_ramp_up_period. Adds queue_ramp_up_period to sysfs but only if change_queue_depth is supported since ramp up and queue_ramp_up_period is needed only in case change_queue_depth is supported first. Initializes queue_ramp_up_period to 120HZ jiffies as initial default value, it is same as used in existing lpfc and qla2xxx. -v2 Combined all ramp code into this single patch. -v3 Moves max_queue_depth initialization after slave_configure is called from after slave_alloc calling done. Also adjusted max_queue_depth check to skip ramp up if current queue_depth is >= max_queue_depth. -v4 Changes sdev->queue_ramp_up_period unit to ms when using sysfs i/f to store or show its value. 
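 Once this patch is in place, the ramp-up gating (the sdev->max_queue_depth cap and the queue_ramp_up_period check) lives in scsi-ml, so an LLD with no per-command resources to grow can handle the new reason code by simply applying the requested depth. Continuing the illustrative example_change_queue_depth() sketch from earlier in this log (again, not code from any patch in this series), the only addition is one more case:

	case SCSI_QDEPTH_RAMP_UP:
		/*
		 * scsi-ml rate-limits ramp up and never asks for more than
		 * sdev->max_queue_depth, so just apply the new depth.
		 */
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;

This is the same choice the libfc and zfcp conversions later in the series make.
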
Signed-off-by: Vasu Dev Tested-by: Christof Schmitt Tested-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/scsi.c | 10 ++++++++-- drivers/scsi/scsi_error.c | 38 ++++++++++++++++++++++++++++++++++++++ drivers/scsi/scsi_scan.c | 3 +++ drivers/scsi/scsi_sysfs.c | 41 +++++++++++++++++++++++++++++++++++++++-- include/scsi/scsi_device.h | 9 ++++++--- 5 files changed, 94 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index dd098cad337b..a60da5555577 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -940,10 +940,16 @@ EXPORT_SYMBOL(scsi_adjust_queue_depth); */ int scsi_track_queue_full(struct scsi_device *sdev, int depth) { - if ((jiffies >> 4) == sdev->last_queue_full_time) + + /* + * Don't let QUEUE_FULLs on the same + * jiffies count, they could all be from + * same event. + */ + if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4)) return 0; - sdev->last_queue_full_time = (jiffies >> 4); + sdev->last_queue_full_time = jiffies; if (sdev->last_queue_full_depth != depth) { sdev->last_queue_full_count = 1; sdev->last_queue_full_depth = depth; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 7b1e20fee906..08ed506e6059 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -331,6 +331,42 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) } } +static void scsi_handle_queue_ramp_up(struct scsi_device *sdev) +{ + struct scsi_host_template *sht = sdev->host->hostt; + struct scsi_device *tmp_sdev; + + if (!sht->change_queue_depth || + sdev->queue_depth >= sdev->max_queue_depth) + return; + + if (time_before(jiffies, + sdev->last_queue_ramp_up + sdev->queue_ramp_up_period)) + return; + + if (time_before(jiffies, + sdev->last_queue_full_time + sdev->queue_ramp_up_period)) + return; + + /* + * Walk all devices of a target and do + * ramp up on them. + */ + shost_for_each_device(tmp_sdev, sdev->host) { + if (tmp_sdev->channel != sdev->channel || + tmp_sdev->id != sdev->id || + tmp_sdev->queue_depth == sdev->max_queue_depth) + continue; + /* + * call back into LLD to increase queue_depth by one + * with ramp up reason code. 
+ */ + sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1, + SCSI_QDEPTH_RAMP_UP); + sdev->last_queue_ramp_up = jiffies; + } +} + static void scsi_handle_queue_full(struct scsi_device *sdev) { struct scsi_host_template *sht = sdev->host->hostt; @@ -393,6 +429,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) */ switch (status_byte(scmd->result)) { case GOOD: + scsi_handle_queue_ramp_up(scmd->device); case COMMAND_TERMINATED: return SUCCESS; case CHECK_CONDITION: @@ -1425,6 +1462,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) */ return ADD_TO_MLQUEUE; case GOOD: + scsi_handle_queue_ramp_up(scmd->device); case COMMAND_TERMINATED: return SUCCESS; case TASK_ABORTED: diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 0547a7f44d42..50526fa207e5 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -251,6 +251,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, sdev->model = scsi_null_device_strs; sdev->rev = scsi_null_device_strs; sdev->host = shost; + sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD; sdev->id = starget->id; sdev->lun = lun; sdev->channel = starget->channel; @@ -941,6 +942,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, } } + sdev->max_queue_depth = sdev->queue_depth; + /* * Ok, the device is now all set up, we can * register it and tell the rest of the kernel diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index a48782866b22..758598ff3b90 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -771,6 +771,8 @@ sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr, if (retval < 0) return retval; + sdev->max_queue_depth = sdev->queue_depth; + return count; } @@ -778,6 +780,37 @@ static struct device_attribute sdev_attr_queue_depth_rw = __ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, sdev_store_queue_depth_rw); +static ssize_t +sdev_show_queue_ramp_up_period(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev; + sdev = to_scsi_device(dev); + return snprintf(buf, 20, "%u\n", + jiffies_to_msecs(sdev->queue_ramp_up_period)); +} + +static ssize_t +sdev_store_queue_ramp_up_period(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + unsigned long period; + + if (strict_strtoul(buf, 10, &period)) + return -EINVAL; + + sdev->queue_ramp_up_period = msecs_to_jiffies(period); + return period; +} + +static struct device_attribute sdev_attr_queue_ramp_up_period = + __ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, + sdev_show_queue_ramp_up_period, + sdev_store_queue_ramp_up_period); + static ssize_t sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -866,8 +899,12 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) } /* create queue files, which may be writable, depending on the host */ - if (sdev->host->hostt->change_queue_depth) - error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw); + if (sdev->host->hostt->change_queue_depth) { + error = device_create_file(&sdev->sdev_gendev, + &sdev_attr_queue_depth_rw); + error = device_create_file(&sdev->sdev_gendev, + &sdev_attr_queue_ramp_up_period); + } else error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); if (error) { diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 
9af48cbf0036..92c4c3bd916d 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -81,11 +81,14 @@ struct scsi_device { struct list_head starved_entry; struct scsi_cmnd *current_cmnd; /* currently active command */ unsigned short queue_depth; /* How deep of a queue we want */ + unsigned short max_queue_depth; /* max queue depth */ unsigned short last_queue_full_depth; /* These two are used by */ unsigned short last_queue_full_count; /* scsi_track_queue_full() */ - unsigned long last_queue_full_time;/* don't let QUEUE_FULLs on the same - jiffie count on our counter, they - could all be from the same event. */ + unsigned long last_queue_full_time; /* last queue full time */ + unsigned long queue_ramp_up_period; /* ramp up period in jiffies */ +#define SCSI_DEFAULT_RAMP_UP_PERIOD (120 * HZ) + + unsigned long last_queue_ramp_up; /* last queue ramp up time */ unsigned int id, lun, channel; -- cgit v1.2.3-59-g8ed1b From 229b8d72f3eccf97e8a9e22436e8fc303b3483cd Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Thu, 15 Oct 2009 17:47:06 -0700 Subject: [SCSI] libfc: add queue_depth ramp up Adjust queue_depth on fc_change_queue_depth call back with reason SCSI_QDEPTH_RAMP_UP, no additional resource adjustments necessary for libfc. Signed-off-by: Vasu Dev Acked-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 48de805eb193..479af9352a42 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -2056,6 +2056,9 @@ int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) case SCSI_QDEPTH_QFULL: scsi_track_queue_full(sdev, qdepth); break; + case SCSI_QDEPTH_RAMP_UP: + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + break; default: return -EOPNOTSUPP; } -- cgit v1.2.3-59-g8ed1b From 42e62a74377bcbb526565a31aa18da8f712b93ee Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Thu, 15 Oct 2009 17:47:11 -0700 Subject: [SCSI] zfcp: Adapt change_queue_depth for queue full tracking Adapt the change_queue_depth callback in zfcp for the new reason parameter. Simply pass each call back to the SCSI midlayer, there are no resource adjustments necessary for zfcp. Signed-off-by: Christof Schmitt Removes check for (depth <= default_depth) in case of SCSI_QDEPTH_RAMP_UP call back, not needed after added max_queue_depth per sdev. 
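Both the libfc and zfcp conversions follow the same pattern: the change_queue_depth callback switches on the new reason code and defers to the existing midlayer helpers, since neither driver needs any extra resource management. The following is a minimal sketch of such a callback, not taken verbatim from either driver; the function name is a placeholder, while the helpers and SCSI_QDEPTH_* reason codes are the ones introduced earlier in this series.

static int example_change_queue_depth(struct scsi_device *sdev, int depth,
				      int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
	case SCSI_QDEPTH_RAMP_UP:
		/* No LLD-specific limits: adjust the depth, keeping the tag type. */
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	case SCSI_QDEPTH_QFULL:
		/* Let the midlayer record the QUEUE FULL event. */
		scsi_track_queue_full(sdev, depth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}

Note that scsi_handle_queue_ramp_up() already stops ramping once sdev->queue_depth reaches sdev->max_queue_depth before calling into the LLD, which is why the zfcp change above no longer needs its own (depth <= default_depth) check for the ramp-up case.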
Signed-off-by: Vasu Dev Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_scsi.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index ad1154701729..f54655998bd5 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -32,10 +32,19 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) { - if (reason != SCSI_QDEPTH_DEFAULT) + switch (reason) { + case SCSI_QDEPTH_DEFAULT: + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); + break; + case SCSI_QDEPTH_QFULL: + scsi_track_queue_full(sdev, depth); + break; + case SCSI_QDEPTH_RAMP_UP: + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); + break; + default: return -EOPNOTSUPP; - - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); + } return sdev->queue_depth; } -- cgit v1.2.3-59-g8ed1b From 3ae31f6a7b6e442fc6a92f29330fbad230dc3992 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Wed, 21 Oct 2009 09:22:46 -0700 Subject: [SCSI] scsi_dh: Change the scsidh_activate interface to be asynchronous Make scsi_dh_activate() function asynchronous, by taking in two additional parameters, one is the callback function and the other is the data to call the callback function with. Signed-off-by: Chandra Seetharaman Signed-off-by: James Bottomley --- drivers/md/dm-mpath.c | 8 ++++---- drivers/scsi/device_handler/scsi_dh.c | 17 ++++++++++++----- drivers/scsi/device_handler/scsi_dh_alua.c | 7 +++++-- drivers/scsi/device_handler/scsi_dh_emc.c | 7 +++++-- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 7 +++++-- drivers/scsi/device_handler/scsi_dh_rdac.c | 7 +++++-- include/scsi/scsi_device.h | 3 ++- include/scsi/scsi_dh.h | 6 ++++-- 8 files changed, 42 insertions(+), 20 deletions(-) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 32d0b878eccc..dce971dbdfa3 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1116,8 +1116,9 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) return limit_reached; } -static void pg_init_done(struct dm_path *path, int errors) +static void pg_init_done(void *data, int errors) { + struct dm_path *path = data; struct pgpath *pgpath = path_to_pgpath(path); struct priority_group *pg = pgpath->pg; struct multipath *m = pg->m; @@ -1183,12 +1184,11 @@ static void pg_init_done(struct dm_path *path, int errors) static void activate_path(struct work_struct *work) { - int ret; struct pgpath *pgpath = container_of(work, struct pgpath, activate_path); - ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev)); - pg_init_done(&pgpath->path, ret); + scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), + pg_init_done, &pgpath->path); } /* diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 3ee1cbc89479..6f7f798910e8 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -226,7 +226,7 @@ store_dh_state(struct device *dev, struct device_attribute *attr, * Activate a device handler */ if (scsi_dh->activate) - err = scsi_dh->activate(sdev); + err = scsi_dh->activate(sdev, NULL, NULL); else err = 0; } @@ -423,10 +423,17 @@ EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); /* * scsi_dh_activate - activate the path associated with the scsi_device * corresponding to the given request queue. 
- * @q - Request queue that is associated with the scsi_device to be - * activated. + * Returns immediately without waiting for activation to be completed. + * @q - Request queue that is associated with the scsi_device to be + * activated. + * @fn - Function to be called upon completion of the activation. + * Function fn is called with data (below) and the error code. + * Function fn may be called from the same calling context. So, + * do not hold the lock in the caller which may be needed in fn. + * @data - data passed to the function fn upon completion. + * */ -int scsi_dh_activate(struct request_queue *q) +int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) { int err = 0; unsigned long flags; @@ -445,7 +452,7 @@ int scsi_dh_activate(struct request_queue *q) return err; if (scsi_dh->activate) - err = scsi_dh->activate(sdev); + err = scsi_dh->activate(sdev, fn, data); put_device(&sdev->sdev_gendev); return err; } diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index b5cdefaf2608..e8a8928e58bc 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -652,7 +652,8 @@ out: * based on a certain policy. But until we actually encounter them it * should be okay. */ -static int alua_activate(struct scsi_device *sdev) +static int alua_activate(struct scsi_device *sdev, + activate_complete fn, void *data) { struct alua_dh_data *h = get_alua_data(sdev); int err = SCSI_DH_OK; @@ -667,7 +668,9 @@ static int alua_activate(struct scsi_device *sdev) err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h); out: - return err; + if (fn) + fn(data, err); + return 0; } /* diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 0cffe84976fe..61966750bd60 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -528,7 +528,8 @@ retry: return err; } -static int clariion_activate(struct scsi_device *sdev) +static int clariion_activate(struct scsi_device *sdev, + activate_complete fn, void *data) { struct clariion_dh_data *csdev = get_clariion_data(sdev); int result; @@ -559,7 +560,9 @@ done: csdev->port, lun_state[csdev->lun_state], csdev->default_sp + 'A'); - return result; + if (fn) + fn(data, result); + return 0; } /* * params - parameters in the following format diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index f7da7530875e..0aacafc96f21 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -268,7 +268,8 @@ static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) * activate the passive path (and deactivate the * previously active one). 
*/ -static int hp_sw_activate(struct scsi_device *sdev) +static int hp_sw_activate(struct scsi_device *sdev, + activate_complete fn, void *data) { int ret = SCSI_DH_OK; struct hp_sw_dh_data *h = get_hp_sw_data(sdev); @@ -283,7 +284,9 @@ static int hp_sw_activate(struct scsi_device *sdev) HP_SW_NAME); } - return ret; + if (fn) + fn(data, ret); + return 0; } static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 268189d31d9c..be362adbd8e7 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -568,7 +568,8 @@ done: return err; } -static int rdac_activate(struct scsi_device *sdev) +static int rdac_activate(struct scsi_device *sdev, + activate_complete fn, void *data) { struct rdac_dh_data *h = get_rdac_data(sdev); int err = SCSI_DH_OK; @@ -580,7 +581,9 @@ static int rdac_activate(struct scsi_device *sdev) if (h->lun_state == RDAC_LUN_UNOWNED) err = send_mode_select(sdev, h); done: - return err; + if (fn) + fn(data, err); + return 0; } static int rdac_prep_fn(struct scsi_device *sdev, struct request *req) diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 92c4c3bd916d..68d185c79bae 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -177,6 +177,7 @@ struct scsi_dh_devlist { char *model; }; +typedef void (*activate_complete)(void *, int); struct scsi_device_handler { /* Used by the infrastructure */ struct list_head list; /* list of scsi_device_handlers */ @@ -188,7 +189,7 @@ struct scsi_device_handler { int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); int (*attach)(struct scsi_device *); void (*detach)(struct scsi_device *); - int (*activate)(struct scsi_device *); + int (*activate)(struct scsi_device *, activate_complete, void *); int (*prep_fn)(struct scsi_device *, struct request *); int (*set_params)(struct scsi_device *, const char *); }; diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h index ff2407405b42..e3f2db212ddc 100644 --- a/include/scsi/scsi_dh.h +++ b/include/scsi/scsi_dh.h @@ -56,14 +56,16 @@ enum { SCSI_DH_DRIVER_MAX, }; #if defined(CONFIG_SCSI_DH) || defined(CONFIG_SCSI_DH_MODULE) -extern int scsi_dh_activate(struct request_queue *); +extern int scsi_dh_activate(struct request_queue *, activate_complete, void *); extern int scsi_dh_handler_exist(const char *); extern int scsi_dh_attach(struct request_queue *, const char *); extern void scsi_dh_detach(struct request_queue *); extern int scsi_dh_set_params(struct request_queue *, const char *); #else -static inline int scsi_dh_activate(struct request_queue *req) +static inline int scsi_dh_activate(struct request_queue *req, + activate_complete fn, void *data) { + fn(data, 0); return 0; } static inline int scsi_dh_handler_exist(const char *name) -- cgit v1.2.3-59-g8ed1b From 970f3f47e7c97c0bfe9f91356943b55ac389cb1d Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Wed, 21 Oct 2009 09:22:51 -0700 Subject: [SCSI] scsi_dh: Make rdac hardware handler's activate() async Batch up MODE_SELECT in rdac device handler. LSI RDAC storage has the capability of handling mode selects for multiple luns in a same command. Make use of that ability to send as few MODE SELECTs as possible to the storage controller as possible. This patch creates a work queue and queues up activate requests when a MODE SELECT is sent down the wire. 
When that MODE SELECT completes, it compiles queued up activate requests for multiple luns into a single MODE SELECT. This reduces the time to do failover/failback of large number of LUNS. Signed-off-by: Babu Moger Signed-off-by: Chandra Seetharaman Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 108 ++++++++++++++++++++++++++--- 1 file changed, 100 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index be362adbd8e7..47cfe1c49c3e 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -22,6 +22,7 @@ #include #include #include +#include #define RDAC_NAME "rdac" #define RDAC_RETRY_COUNT 5 @@ -138,7 +139,13 @@ struct rdac_controller { } mode_select; u8 index; u8 array_name[ARRAY_LABEL_LEN]; + spinlock_t ms_lock; + int ms_queued; + struct work_struct ms_work; + struct scsi_device *ms_sdev; + struct list_head ms_head; }; + struct c8_inquiry { u8 peripheral_info; u8 page_code; /* 0xC8 */ @@ -198,8 +205,17 @@ static const char *lun_state[] = "owned (AVT mode)", }; +struct rdac_queue_data { + struct list_head entry; + struct rdac_dh_data *h; + activate_complete callback_fn; + void *callback_data; +}; + static LIST_HEAD(ctlr_list); static DEFINE_SPINLOCK(list_lock); +static struct workqueue_struct *kmpath_rdacd; +static void send_mode_select(struct work_struct *work); /* * module parameter to enable rdac debug logging. @@ -281,7 +297,6 @@ static struct request *rdac_failover_get(struct scsi_device *sdev, rdac_pg->subpage_code = 0x1; rdac_pg->page_len[0] = 0x01; rdac_pg->page_len[1] = 0x28; - rdac_pg->lun_table[h->lun] = 0x81; } else { struct rdac_pg_legacy *rdac_pg; @@ -291,7 +306,6 @@ static struct request *rdac_failover_get(struct scsi_device *sdev, common = &rdac_pg->common; rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; rdac_pg->page_len = 0x68; - rdac_pg->lun_table[h->lun] = 0x81; } common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; common->quiescence_timeout = RDAC_QUIESCENCE_TIME; @@ -325,6 +339,7 @@ static void release_controller(struct kref *kref) struct rdac_controller *ctlr; ctlr = container_of(kref, struct rdac_controller, kref); + flush_workqueue(kmpath_rdacd); spin_lock(&list_lock); list_del(&ctlr->node); spin_unlock(&list_lock); @@ -363,6 +378,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id, kref_init(&ctlr->kref); ctlr->use_ms10 = -1; + ctlr->ms_queued = 0; + ctlr->ms_sdev = NULL; + spin_lock_init(&ctlr->ms_lock); + INIT_WORK(&ctlr->ms_work, send_mode_select); + INIT_LIST_HEAD(&ctlr->ms_head); list_add(&ctlr->node, &ctlr_list); done: spin_unlock(&list_lock); @@ -490,7 +510,7 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) } static int mode_select_handle_sense(struct scsi_device *sdev, - unsigned char *sensebuf) + unsigned char *sensebuf) { struct scsi_sense_hdr sense_hdr; int err = SCSI_DH_IO, ret; @@ -533,11 +553,29 @@ done: return err; } -static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) +static void send_mode_select(struct work_struct *work) { + struct rdac_controller *ctlr = + container_of(work, struct rdac_controller, ms_work); struct request *rq; + struct scsi_device *sdev = ctlr->ms_sdev; + struct rdac_dh_data *h = get_rdac_data(sdev); struct request_queue *q = sdev->request_queue; int err, retry_cnt = RDAC_RETRY_COUNT; + struct rdac_queue_data *tmp, *qdata; + LIST_HEAD(list); + u8 *lun_table; + + 
spin_lock(&ctlr->ms_lock); + list_splice_init(&ctlr->ms_head, &list); + ctlr->ms_queued = 0; + ctlr->ms_sdev = NULL; + spin_unlock(&ctlr->ms_lock); + + if (ctlr->use_ms10) + lun_table = ctlr->mode_select.expanded.lun_table; + else + lun_table = ctlr->mode_select.legacy.lun_table; retry: err = SCSI_DH_RES_TEMP_UNAVAIL; @@ -545,6 +583,10 @@ retry: if (!rq) goto done; + list_for_each_entry(qdata, &list, entry) { + lun_table[qdata->h->lun] = 0x81; + } + RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "%s MODE_SELECT command", (char *) h->ctlr->array_name, h->ctlr->index, @@ -565,7 +607,41 @@ retry: } done: - return err; + list_for_each_entry_safe(qdata, tmp, &list, entry) { + list_del(&qdata->entry); + if (err == SCSI_DH_OK) + qdata->h->state = RDAC_STATE_ACTIVE; + if (qdata->callback_fn) + qdata->callback_fn(qdata->callback_data, err); + kfree(qdata); + } + return; +} + +static int queue_mode_select(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + struct rdac_queue_data *qdata; + struct rdac_controller *ctlr; + + qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); + if (!qdata) + return SCSI_DH_RETRY; + + qdata->h = get_rdac_data(sdev); + qdata->callback_fn = fn; + qdata->callback_data = data; + + ctlr = qdata->h->ctlr; + spin_lock(&ctlr->ms_lock); + list_add_tail(&qdata->entry, &ctlr->ms_head); + if (!ctlr->ms_queued) { + ctlr->ms_queued = 1; + ctlr->ms_sdev = sdev; + queue_work(kmpath_rdacd, &ctlr->ms_work); + } + spin_unlock(&ctlr->ms_lock); + return SCSI_DH_OK; } static int rdac_activate(struct scsi_device *sdev, @@ -578,8 +654,11 @@ static int rdac_activate(struct scsi_device *sdev, if (err != SCSI_DH_OK) goto done; - if (h->lun_state == RDAC_LUN_UNOWNED) - err = send_mode_select(sdev, h); + if (h->lun_state == RDAC_LUN_UNOWNED) { + err = queue_mode_select(sdev, fn, data); + if (err == SCSI_DH_OK) + return 0; + } done: if (fn) fn(data, err); @@ -793,13 +872,26 @@ static int __init rdac_init(void) int r; r = scsi_register_device_handler(&rdac_dh); - if (r != 0) + if (r != 0) { printk(KERN_ERR "Failed to register scsi device handler."); + goto done; + } + + /* + * Create workqueue to handle mode selects for rdac + */ + kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd"); + if (!kmpath_rdacd) { + scsi_unregister_device_handler(&rdac_dh); + printk(KERN_ERR "kmpath_rdacd creation failed.\n"); + } +done: return r; } static void __exit rdac_exit(void) { + destroy_workqueue(kmpath_rdacd); scsi_unregister_device_handler(&rdac_dh); } -- cgit v1.2.3-59-g8ed1b From 4e2ef86cd5ce057b60acea33bb71c06676e71888 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Wed, 21 Oct 2009 09:22:58 -0700 Subject: [SCSI] scsi_dh: Make hp hardware handler's activate() async Make the activate function asynchronous by using blk_execute_rq_nowait() Signed-off-by: Chandra Seetharaman Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 87 ++++++++++++++++++----------- 1 file changed, 54 insertions(+), 33 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 0aacafc96f21..857fdd6032b2 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -39,8 +39,14 @@ struct hp_sw_dh_data { unsigned char sense[SCSI_SENSE_BUFFERSIZE]; int path_state; int retries; + int retry_cnt; + struct scsi_device *sdev; + activate_complete callback_fn; + void *callback_data; }; +static int hp_sw_start_stop(struct hp_sw_dh_data *); + static inline struct hp_sw_dh_data 
*get_hp_sw_data(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; @@ -191,19 +197,53 @@ static int start_done(struct scsi_device *sdev, unsigned char *sense) return rc; } +static void start_stop_endio(struct request *req, int error) +{ + struct hp_sw_dh_data *h = req->end_io_data; + unsigned err = SCSI_DH_OK; + + if (error || host_byte(req->errors) != DID_OK || + msg_byte(req->errors) != COMMAND_COMPLETE) { + sdev_printk(KERN_WARNING, h->sdev, + "%s: sending start_stop_unit failed with %x\n", + HP_SW_NAME, req->errors); + err = SCSI_DH_IO; + goto done; + } + + if (req->sense_len > 0) { + err = start_done(h->sdev, h->sense); + if (err == SCSI_DH_RETRY) { + err = SCSI_DH_IO; + if (--h->retry_cnt) { + blk_put_request(req); + err = hp_sw_start_stop(h); + if (err == SCSI_DH_OK) + return; + } + } + } +done: + blk_put_request(req); + if (h->callback_fn) { + h->callback_fn(h->callback_data, err); + h->callback_fn = h->callback_data = NULL; + } + return; + +} + /* * hp_sw_start_stop - Send START STOP UNIT command * @sdev: sdev command should be sent to * * Sending START STOP UNIT activates the SP. */ -static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h) +static int hp_sw_start_stop(struct hp_sw_dh_data *h) { struct request *req; - int ret, retry; -retry: - req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); + req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC); if (!req) return SCSI_DH_RES_TEMP_UNAVAIL; @@ -217,32 +257,10 @@ retry: req->sense = h->sense; memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); req->sense_len = 0; - retry = h->retries; - - ret = blk_execute_rq(req->q, NULL, req, 1); - if (ret == -EIO) { - if (req->sense_len > 0) { - ret = start_done(sdev, h->sense); - } else { - sdev_printk(KERN_WARNING, sdev, - "%s: sending start_stop_unit failed with %x\n", - HP_SW_NAME, req->errors); - ret = SCSI_DH_IO; - } - } else - ret = SCSI_DH_OK; - - if (ret == SCSI_DH_RETRY) { - if (--retry) { - blk_put_request(req); - goto retry; - } - ret = SCSI_DH_IO; - } - - blk_put_request(req); + req->end_io_data = h; - return ret; + blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio); + return SCSI_DH_OK; } static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) @@ -277,11 +295,13 @@ static int hp_sw_activate(struct scsi_device *sdev, ret = hp_sw_tur(sdev, h); if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) { - ret = hp_sw_start_stop(sdev, h); + h->retry_cnt = h->retries; + h->callback_fn = fn; + h->callback_data = data; + ret = hp_sw_start_stop(h); if (ret == SCSI_DH_OK) - sdev_printk(KERN_INFO, sdev, - "%s: activated path\n", - HP_SW_NAME); + return 0; + h->callback_fn = h->callback_data = NULL; } if (fn) @@ -329,6 +349,7 @@ static int hp_sw_bus_attach(struct scsi_device *sdev) h = (struct hp_sw_dh_data *) scsi_dh_data->buf; h->path_state = HP_SW_PATH_UNINITIALIZED; h->retries = HP_SW_RETRIES; + h->sdev = sdev; ret = hp_sw_tur(sdev, h); if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED) -- cgit v1.2.3-59-g8ed1b From 96e6586556dfa80112f42895be93c561582d9930 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Wed, 21 Oct 2009 09:23:04 -0700 Subject: [SCSI] scsi_dh: Make alua hardware handler's activate() async Make the activate function asynchronous by using blk_execute_rq_nowait() Signed-off-by: Chandra Seetharaman Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_alua.c | 132 ++++++++++++++++------------- 1 file changed, 73 insertions(+), 59 
deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index e8a8928e58bc..4f0d0138f48b 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -60,11 +60,17 @@ struct alua_dh_data { int bufflen; unsigned char sense[SCSI_SENSE_BUFFERSIZE]; int senselen; + struct scsi_device *sdev; + activate_complete callback_fn; + void *callback_data; }; #define ALUA_POLICY_SWITCH_CURRENT 0 #define ALUA_POLICY_SWITCH_ALL 1 +static char print_alua_state(int); +static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *); + static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; @@ -230,19 +236,72 @@ done: return err; } +/* + * alua_stpg - Evaluate SET TARGET GROUP STATES + * @sdev: the device to be evaluated + * @state: the new target group state + * + * Send a SET TARGET GROUP STATES command to the device. + * We only have to test here if we should resubmit the command; + * any other error is assumed as a failure. + */ +static void stpg_endio(struct request *req, int error) +{ + struct alua_dh_data *h = req->end_io_data; + struct scsi_sense_hdr sense_hdr; + unsigned err = SCSI_DH_IO; + + if (error || host_byte(req->errors) != DID_OK || + msg_byte(req->errors) != COMMAND_COMPLETE) + goto done; + + if (err == SCSI_DH_IO && h->senselen > 0) { + err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, + &sense_hdr); + if (!err) { + err = SCSI_DH_IO; + goto done; + } + err = alua_check_sense(h->sdev, &sense_hdr); + if (err == ADD_TO_MLQUEUE) { + err = SCSI_DH_RETRY; + goto done; + } + sdev_printk(KERN_INFO, h->sdev, + "%s: stpg sense code: %02x/%02x/%02x\n", + ALUA_DH_NAME, sense_hdr.sense_key, + sense_hdr.asc, sense_hdr.ascq); + err = SCSI_DH_IO; + } + if (err == SCSI_DH_OK) { + h->state = TPGS_STATE_OPTIMIZED; + sdev_printk(KERN_INFO, h->sdev, + "%s: port group %02x switched to state %c\n", + ALUA_DH_NAME, h->group_id, + print_alua_state(h->state)); + } +done: + blk_put_request(req); + if (h->callback_fn) { + h->callback_fn(h->callback_data, err); + h->callback_fn = h->callback_data = NULL; + } + return; +} + /* * submit_stpg - Issue a SET TARGET GROUP STATES command - * @sdev: sdev the command should be sent to * * Currently we're only setting the current target port group state * to 'active/optimized' and let the array firmware figure out * the states of the remaining groups. */ -static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h) +static unsigned submit_stpg(struct alua_dh_data *h) { struct request *rq; int err = SCSI_DH_RES_TEMP_UNAVAIL; int stpg_len = 8; + struct scsi_device *sdev = h->sdev; /* Prepare the data buffer */ memset(h->buff, 0, stpg_len); @@ -252,7 +311,7 @@ static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h) rq = get_alua_req(sdev, h->buff, stpg_len, WRITE); if (!rq) - goto done; + return SCSI_DH_RES_TEMP_UNAVAIL; /* Prepare the command. 
*/ rq->cmd[0] = MAINTENANCE_OUT; @@ -266,17 +325,9 @@ static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h) rq->sense = h->sense; memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); rq->sense_len = h->senselen = 0; + rq->end_io_data = h; - err = blk_execute_rq(rq->q, NULL, rq, 1); - if (err == -EIO) { - sdev_printk(KERN_INFO, sdev, - "%s: stpg failed with %x\n", - ALUA_DH_NAME, rq->errors); - h->senselen = rq->sense_len; - err = SCSI_DH_IO; - } - blk_put_request(rq); -done: + blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio); return err; } @@ -476,50 +527,6 @@ static int alua_check_sense(struct scsi_device *sdev, return SCSI_RETURN_NOT_HANDLED; } -/* - * alua_stpg - Evaluate SET TARGET GROUP STATES - * @sdev: the device to be evaluated - * @state: the new target group state - * - * Send a SET TARGET GROUP STATES command to the device. - * We only have to test here if we should resubmit the command; - * any other error is assumed as a failure. - */ -static int alua_stpg(struct scsi_device *sdev, int state, - struct alua_dh_data *h) -{ - struct scsi_sense_hdr sense_hdr; - unsigned err; - int retry = ALUA_FAILOVER_RETRIES; - - retry: - err = submit_stpg(sdev, h); - if (err == SCSI_DH_IO && h->senselen > 0) { - err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, - &sense_hdr); - if (!err) - return SCSI_DH_IO; - err = alua_check_sense(sdev, &sense_hdr); - if (retry > 0 && err == ADD_TO_MLQUEUE) { - retry--; - goto retry; - } - sdev_printk(KERN_INFO, sdev, - "%s: stpg sense code: %02x/%02x/%02x\n", - ALUA_DH_NAME, sense_hdr.sense_key, - sense_hdr.asc, sense_hdr.ascq); - err = SCSI_DH_IO; - } - if (err == SCSI_DH_OK) { - h->state = state; - sdev_printk(KERN_INFO, sdev, - "%s: port group %02x switched to state %c\n", - ALUA_DH_NAME, h->group_id, - print_alua_state(h->state) ); - } - return err; -} - /* * alua_rtpg - Evaluate REPORT TARGET GROUP STATES * @sdev: the device to be evaluated. @@ -664,8 +671,14 @@ static int alua_activate(struct scsi_device *sdev, goto out; } - if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) - err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h); + if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) { + h->callback_fn = fn; + h->callback_data = data; + err = submit_stpg(h); + if (err == SCSI_DH_OK) + return 0; + h->callback_fn = h->callback_data = NULL; + } out: if (fn) @@ -748,6 +761,7 @@ static int alua_bus_attach(struct scsi_device *sdev) h->rel_port = -1; h->buff = h->inq; h->bufflen = ALUA_INQUIRY_SIZE; + h->sdev = sdev; err = alua_initialize(sdev, h); if (err != SCSI_DH_OK) -- cgit v1.2.3-59-g8ed1b From 851b164231d1117673aa44c00c7622e48b7dfcf4 Mon Sep 17 00:00:00 2001 From: Alok Kataria Date: Tue, 13 Oct 2009 14:51:05 -0700 Subject: [SCSI] vmw_pvscsi: SCSI driver for VMware's virtual HBA. This is a driver for VMware's paravirtualized SCSI device, which should improve disk performance for guests running under control of VMware hypervisors that support such devices. 
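As a usage sketch only, assuming an x86 guest kernel with PCI and SCSI support enabled, the option added by the Kconfig hunk below would typically be built as a module:

# illustrative guest .config fragment; option name from this patch's Kconfig entry
CONFIG_SCSI=y
CONFIG_VMWARE_PVSCSI=m

As the Kconfig help text below notes, the resulting module is called vmw_pvscsi.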
Signed-off-by: Alok N Kataria Signed-off-by: James Bottomley --- MAINTAINERS | 8 + drivers/scsi/Kconfig | 8 + drivers/scsi/Makefile | 1 + drivers/scsi/vmw_pvscsi.c | 1407 +++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/vmw_pvscsi.h | 397 +++++++++++++ 5 files changed, 1821 insertions(+) create mode 100644 drivers/scsi/vmw_pvscsi.c create mode 100644 drivers/scsi/vmw_pvscsi.h diff --git a/MAINTAINERS b/MAINTAINERS index 016411cadc9a..d1a5cfd2c379 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5696,6 +5696,14 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/vmxnet3/ +VMware PVSCSI driver +M: Alok Kataria +M: VMware PV-Drivers +L: linux-scsi@vger.kernel.org +S: Maintained +F: drivers/scsi/vmw_pvscsi.c +F: drivers/scsi/vmw_pvscsi.h + VOLTAGE AND CURRENT REGULATOR FRAMEWORK M: Liam Girdwood M: Mark Brown diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 2e4f7d0ee639..1895259fff0f 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -621,6 +621,14 @@ config SCSI_FLASHPOINT substantial, so users of MultiMaster Host Adapters may not wish to include it. +config VMWARE_PVSCSI + tristate "VMware PVSCSI driver support" + depends on PCI && SCSI && X86 + help + This driver supports VMware's para virtualized SCSI HBA. + To compile this driver as a module, choose M here: the + module will be called vmw_pvscsi. + config LIBFC tristate "LibFC module" select SCSI_FC_ATTRS diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 53b1dac7e7d9..5026bdc7b2b7 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -134,6 +134,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o +obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o obj-$(CONFIG_ARM) += arm/ diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c new file mode 100644 index 000000000000..d2604c813a20 --- /dev/null +++ b/drivers/scsi/vmw_pvscsi.c @@ -0,0 +1,1407 @@ +/* + * Linux driver for VMware's para-virtualized SCSI HBA. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Maintained by: Alok N Kataria + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "vmw_pvscsi.h" + +#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver" + +MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC); +MODULE_AUTHOR("VMware, Inc."); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING); + +#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8 +#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1 +#define PVSCSI_DEFAULT_QUEUE_DEPTH 64 +#define SGL_SIZE PAGE_SIZE + +struct pvscsi_sg_list { + struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT]; +}; + +struct pvscsi_ctx { + /* + * The index of the context in cmd_map serves as the context ID for a + * 1-to-1 mapping completions back to requests. + */ + struct scsi_cmnd *cmd; + struct pvscsi_sg_list *sgl; + struct list_head list; + dma_addr_t dataPA; + dma_addr_t sensePA; + dma_addr_t sglPA; +}; + +struct pvscsi_adapter { + char *mmioBase; + unsigned int irq; + u8 rev; + bool use_msi; + bool use_msix; + bool use_msg; + + spinlock_t hw_lock; + + struct workqueue_struct *workqueue; + struct work_struct work; + + struct PVSCSIRingReqDesc *req_ring; + unsigned req_pages; + unsigned req_depth; + dma_addr_t reqRingPA; + + struct PVSCSIRingCmpDesc *cmp_ring; + unsigned cmp_pages; + dma_addr_t cmpRingPA; + + struct PVSCSIRingMsgDesc *msg_ring; + unsigned msg_pages; + dma_addr_t msgRingPA; + + struct PVSCSIRingsState *rings_state; + dma_addr_t ringStatePA; + + struct pci_dev *dev; + struct Scsi_Host *host; + + struct list_head cmd_pool; + struct pvscsi_ctx *cmd_map; +}; + + +/* Command line parameters */ +static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING; +static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING; +static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH; +static bool pvscsi_disable_msi; +static bool pvscsi_disable_msix; +static bool pvscsi_use_msg = true; + +#define PVSCSI_RW (S_IRUSR | S_IWUSR) + +module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW); +MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default=" + __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")"); + +module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW); +MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" + __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")"); + +module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW); +MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default=" + __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")"); + +module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW); +MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); + +module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW); +MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); + +module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW); +MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)"); + +static const struct pci_device_id pvscsi_pci_tbl[] = { + { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl); + +static struct device * +pvscsi_dev(const struct pvscsi_adapter *adapter) +{ + return &(adapter->dev->dev); +} + +static struct pvscsi_ctx * +pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) +{ + struct pvscsi_ctx *ctx, *end; + + end = &adapter->cmd_map[adapter->req_depth]; + for (ctx = adapter->cmd_map; ctx 
< end; ctx++) + if (ctx->cmd == cmd) + return ctx; + + return NULL; +} + +static struct pvscsi_ctx * +pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) +{ + struct pvscsi_ctx *ctx; + + if (list_empty(&adapter->cmd_pool)) + return NULL; + + ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list); + ctx->cmd = cmd; + list_del(&ctx->list); + + return ctx; +} + +static void pvscsi_release_context(struct pvscsi_adapter *adapter, + struct pvscsi_ctx *ctx) +{ + ctx->cmd = NULL; + list_add(&ctx->list, &adapter->cmd_pool); +} + +/* + * Map a pvscsi_ctx struct to a context ID field value; we map to a simple + * non-zero integer. ctx always points to an entry in cmd_map array, hence + * the return value is always >=1. + */ +static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter, + const struct pvscsi_ctx *ctx) +{ + return ctx - adapter->cmd_map + 1; +} + +static struct pvscsi_ctx * +pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context) +{ + return &adapter->cmd_map[context - 1]; +} + +static void pvscsi_reg_write(const struct pvscsi_adapter *adapter, + u32 offset, u32 val) +{ + writel(val, adapter->mmioBase + offset); +} + +static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset) +{ + return readl(adapter->mmioBase + offset); +} + +static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter) +{ + return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS); +} + +static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter, + u32 val) +{ + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val); +} + +static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter) +{ + u32 intr_bits; + + intr_bits = PVSCSI_INTR_CMPL_MASK; + if (adapter->use_msg) + intr_bits |= PVSCSI_INTR_MSG_MASK; + + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits); +} + +static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter) +{ + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0); +} + +static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter, + u32 cmd, const void *desc, size_t len) +{ + const u32 *ptr = desc; + size_t i; + + len /= sizeof(*ptr); + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd); + for (i = 0; i < len; i++) + pvscsi_reg_write(adapter, + PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]); +} + +static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter, + const struct pvscsi_ctx *ctx) +{ + struct PVSCSICmdDescAbortCmd cmd = { 0 }; + + cmd.target = ctx->cmd->device->id; + cmd.context = pvscsi_map_context(adapter, ctx); + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd)); +} + +static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter) +{ + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0); +} + +static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter) +{ + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0); +} + +static int scsi_is_rw(unsigned char op) +{ + return op == READ_6 || op == WRITE_6 || + op == READ_10 || op == WRITE_10 || + op == READ_12 || op == WRITE_12 || + op == READ_16 || op == WRITE_16; +} + +static void pvscsi_kick_io(const struct pvscsi_adapter *adapter, + unsigned char op) +{ + if (scsi_is_rw(op)) + pvscsi_kick_rw_io(adapter); + else + pvscsi_process_request_ring(adapter); +} + +static void ll_adapter_reset(const struct pvscsi_adapter *adapter) +{ + dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter); + + 
pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0); +} + +static void ll_bus_reset(const struct pvscsi_adapter *adapter) +{ + dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter); + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0); +} + +static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target) +{ + struct PVSCSICmdDescResetDevice cmd = { 0 }; + + dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target); + + cmd.target = target; + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE, + &cmd, sizeof(cmd)); +} + +static void pvscsi_create_sg(struct pvscsi_ctx *ctx, + struct scatterlist *sg, unsigned count) +{ + unsigned i; + struct PVSCSISGElement *sge; + + BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT); + + sge = &ctx->sgl->sge[0]; + for (i = 0; i < count; i++, sg++) { + sge[i].addr = sg_dma_address(sg); + sge[i].length = sg_dma_len(sg); + sge[i].flags = 0; + } +} + +/* + * Map all data buffers for a command into PCI space and + * setup the scatter/gather list if needed. + */ +static void pvscsi_map_buffers(struct pvscsi_adapter *adapter, + struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd, + struct PVSCSIRingReqDesc *e) +{ + unsigned count; + unsigned bufflen = scsi_bufflen(cmd); + struct scatterlist *sg; + + e->dataLen = bufflen; + e->dataAddr = 0; + if (bufflen == 0) + return; + + sg = scsi_sglist(cmd); + count = scsi_sg_count(cmd); + if (count != 0) { + int segs = scsi_dma_map(cmd); + if (segs > 1) { + pvscsi_create_sg(ctx, sg, segs); + + e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; + ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl, + SGL_SIZE, PCI_DMA_TODEVICE); + e->dataAddr = ctx->sglPA; + } else + e->dataAddr = sg_dma_address(sg); + } else { + /* + * In case there is no S/G list, scsi_sglist points + * directly to the buffer. 
+ */ + ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen, + cmd->sc_data_direction); + e->dataAddr = ctx->dataPA; + } +} + +static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter, + struct pvscsi_ctx *ctx) +{ + struct scsi_cmnd *cmd; + unsigned bufflen; + + cmd = ctx->cmd; + bufflen = scsi_bufflen(cmd); + + if (bufflen != 0) { + unsigned count = scsi_sg_count(cmd); + + if (count != 0) { + scsi_dma_unmap(cmd); + if (ctx->sglPA) { + pci_unmap_single(adapter->dev, ctx->sglPA, + SGL_SIZE, PCI_DMA_TODEVICE); + ctx->sglPA = 0; + } + } else + pci_unmap_single(adapter->dev, ctx->dataPA, bufflen, + cmd->sc_data_direction); + } + if (cmd->sense_buffer) + pci_unmap_single(adapter->dev, ctx->sensePA, + SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); +} + +static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter) +{ + adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE, + &adapter->ringStatePA); + if (!adapter->rings_state) + return -ENOMEM; + + adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING, + pvscsi_ring_pages); + adapter->req_depth = adapter->req_pages + * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; + adapter->req_ring = pci_alloc_consistent(adapter->dev, + adapter->req_pages * PAGE_SIZE, + &adapter->reqRingPA); + if (!adapter->req_ring) + return -ENOMEM; + + adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING, + pvscsi_ring_pages); + adapter->cmp_ring = pci_alloc_consistent(adapter->dev, + adapter->cmp_pages * PAGE_SIZE, + &adapter->cmpRingPA); + if (!adapter->cmp_ring) + return -ENOMEM; + + BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE)); + BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE)); + BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE)); + + if (!adapter->use_msg) + return 0; + + adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING, + pvscsi_msg_ring_pages); + adapter->msg_ring = pci_alloc_consistent(adapter->dev, + adapter->msg_pages * PAGE_SIZE, + &adapter->msgRingPA); + if (!adapter->msg_ring) + return -ENOMEM; + BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE)); + + return 0; +} + +static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter) +{ + struct PVSCSICmdDescSetupRings cmd = { 0 }; + dma_addr_t base; + unsigned i; + + cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT; + cmd.reqRingNumPages = adapter->req_pages; + cmd.cmpRingNumPages = adapter->cmp_pages; + + base = adapter->reqRingPA; + for (i = 0; i < adapter->req_pages; i++) { + cmd.reqRingPPNs[i] = base >> PAGE_SHIFT; + base += PAGE_SIZE; + } + + base = adapter->cmpRingPA; + for (i = 0; i < adapter->cmp_pages; i++) { + cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT; + base += PAGE_SIZE; + } + + memset(adapter->rings_state, 0, PAGE_SIZE); + memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE); + memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE); + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS, + &cmd, sizeof(cmd)); + + if (adapter->use_msg) { + struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 }; + + cmd_msg.numPages = adapter->msg_pages; + + base = adapter->msgRingPA; + for (i = 0; i < adapter->msg_pages; i++) { + cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT; + base += PAGE_SIZE; + } + memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE); + + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING, + &cmd_msg, sizeof(cmd_msg)); + } +} + +/* + * Pull a completion descriptor off and pass the completion back + * to the SCSI mid layer. 
+ */ +static void pvscsi_complete_request(struct pvscsi_adapter *adapter, + const struct PVSCSIRingCmpDesc *e) +{ + struct pvscsi_ctx *ctx; + struct scsi_cmnd *cmd; + u32 btstat = e->hostStatus; + u32 sdstat = e->scsiStatus; + + ctx = pvscsi_get_context(adapter, e->context); + cmd = ctx->cmd; + pvscsi_unmap_buffers(adapter, ctx); + pvscsi_release_context(adapter, ctx); + cmd->result = 0; + + if (sdstat != SAM_STAT_GOOD && + (btstat == BTSTAT_SUCCESS || + btstat == BTSTAT_LINKED_COMMAND_COMPLETED || + btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) { + cmd->result = (DID_OK << 16) | sdstat; + if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer) + cmd->result |= (DRIVER_SENSE << 24); + } else + switch (btstat) { + case BTSTAT_SUCCESS: + case BTSTAT_LINKED_COMMAND_COMPLETED: + case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG: + /* If everything went fine, let's move on.. */ + cmd->result = (DID_OK << 16); + break; + + case BTSTAT_DATARUN: + case BTSTAT_DATA_UNDERRUN: + /* Report residual data in underruns */ + scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); + cmd->result = (DID_ERROR << 16); + break; + + case BTSTAT_SELTIMEO: + /* Our emulation returns this for non-connected devs */ + cmd->result = (DID_BAD_TARGET << 16); + break; + + case BTSTAT_LUNMISMATCH: + case BTSTAT_TAGREJECT: + case BTSTAT_BADMSG: + cmd->result = (DRIVER_INVALID << 24); + /* fall through */ + + case BTSTAT_HAHARDWARE: + case BTSTAT_INVPHASE: + case BTSTAT_HATIMEOUT: + case BTSTAT_NORESPONSE: + case BTSTAT_DISCONNECT: + case BTSTAT_HASOFTWARE: + case BTSTAT_BUSFREE: + case BTSTAT_SENSFAILED: + cmd->result |= (DID_ERROR << 16); + break; + + case BTSTAT_SENTRST: + case BTSTAT_RECVRST: + case BTSTAT_BUSRESET: + cmd->result = (DID_RESET << 16); + break; + + case BTSTAT_ABORTQUEUE: + cmd->result = (DID_ABORT << 16); + break; + + case BTSTAT_SCSIPARITY: + cmd->result = (DID_PARITY << 16); + break; + + default: + cmd->result = (DID_ERROR << 16); + scmd_printk(KERN_DEBUG, cmd, + "Unknown completion status: 0x%x\n", + btstat); + } + + dev_dbg(&cmd->device->sdev_gendev, + "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n", + cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat); + + cmd->scsi_done(cmd); +} + +/* + * barrier usage : Since the PVSCSI device is emulated, there could be cases + * where we may want to serialize some accesses between the driver and the + * emulation layer. We use compiler barriers instead of the more expensive + * memory barriers because PVSCSI is only supported on X86 which has strong + * memory access ordering. + */ +static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter) +{ + struct PVSCSIRingsState *s = adapter->rings_state; + struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring; + u32 cmp_entries = s->cmpNumEntriesLog2; + + while (s->cmpConsIdx != s->cmpProdIdx) { + struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx & + MASK(cmp_entries)); + /* + * This barrier() ensures that *e is not dereferenced while + * the device emulation still writes data into the slot. + * Since the device emulation advances s->cmpProdIdx only after + * updating the slot we want to check it first. + */ + barrier(); + pvscsi_complete_request(adapter, e); + /* + * This barrier() ensures that compiler doesn't reorder write + * to s->cmpConsIdx before the read of (*e) inside + * pvscsi_complete_request. Otherwise, device emulation may + * overwrite *e before we had a chance to read it. 
+ */ + barrier(); + s->cmpConsIdx++; + } +} + +/* + * Translate a Linux SCSI request into a request ring entry. + */ +static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, + struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd) +{ + struct PVSCSIRingsState *s; + struct PVSCSIRingReqDesc *e; + struct scsi_device *sdev; + u32 req_entries; + + s = adapter->rings_state; + sdev = cmd->device; + req_entries = s->reqNumEntriesLog2; + + /* + * If this condition holds, we might have room on the request ring, but + * we might not have room on the completion ring for the response. + * However, we have already ruled out this possibility - we would not + * have successfully allocated a context if it were true, since we only + * have one context per request entry. Check for it anyway, since it + * would be a serious bug. + */ + if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) { + scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: " + "ring full: reqProdIdx=%d cmpConsIdx=%d\n", + s->reqProdIdx, s->cmpConsIdx); + return -1; + } + + e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries)); + + e->bus = sdev->channel; + e->target = sdev->id; + memset(e->lun, 0, sizeof(e->lun)); + e->lun[1] = sdev->lun; + + if (cmd->sense_buffer) { + ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + PCI_DMA_FROMDEVICE); + e->senseAddr = ctx->sensePA; + e->senseLen = SCSI_SENSE_BUFFERSIZE; + } else { + e->senseLen = 0; + e->senseAddr = 0; + } + e->cdbLen = cmd->cmd_len; + e->vcpuHint = smp_processor_id(); + memcpy(e->cdb, cmd->cmnd, e->cdbLen); + + e->tag = SIMPLE_QUEUE_TAG; + if (sdev->tagged_supported && + (cmd->tag == HEAD_OF_QUEUE_TAG || + cmd->tag == ORDERED_QUEUE_TAG)) + e->tag = cmd->tag; + + if (cmd->sc_data_direction == DMA_FROM_DEVICE) + e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; + else if (cmd->sc_data_direction == DMA_TO_DEVICE) + e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE; + else if (cmd->sc_data_direction == DMA_NONE) + e->flags = PVSCSI_FLAG_CMD_DIR_NONE; + else + e->flags = 0; + + pvscsi_map_buffers(adapter, ctx, cmd, e); + + e->context = pvscsi_map_context(adapter, ctx); + + barrier(); + + s->reqProdIdx++; + + return 0; +} + +static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +{ + struct Scsi_Host *host = cmd->device->host; + struct pvscsi_adapter *adapter = shost_priv(host); + struct pvscsi_ctx *ctx; + unsigned long flags; + + spin_lock_irqsave(&adapter->hw_lock, flags); + + ctx = pvscsi_acquire_context(adapter, cmd); + if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) { + if (ctx) + pvscsi_release_context(adapter, ctx); + spin_unlock_irqrestore(&adapter->hw_lock, flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + cmd->scsi_done = done; + + dev_dbg(&cmd->device->sdev_gendev, + "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]); + + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + pvscsi_kick_io(adapter, cmd->cmnd[0]); + + return 0; +} + +static int pvscsi_abort(struct scsi_cmnd *cmd) +{ + struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); + struct pvscsi_ctx *ctx; + unsigned long flags; + + scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", + adapter->host->host_no, cmd); + + spin_lock_irqsave(&adapter->hw_lock, flags); + + /* + * Poll the completion ring first - we might be trying to abort + * a command that is waiting to be dispatched in the completion ring. 
+ */ + pvscsi_process_completion_ring(adapter); + + /* + * If there is no context for the command, it either already succeeded + * or else was never properly issued. Not our problem. + */ + ctx = pvscsi_find_context(adapter, cmd); + if (!ctx) { + scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd); + goto out; + } + + pvscsi_abort_cmd(adapter, ctx); + + pvscsi_process_completion_ring(adapter); + +out: + spin_unlock_irqrestore(&adapter->hw_lock, flags); + return SUCCESS; +} + +/* + * Abort all outstanding requests. This is only safe to use if the completion + * ring will never be walked again or the device has been reset, because it + * destroys the 1-1 mapping between context field passed to emulation and our + * request structure. + */ +static void pvscsi_reset_all(struct pvscsi_adapter *adapter) +{ + unsigned i; + + for (i = 0; i < adapter->req_depth; i++) { + struct pvscsi_ctx *ctx = &adapter->cmd_map[i]; + struct scsi_cmnd *cmd = ctx->cmd; + if (cmd) { + scmd_printk(KERN_ERR, cmd, + "Forced reset on cmd %p\n", cmd); + pvscsi_unmap_buffers(adapter, ctx); + pvscsi_release_context(adapter, ctx); + cmd->result = (DID_RESET << 16); + cmd->scsi_done(cmd); + } + } +} + +static int pvscsi_host_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct pvscsi_adapter *adapter = shost_priv(host); + unsigned long flags; + bool use_msg; + + scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n"); + + spin_lock_irqsave(&adapter->hw_lock, flags); + + use_msg = adapter->use_msg; + + if (use_msg) { + adapter->use_msg = 0; + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + /* + * Now that we know that the ISR won't add more work on the + * workqueue we can safely flush any outstanding work. + */ + flush_workqueue(adapter->workqueue); + spin_lock_irqsave(&adapter->hw_lock, flags); + } + + /* + * We're going to tear down the entire ring structure and set it back + * up, so stalling new requests until all completions are flushed and + * the rings are back in place. + */ + + pvscsi_process_request_ring(adapter); + + ll_adapter_reset(adapter); + + /* + * Now process any completions. Note we do this AFTER adapter reset, + * which is strange, but stops races where completions get posted + * between processing the ring and issuing the reset. The backend will + * not touch the ring memory after reset, so the immediately pre-reset + * completion ring state is still valid. + */ + pvscsi_process_completion_ring(adapter); + + pvscsi_reset_all(adapter); + adapter->use_msg = use_msg; + pvscsi_setup_all_rings(adapter); + pvscsi_unmask_intr(adapter); + + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + return SUCCESS; +} + +static int pvscsi_bus_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct pvscsi_adapter *adapter = shost_priv(host); + unsigned long flags; + + scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n"); + + /* + * We don't want to queue new requests for this bus after + * flushing all pending requests to emulation, since new + * requests could then sneak in during this bus reset phase, + * so take the lock now. 
+ */ + spin_lock_irqsave(&adapter->hw_lock, flags); + + pvscsi_process_request_ring(adapter); + ll_bus_reset(adapter); + pvscsi_process_completion_ring(adapter); + + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + return SUCCESS; +} + +static int pvscsi_device_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct pvscsi_adapter *adapter = shost_priv(host); + unsigned long flags; + + scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n", + host->host_no, cmd->device->id); + + /* + * We don't want to queue new requests for this device after flushing + * all pending requests to emulation, since new requests could then + * sneak in during this device reset phase, so take the lock now. + */ + spin_lock_irqsave(&adapter->hw_lock, flags); + + pvscsi_process_request_ring(adapter); + ll_device_reset(adapter, cmd->device->id); + pvscsi_process_completion_ring(adapter); + + spin_unlock_irqrestore(&adapter->hw_lock, flags); + + return SUCCESS; +} + +static struct scsi_host_template pvscsi_template; + +static const char *pvscsi_info(struct Scsi_Host *host) +{ + struct pvscsi_adapter *adapter = shost_priv(host); + static char buf[256]; + + sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: " + "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev, + adapter->req_pages, adapter->cmp_pages, adapter->msg_pages, + pvscsi_template.cmd_per_lun); + + return buf; +} + +static struct scsi_host_template pvscsi_template = { + .module = THIS_MODULE, + .name = "VMware PVSCSI Host Adapter", + .proc_name = "vmw_pvscsi", + .info = pvscsi_info, + .queuecommand = pvscsi_queue, + .this_id = -1, + .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT, + .dma_boundary = UINT_MAX, + .max_sectors = 0xffff, + .use_clustering = ENABLE_CLUSTERING, + .eh_abort_handler = pvscsi_abort, + .eh_device_reset_handler = pvscsi_device_reset, + .eh_bus_reset_handler = pvscsi_bus_reset, + .eh_host_reset_handler = pvscsi_host_reset, +}; + +static void pvscsi_process_msg(const struct pvscsi_adapter *adapter, + const struct PVSCSIRingMsgDesc *e) +{ + struct PVSCSIRingsState *s = adapter->rings_state; + struct Scsi_Host *host = adapter->host; + struct scsi_device *sdev; + + printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n", + e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2); + + BUILD_BUG_ON(PVSCSI_MSG_LAST != 2); + + if (e->type == PVSCSI_MSG_DEV_ADDED) { + struct PVSCSIMsgDescDevStatusChanged *desc; + desc = (struct PVSCSIMsgDescDevStatusChanged *)e; + + printk(KERN_INFO + "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n", + desc->bus, desc->target, desc->lun[1]); + + if (!scsi_host_get(host)) + return; + + sdev = scsi_device_lookup(host, desc->bus, desc->target, + desc->lun[1]); + if (sdev) { + printk(KERN_INFO "vmw_pvscsi: device already exists\n"); + scsi_device_put(sdev); + } else + scsi_add_device(adapter->host, desc->bus, + desc->target, desc->lun[1]); + + scsi_host_put(host); + } else if (e->type == PVSCSI_MSG_DEV_REMOVED) { + struct PVSCSIMsgDescDevStatusChanged *desc; + desc = (struct PVSCSIMsgDescDevStatusChanged *)e; + + printk(KERN_INFO + "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n", + desc->bus, desc->target, desc->lun[1]); + + if (!scsi_host_get(host)) + return; + + sdev = scsi_device_lookup(host, desc->bus, desc->target, + desc->lun[1]); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else + printk(KERN_INFO + "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n", + desc->bus, desc->target, desc->lun[1]); + + 
scsi_host_put(host); + } +} + +static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter) +{ + struct PVSCSIRingsState *s = adapter->rings_state; + + return s->msgProdIdx != s->msgConsIdx; +} + +static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter) +{ + struct PVSCSIRingsState *s = adapter->rings_state; + struct PVSCSIRingMsgDesc *ring = adapter->msg_ring; + u32 msg_entries = s->msgNumEntriesLog2; + + while (pvscsi_msg_pending(adapter)) { + struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx & + MASK(msg_entries)); + + barrier(); + pvscsi_process_msg(adapter, e); + barrier(); + s->msgConsIdx++; + } +} + +static void pvscsi_msg_workqueue_handler(struct work_struct *data) +{ + struct pvscsi_adapter *adapter; + + adapter = container_of(data, struct pvscsi_adapter, work); + + pvscsi_process_msg_ring(adapter); +} + +static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter) +{ + char name[32]; + + if (!pvscsi_use_msg) + return 0; + + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, + PVSCSI_CMD_SETUP_MSG_RING); + + if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1) + return 0; + + snprintf(name, sizeof(name), + "vmw_pvscsi_wq_%u", adapter->host->host_no); + + adapter->workqueue = create_singlethread_workqueue(name); + if (!adapter->workqueue) { + printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n"); + return 0; + } + INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler); + + return 1; +} + +static irqreturn_t pvscsi_isr(int irq, void *devp) +{ + struct pvscsi_adapter *adapter = devp; + int handled; + + if (adapter->use_msi || adapter->use_msix) + handled = true; + else { + u32 val = pvscsi_read_intr_status(adapter); + handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0; + if (handled) + pvscsi_write_intr_status(devp, val); + } + + if (handled) { + unsigned long flags; + + spin_lock_irqsave(&adapter->hw_lock, flags); + + pvscsi_process_completion_ring(adapter); + if (adapter->use_msg && pvscsi_msg_pending(adapter)) + queue_work(adapter->workqueue, &adapter->work); + + spin_unlock_irqrestore(&adapter->hw_lock, flags); + } + + return IRQ_RETVAL(handled); +} + +static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) +{ + struct pvscsi_ctx *ctx = adapter->cmd_map; + unsigned i; + + for (i = 0; i < adapter->req_depth; ++i, ++ctx) + free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); +} + +static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq) +{ + struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION }; + int ret; + + ret = pci_enable_msix(adapter->dev, &entry, 1); + if (ret) + return ret; + + *irq = entry.vector; + + return 0; +} + +static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) +{ + if (adapter->irq) { + free_irq(adapter->irq, adapter); + adapter->irq = 0; + } + if (adapter->use_msi) { + pci_disable_msi(adapter->dev); + adapter->use_msi = 0; + } else if (adapter->use_msix) { + pci_disable_msix(adapter->dev); + adapter->use_msix = 0; + } +} + +static void pvscsi_release_resources(struct pvscsi_adapter *adapter) +{ + pvscsi_shutdown_intr(adapter); + + if (adapter->workqueue) + destroy_workqueue(adapter->workqueue); + + if (adapter->mmioBase) + pci_iounmap(adapter->dev, adapter->mmioBase); + + pci_release_regions(adapter->dev); + + if (adapter->cmd_map) { + pvscsi_free_sgls(adapter); + kfree(adapter->cmd_map); + } + + if (adapter->rings_state) + pci_free_consistent(adapter->dev, PAGE_SIZE, + adapter->rings_state, adapter->ringStatePA); + + if (adapter->req_ring) + 
pci_free_consistent(adapter->dev, + adapter->req_pages * PAGE_SIZE, + adapter->req_ring, adapter->reqRingPA); + + if (adapter->cmp_ring) + pci_free_consistent(adapter->dev, + adapter->cmp_pages * PAGE_SIZE, + adapter->cmp_ring, adapter->cmpRingPA); + + if (adapter->msg_ring) + pci_free_consistent(adapter->dev, + adapter->msg_pages * PAGE_SIZE, + adapter->msg_ring, adapter->msgRingPA); +} + +/* + * Allocate scatter gather lists. + * + * These are statically allocated. Trying to be clever was not worth it. + * + * Dynamic allocation can fail, and we can't go deeep into the memory + * allocator, since we're a SCSI driver, and trying too hard to allocate + * memory might generate disk I/O. We also don't want to fail disk I/O + * in that case because we can't get an allocation - the I/O could be + * trying to swap out data to free memory. Since that is pathological, + * just use a statically allocated scatter list. + * + */ +static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter) +{ + struct pvscsi_ctx *ctx; + int i; + + ctx = adapter->cmd_map; + BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE); + + for (i = 0; i < adapter->req_depth; ++i, ++ctx) { + ctx->sgl = (void *)__get_free_pages(GFP_KERNEL, + get_order(SGL_SIZE)); + ctx->sglPA = 0; + BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE)); + if (!ctx->sgl) { + for (; i >= 0; --i, --ctx) { + free_pages((unsigned long)ctx->sgl, + get_order(SGL_SIZE)); + ctx->sgl = NULL; + } + return -ENOMEM; + } + } + + return 0; +} + +static int __devinit pvscsi_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct pvscsi_adapter *adapter; + struct Scsi_Host *host; + unsigned int i; + unsigned long flags = 0; + int error; + + error = -ENODEV; + + if (pci_enable_device(pdev)) + return error; + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 && + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { + printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n"); + } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 && + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) { + printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n"); + } else { + printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n"); + goto out_disable_device; + } + + pvscsi_template.can_queue = + min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) * + PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; + pvscsi_template.cmd_per_lun = + min(pvscsi_template.can_queue, pvscsi_cmd_per_lun); + host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter)); + if (!host) { + printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n"); + goto out_disable_device; + } + + adapter = shost_priv(host); + memset(adapter, 0, sizeof(*adapter)); + adapter->dev = pdev; + adapter->host = host; + + spin_lock_init(&adapter->hw_lock); + + host->max_channel = 0; + host->max_id = 16; + host->max_lun = 1; + host->max_cmd_len = 16; + + adapter->rev = pdev->revision; + + if (pci_request_regions(pdev, "vmw_pvscsi")) { + printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n"); + goto out_free_host; + } + + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)) + continue; + + if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE) + continue; + + break; + } + + if (i == DEVICE_COUNT_RESOURCE) { + printk(KERN_ERR + "vmw_pvscsi: adapter has no suitable MMIO region\n"); + goto out_release_resources; + } + + adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE); + + if (!adapter->mmioBase) { + printk(KERN_ERR + 
"vmw_pvscsi: can't iomap for BAR %d memsize %lu\n", + i, PVSCSI_MEM_SPACE_SIZE); + goto out_release_resources; + } + + pci_set_master(pdev); + pci_set_drvdata(pdev, host); + + ll_adapter_reset(adapter); + + adapter->use_msg = pvscsi_setup_msg_workqueue(adapter); + + error = pvscsi_allocate_rings(adapter); + if (error) { + printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n"); + goto out_release_resources; + } + + /* + * From this point on we should reset the adapter if anything goes + * wrong. + */ + pvscsi_setup_all_rings(adapter); + + adapter->cmd_map = kcalloc(adapter->req_depth, + sizeof(struct pvscsi_ctx), GFP_KERNEL); + if (!adapter->cmd_map) { + printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n"); + error = -ENOMEM; + goto out_reset_adapter; + } + + INIT_LIST_HEAD(&adapter->cmd_pool); + for (i = 0; i < adapter->req_depth; i++) { + struct pvscsi_ctx *ctx = adapter->cmd_map + i; + list_add(&ctx->list, &adapter->cmd_pool); + } + + error = pvscsi_allocate_sg(adapter); + if (error) { + printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n"); + goto out_reset_adapter; + } + + if (!pvscsi_disable_msix && + pvscsi_setup_msix(adapter, &adapter->irq) == 0) { + printk(KERN_INFO "vmw_pvscsi: using MSI-X\n"); + adapter->use_msix = 1; + } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) { + printk(KERN_INFO "vmw_pvscsi: using MSI\n"); + adapter->use_msi = 1; + adapter->irq = pdev->irq; + } else { + printk(KERN_INFO "vmw_pvscsi: using INTx\n"); + adapter->irq = pdev->irq; + flags = IRQF_SHARED; + } + + error = request_irq(adapter->irq, pvscsi_isr, flags, + "vmw_pvscsi", adapter); + if (error) { + printk(KERN_ERR + "vmw_pvscsi: unable to request IRQ: %d\n", error); + adapter->irq = 0; + goto out_reset_adapter; + } + + error = scsi_add_host(host, &pdev->dev); + if (error) { + printk(KERN_ERR + "vmw_pvscsi: scsi_add_host failed: %d\n", error); + goto out_reset_adapter; + } + + dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n", + adapter->rev, host->host_no); + + pvscsi_unmask_intr(adapter); + + scsi_scan_host(host); + + return 0; + +out_reset_adapter: + ll_adapter_reset(adapter); +out_release_resources: + pvscsi_release_resources(adapter); +out_free_host: + scsi_host_put(host); +out_disable_device: + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + + return error; +} + +static void __pvscsi_shutdown(struct pvscsi_adapter *adapter) +{ + pvscsi_mask_intr(adapter); + + if (adapter->workqueue) + flush_workqueue(adapter->workqueue); + + pvscsi_shutdown_intr(adapter); + + pvscsi_process_request_ring(adapter); + pvscsi_process_completion_ring(adapter); + ll_adapter_reset(adapter); +} + +static void pvscsi_shutdown(struct pci_dev *dev) +{ + struct Scsi_Host *host = pci_get_drvdata(dev); + struct pvscsi_adapter *adapter = shost_priv(host); + + __pvscsi_shutdown(adapter); +} + +static void pvscsi_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct pvscsi_adapter *adapter = shost_priv(host); + + scsi_remove_host(host); + + __pvscsi_shutdown(adapter); + pvscsi_release_resources(adapter); + + scsi_host_put(host); + + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); +} + +static struct pci_driver pvscsi_pci_driver = { + .name = "vmw_pvscsi", + .id_table = pvscsi_pci_tbl, + .probe = pvscsi_probe, + .remove = __devexit_p(pvscsi_remove), + .shutdown = pvscsi_shutdown, +}; + +static int __init pvscsi_init(void) +{ + pr_info("%s - version %s\n", + PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING); + return 
pci_register_driver(&pvscsi_pci_driver); +} + +static void __exit pvscsi_exit(void) +{ + pci_unregister_driver(&pvscsi_pci_driver); +} + +module_init(pvscsi_init); +module_exit(pvscsi_exit); diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h new file mode 100644 index 000000000000..62e36e75715e --- /dev/null +++ b/drivers/scsi/vmw_pvscsi.h @@ -0,0 +1,397 @@ +/* + * VMware PVSCSI header file + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained by: Alok N Kataria + * + */ + +#ifndef _VMW_PVSCSI_H_ +#define _VMW_PVSCSI_H_ + +#include + +#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k" + +#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 + +#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */ + +#define PCI_VENDOR_ID_VMWARE 0x15AD +#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0 + +/* + * host adapter status/error codes + */ +enum HostBusAdapterStatus { + BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */ + BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a, + BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b, + BTSTAT_DATA_UNDERRUN = 0x0c, + BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */ + BTSTAT_DATARUN = 0x12, /* data overrun/underrun */ + BTSTAT_BUSFREE = 0x13, /* unexpected bus free */ + BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */ + BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */ + BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */ + BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */ + BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */ + BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */ + BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */ + BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */ + BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */ + BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */ + BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */ + BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */ + BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */ + BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */ + BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */ +}; + +/* + * Register offsets. + * + * These registers are accessible both via i/o space and mm i/o. 
+ */ + +enum PVSCSIRegOffset { + PVSCSI_REG_OFFSET_COMMAND = 0x0, + PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4, + PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8, + PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100, + PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104, + PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108, + PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c, + PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c, + PVSCSI_REG_OFFSET_INTR_MASK = 0x2010, + PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014, + PVSCSI_REG_OFFSET_DEBUG = 0x3018, + PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018, +}; + +/* + * Virtual h/w commands. + */ + +enum PVSCSICommands { + PVSCSI_CMD_FIRST = 0, /* has to be first */ + + PVSCSI_CMD_ADAPTER_RESET = 1, + PVSCSI_CMD_ISSUE_SCSI = 2, + PVSCSI_CMD_SETUP_RINGS = 3, + PVSCSI_CMD_RESET_BUS = 4, + PVSCSI_CMD_RESET_DEVICE = 5, + PVSCSI_CMD_ABORT_CMD = 6, + PVSCSI_CMD_CONFIG = 7, + PVSCSI_CMD_SETUP_MSG_RING = 8, + PVSCSI_CMD_DEVICE_UNPLUG = 9, + + PVSCSI_CMD_LAST = 10 /* has to be last */ +}; + +/* + * Command descriptor for PVSCSI_CMD_RESET_DEVICE -- + */ + +struct PVSCSICmdDescResetDevice { + u32 target; + u8 lun[8]; +} __packed; + +/* + * Command descriptor for PVSCSI_CMD_ABORT_CMD -- + * + * - currently does not support specifying the LUN. + * - _pad should be 0. + */ + +struct PVSCSICmdDescAbortCmd { + u64 context; + u32 target; + u32 _pad; +} __packed; + +/* + * Command descriptor for PVSCSI_CMD_SETUP_RINGS -- + * + * Notes: + * - reqRingNumPages and cmpRingNumPages need to be power of two. + * - reqRingNumPages and cmpRingNumPages need to be different from 0, + * - reqRingNumPages and cmpRingNumPages need to be inferior to + * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES. + */ + +#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32 +struct PVSCSICmdDescSetupRings { + u32 reqRingNumPages; + u32 cmpRingNumPages; + u64 ringsStatePPN; + u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES]; + u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES]; +} __packed; + +/* + * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING -- + * + * Notes: + * - this command was not supported in the initial revision of the h/w + * interface. Before using it, you need to check that it is supported by + * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then + * immediately after read the 'command status' register: + * * a value of -1 means that the cmd is NOT supported, + * * a value != -1 means that the cmd IS supported. + * If it's supported the 'command status' register should return: + * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32). + * - this command should be issued _after_ the usual SETUP_RINGS so that the + * RingsState page is already setup. If not, the command is a nop. + * - numPages needs to be a power of two, + * - numPages needs to be different from 0, + * - _pad should be zero. + */ + +#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16 + +struct PVSCSICmdDescSetupMsgRing { + u32 numPages; + u32 _pad; + u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES]; +} __packed; + +enum PVSCSIMsgType { + PVSCSI_MSG_DEV_ADDED = 0, + PVSCSI_MSG_DEV_REMOVED = 1, + PVSCSI_MSG_LAST = 2, +}; + +/* + * Msg descriptor. + * + * sizeof(struct PVSCSIRingMsgDesc) == 128. + * + * - type is of type enum PVSCSIMsgType. + * - the content of args depend on the type of event being delivered. + */ + +struct PVSCSIRingMsgDesc { + u32 type; + u32 args[31]; +} __packed; + +struct PVSCSIMsgDescDevStatusChanged { + u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */ + u32 bus; + u32 target; + u8 lun[8]; + u32 pad[27]; +} __packed; + +/* + * Rings state. + * + * - the fields: + * . msgProdIdx, + * . 
msgConsIdx, + * . msgNumEntriesLog2, + * .. are only used once the SETUP_MSG_RING cmd has been issued. + * - '_pad' helps to ensure that the msg related fields are on their own + * cache-line. + */ + +struct PVSCSIRingsState { + u32 reqProdIdx; + u32 reqConsIdx; + u32 reqNumEntriesLog2; + + u32 cmpProdIdx; + u32 cmpConsIdx; + u32 cmpNumEntriesLog2; + + u8 _pad[104]; + + u32 msgProdIdx; + u32 msgConsIdx; + u32 msgNumEntriesLog2; +} __packed; + +/* + * Request descriptor. + * + * sizeof(RingReqDesc) = 128 + * + * - context: is a unique identifier of a command. It could normally be any + * 64bit value, however we currently store it in the serialNumber variable + * of struct SCSI_Command, so we have the following restrictions due to the + * way this field is handled in the vmkernel storage stack: + * * this value can't be 0, + * * the upper 32bit need to be 0 since serialNumber is as a u32. + * Currently tracked as PR 292060. + * - dataLen: contains the total number of bytes that need to be transferred. + * - dataAddr: + * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first + * s/g table segment, each s/g segment is entirely contained on a single + * page of physical memory, + * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of + * the buffer used for the DMA transfer, + * - flags: + * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above, + * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved, + * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory, + * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device, + * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than + * 16bytes. To be specified. + * - vcpuHint: vcpuId of the processor that will be most likely waiting for the + * completion of the i/o. For guest OSes that use lowest priority message + * delivery mode (such as windows), we use this "hint" to deliver the + * completion action to the proper vcpu. For now, we can use the vcpuId of + * the processor that initiated the i/o as a likely candidate for the vcpu + * that will be waiting for the completion.. + * - bus should be 0: we currently only support bus 0 for now. + * - unused should be zero'd. + */ + +#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0) +#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1) +#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2) +#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3) +#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4) + +struct PVSCSIRingReqDesc { + u64 context; + u64 dataAddr; + u64 dataLen; + u64 senseAddr; + u32 senseLen; + u32 flags; + u8 cdb[16]; + u8 cdbLen; + u8 lun[8]; + u8 tag; + u8 bus; + u8 target; + u8 vcpuHint; + u8 unused[59]; +} __packed; + +/* + * Scatter-gather list management. + * + * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the + * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g + * table segment. + * + * - each segment of the s/g table contain a succession of struct + * PVSCSISGElement. + * - each segment is entirely contained on a single physical page of memory. + * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in + * PVSCSISGElement.flags and in this case: + * * addr is the PA of the next s/g segment, + * * length is undefined, assumed to be 0. + */ + +struct PVSCSISGElement { + u64 addr; + u32 length; + u32 flags; +} __packed; + +/* + * Completion descriptor. + * + * sizeof(RingCmpDesc) = 32 + * + * - context: identifier of the command. 
The same thing that was specified + * under "context" as part of struct RingReqDesc at initiation time, + * - dataLen: number of bytes transferred for the actual i/o operation, + * - senseLen: number of bytes written into the sense buffer, + * - hostStatus: adapter status, + * - scsiStatus: device status, + * - _pad should be zero. + */ + +struct PVSCSIRingCmpDesc { + u64 context; + u64 dataLen; + u32 senseLen; + u16 hostStatus; + u16 scsiStatus; + u32 _pad[2]; +} __packed; + +/* + * Interrupt status / IRQ bits. + */ + +#define PVSCSI_INTR_CMPL_0 (1 << 0) +#define PVSCSI_INTR_CMPL_1 (1 << 1) +#define PVSCSI_INTR_CMPL_MASK MASK(2) + +#define PVSCSI_INTR_MSG_0 (1 << 2) +#define PVSCSI_INTR_MSG_1 (1 << 3) +#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2) + +#define PVSCSI_INTR_ALL_SUPPORTED MASK(4) + +/* + * Number of MSI-X vectors supported. + */ +#define PVSCSI_MAX_INTRS 24 + +/* + * Enumeration of supported MSI-X vectors + */ +#define PVSCSI_VECTOR_COMPLETION 0 + +/* + * Misc constants for the rings. + */ + +#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES +#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES +#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES + +#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \ + (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc)) + +#define PVSCSI_MAX_REQ_QUEUE_DEPTH \ + (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE) + +#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1 +#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1 +#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2 +#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2 +#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2 + +enum PVSCSIMemSpace { + PVSCSI_MEM_SPACE_COMMAND_PAGE = 0, + PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1, + PVSCSI_MEM_SPACE_MISC_PAGE = 2, + PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4, + PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6, + PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7, +}; + +#define PVSCSI_MEM_SPACE_NUM_PAGES \ + (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \ + PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \ + PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \ + PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \ + PVSCSI_MEM_SPACE_MSIX_NUM_PAGES) + +#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE) + +#endif /* _VMW_PVSCSI_H_ */ -- cgit v1.2.3-59-g8ed1b From 1e49f78505b2c4df193614d774bf46d067cda7d8 Mon Sep 17 00:00:00 2001 From: Douglas Gilbert Date: Thu, 29 Oct 2009 01:48:31 -0400 Subject: [SCSI] scsi_debug: fix Thin provisioning support While testing scsi_debug with these patches I found a problem with the Block Limits VPD page function. The length returned by the inquiry_evpd_b0() function was too short. A patch to fix that and a cosmetic change (that the form factor of scsi_debug is less than 1.8 inches) is attached. 
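For reference, the SBC-3 Block Limits page carries 0x3c (60) bytes of
payload after the 4-byte VPD header, so the emulated target has to return
60 bytes for the page to be complete. The sketch below is illustrative
only (the array and function names are placeholders, not scsi_debug code);
it shows the shape of the fix, where the returned length is taken from the
padded payload array instead of the original 12 initialised bytes:

    #include <string.h>

    /* hypothetical 60-byte Block Limits payload; in scsi_debug only the
       first twelve bytes carry values, the rest is zero padding */
    static unsigned char blk_limits_payload[0x3c];

    /* copy the payload that follows the 4-byte VPD header and return its
       length, which must match the page's declared 0x3c PAGE LENGTH */
    static int fill_block_limits(unsigned char *arr)
    {
            memcpy(arr, blk_limits_payload, sizeof(blk_limits_payload));
            return sizeof(blk_limits_payload); /* 60, was 12 before the fix */
    }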
Signed-off-by: Douglas Gilbert Signed-off-by: James Bottomley --- drivers/scsi/scsi_debug.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index cb4bf16b4e66..0b575c871007 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -685,10 +685,12 @@ static int inquiry_evpd_89(unsigned char * arr) } +/* Block limits VPD page (SBC-3) */ static unsigned char vpdb0_data[] = { - /* from 4th byte */ 0,0,0,4, - 0,0,0x4,0, - 0,0,0,64, + /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, }; static int inquiry_evpd_b0(unsigned char * arr) @@ -731,11 +733,14 @@ static int inquiry_evpd_b0(unsigned char * arr) return sizeof(vpdb0_data); } +/* Block device characteristics VPD page (SBC-3) */ static int inquiry_evpd_b1(unsigned char *arr) { memset(arr, 0, 0x3c); arr[0] = 0; - arr[1] = 1; + arr[1] = 1; /* non rotating medium (e.g. solid state) */ + arr[2] = 0; + arr[3] = 5; /* less than 1.8" */ return 0x3c; } -- cgit v1.2.3-59-g8ed1b From f619106bdd9d197c947f07108af57946f19a7f7e Mon Sep 17 00:00:00 2001 From: adam radford Date: Fri, 23 Oct 2009 14:52:33 -0700 Subject: [SCSI] 3w-sas: Add new driver for LSI 3ware 9750 [jejb: fix up for new queue depth code] Signed-off-by: Adam Radford Signed-off-by: James Bottomley --- drivers/scsi/3w-sas.c | 1924 +++++++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/3w-sas.h | 396 ++++++++++ drivers/scsi/Kconfig | 11 + drivers/scsi/Makefile | 1 + 4 files changed, 2332 insertions(+) create mode 100644 drivers/scsi/3w-sas.c create mode 100644 drivers/scsi/3w-sas.h diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c new file mode 100644 index 000000000000..4d314d740de4 --- /dev/null +++ b/drivers/scsi/3w-sas.c @@ -0,0 +1,1924 @@ +/* + 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux. + + Written By: Adam Radford + + Copyright (C) 2009 LSI Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. 
+ + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Controllers supported by this driver: + + LSI 3ware 9750 6Gb/s SAS/SATA-RAID + + Bugs/Comments/Suggestions should be mailed to: + linuxraid@lsi.com + + For more information, goto: + http://www.lsi.com + + History + ------- + 3.26.02.000 - Initial driver release. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "3w-sas.h" + +/* Globals */ +#define TW_DRIVER_VERSION "3.26.02.000" +static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT]; +static unsigned int twl_device_extension_count; +static int twl_major = -1; +extern struct timezone sys_tz; + +/* Module parameters */ +MODULE_AUTHOR ("LSI"); +MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(TW_DRIVER_VERSION); + +static int use_msi; +module_param(use_msi, int, S_IRUGO); +MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0"); + +/* Function prototypes */ +static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset); + +/* Functions */ + +/* This function returns AENs through sysfs */ +static ssize_t twl_sysfs_aen_read(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *outbuf, loff_t offset, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *shost = class_to_shost(dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata; + unsigned long flags = 0; + ssize_t ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + return ret; +} /* End twl_sysfs_aen_read() */ + +/* aen_read sysfs attribute initializer */ +static struct bin_attribute twl_sysfs_aen_read_attr = { + .attr = { + .name = "3ware_aen_read", + .mode = S_IRUSR, + }, + .size = 0, + .read = twl_sysfs_aen_read +}; + +/* This function returns driver compatibility info through sysfs */ +static ssize_t twl_sysfs_compat_info(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *outbuf, loff_t offset, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *shost = class_to_shost(dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata; + unsigned long flags = 0; + ssize_t ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info)); + 
spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + return ret; +} /* End twl_sysfs_compat_info() */ + +/* compat_info sysfs attribute initializer */ +static struct bin_attribute twl_sysfs_compat_info_attr = { + .attr = { + .name = "3ware_compat_info", + .mode = S_IRUSR, + }, + .size = 0, + .read = twl_sysfs_compat_info +}; + +/* Show some statistics about the card */ +static ssize_t twl_show_stats(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *host = class_to_shost(dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + unsigned long flags = 0; + ssize_t len; + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n" + "Current commands posted: %4d\n" + "Max commands posted: %4d\n" + "Last sgl length: %4d\n" + "Max sgl length: %4d\n" + "Last sector count: %4d\n" + "Max sector count: %4d\n" + "SCSI Host Resets: %4d\n" + "AEN's: %4d\n", + TW_DRIVER_VERSION, + tw_dev->posted_request_count, + tw_dev->max_posted_request_count, + tw_dev->sgl_entries, + tw_dev->max_sgl_entries, + tw_dev->sector_count, + tw_dev->max_sector_count, + tw_dev->num_resets, + tw_dev->aen_count); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + return len; +} /* End twl_show_stats() */ + +/* This function will set a devices queue depth */ +static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth, + int reason) +{ + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + + if (queue_depth > TW_Q_LENGTH-2) + queue_depth = TW_Q_LENGTH-2; + scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); + return queue_depth; +} /* End twl_change_queue_depth() */ + +/* stats sysfs attribute initializer */ +static struct device_attribute twl_host_stats_attr = { + .attr = { + .name = "3ware_stats", + .mode = S_IRUGO, + }, + .show = twl_show_stats +}; + +/* Host attributes initializer */ +static struct device_attribute *twl_host_attrs[] = { + &twl_host_stats_attr, + NULL, +}; + +/* This function will look up an AEN severity string */ +static char *twl_aen_severity_lookup(unsigned char severity_code) +{ + char *retval = NULL; + + if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) || + (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG)) + goto out; + + retval = twl_aen_severity_table[severity_code]; +out: + return retval; +} /* End twl_aen_severity_lookup() */ + +/* This function will queue an event */ +static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header) +{ + u32 local_time; + struct timeval time; + TW_Event *event; + unsigned short aen; + char host[16]; + char *error_str; + + tw_dev->aen_count++; + + /* Fill out event info */ + event = tw_dev->event_queue[tw_dev->error_index]; + + host[0] = '\0'; + if (tw_dev->host) + sprintf(host, " scsi%d:", tw_dev->host->host_no); + + aen = le16_to_cpu(header->status_block.error); + memset(event, 0, sizeof(TW_Event)); + + event->severity = TW_SEV_OUT(header->status_block.severity__reserved); + do_gettimeofday(&time); + local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60)); + event->time_stamp_sec = local_time; + event->aen_code = aen; + event->retrieved = TW_AEN_NOT_RETRIEVED; + event->sequence_id = tw_dev->error_sequence_id; + tw_dev->error_sequence_id++; + + /* Check for embedded error string */ + error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]); + + header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0'; + 
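+	/* The firmware stores a second NUL-terminated string (error_str,
+	   located above) directly after err_specific_desc; the memcpy below
+	   copies both strings into the event parameter data */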
event->parameter_len = strlen(header->err_specific_desc); + memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str)); + if (event->severity != TW_AEN_SEVERITY_DEBUG) + printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n", + host, + twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)), + TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str, + header->err_specific_desc); + else + tw_dev->aen_count--; + + tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH; +} /* End twl_aen_queue_event() */ + +/* This function will attempt to post a command packet to the board */ +static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id) +{ + dma_addr_t command_que_value; + + command_que_value = tw_dev->command_packet_phys[request_id]; + command_que_value += TW_COMMAND_OFFSET; + + /* First write upper 4 bytes */ + writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev)); + /* Then the lower 4 bytes */ + writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev)); + + tw_dev->state[request_id] = TW_S_POSTED; + tw_dev->posted_request_count++; + if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) + tw_dev->max_posted_request_count = tw_dev->posted_request_count; + + return 0; +} /* End twl_post_command_packet() */ + +/* This function will perform a pci-dma mapping for a scatter gather list */ +static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) +{ + int use_sg; + struct scsi_cmnd *cmd = tw_dev->srb[request_id]; + + use_sg = scsi_dma_map(cmd); + if (!use_sg) + return 0; + else if (use_sg < 0) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list"); + return 0; + } + + cmd->SCp.phase = TW_PHASE_SGLIST; + cmd->SCp.have_data_in = use_sg; + + return use_sg; +} /* End twl_map_scsi_sg_data() */ + +/* This function hands scsi cdb's to the firmware */ +static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) +{ + TW_Command_Full *full_command_packet; + TW_Command_Apache *command_packet; + int i, sg_count; + struct scsi_cmnd *srb = NULL; + struct scatterlist *sglist = NULL, *sg; + int retval = 1; + + if (tw_dev->srb[request_id]) { + srb = tw_dev->srb[request_id]; + if (scsi_sglist(srb)) + sglist = scsi_sglist(srb); + } + + /* Initialize command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + full_command_packet->header.header_desc.size_header = 128; + full_command_packet->header.status_block.error = 0; + full_command_packet->header.status_block.severity__reserved = 0; + + command_packet = &full_command_packet->command.newcommand; + command_packet->status = 0; + command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI); + + /* We forced 16 byte cdb use earlier */ + if (!cdb) + memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN); + else + memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN); + + if (srb) { + command_packet->unit = srb->device->id; + command_packet->request_id__lunl = + cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id)); + } else { + command_packet->request_id__lunl = + cpu_to_le16(TW_REQ_LUN_IN(0, request_id)); + command_packet->unit = 0; + } + + command_packet->sgl_offset = 16; + + if (!sglistarg) { + /* Map sglist from scsi layer to cmd packet */ + if (scsi_sg_count(srb)) { + sg_count = twl_map_scsi_sg_data(tw_dev, request_id); + if (sg_count == 0) + goto out; + + 
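+			/* Copy each DMA-mapped segment's bus address and length
+			   into the firmware command packet's scatter/gather list */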
scsi_for_each_sg(srb, sg, sg_count, i) { + command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg)); + command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg)); + } + command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]))); + } + } else { + /* Internal cdb post */ + for (i = 0; i < use_sg; i++) { + command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address); + command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length); + } + command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg)); + } + + /* Update some stats */ + if (srb) { + tw_dev->sector_count = scsi_bufflen(srb) / 512; + if (tw_dev->sector_count > tw_dev->max_sector_count) + tw_dev->max_sector_count = tw_dev->sector_count; + tw_dev->sgl_entries = scsi_sg_count(srb); + if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) + tw_dev->max_sgl_entries = tw_dev->sgl_entries; + } + + /* Now post the command to the board */ + retval = twl_post_command_packet(tw_dev, request_id); + +out: + return retval; +} /* End twl_scsiop_execute_scsi() */ + +/* This function will read the aen queue from the isr */ +static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) +{ + char cdb[TW_MAX_CDB_LEN]; + TW_SG_Entry_ISO sglist[1]; + TW_Command_Full *full_command_packet; + int retval = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + + /* Initialize cdb */ + memset(&cdb, 0, TW_MAX_CDB_LEN); + cdb[0] = REQUEST_SENSE; /* opcode */ + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ + + /* Initialize sglist */ + memset(&sglist, 0, sizeof(TW_SG_Entry_ISO)); + sglist[0].length = TW_SECTOR_SIZE; + sglist[0].address = tw_dev->generic_buffer_phys[request_id]; + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + /* Now post the command packet */ + if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue"); + goto out; + } + retval = 0; +out: + return retval; +} /* End twl_aen_read_queue() */ + +/* This function will sync firmware time with the host time */ +static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id) +{ + u32 schedulertime; + struct timeval utc; + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Param_Apache *param; + u32 local_time; + + /* Fill out the command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + command_packet = &full_command_packet->command.oldcommand; + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM); + command_packet->request_id = request_id; + command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); + command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE); + command_packet->size = TW_COMMAND_SIZE; + command_packet->byte6_offset.parameter_count = cpu_to_le16(1); + + /* Setup the param */ + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; + memset(param, 0, TW_SECTOR_SIZE); + param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */ + param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */ + param->parameter_size_bytes = cpu_to_le16(4); + + /* Convert system time in UTC to local time seconds since last + Sunday 12:00AM */ 
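+	/* The Unix epoch (Jan 1 1970) fell on a Thursday, so Sunday 12:00AM
+	   occurs three days into the epoch week; subtracting 3*86400 seconds
+	   and reducing modulo 604800 (seconds per week) gives the seconds
+	   elapsed since the most recent Sunday midnight, local time */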
+ do_gettimeofday(&utc); + local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60)); + schedulertime = local_time - (3 * 86400); + schedulertime = cpu_to_le32(schedulertime % 604800); + + memcpy(param->data, &schedulertime, sizeof(u32)); + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + /* Now post the command */ + twl_post_command_packet(tw_dev, request_id); +} /* End twl_aen_sync_time() */ + +/* This function will assign an available request id */ +static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id) +{ + *request_id = tw_dev->free_queue[tw_dev->free_head]; + tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH; + tw_dev->state[*request_id] = TW_S_STARTED; +} /* End twl_get_request_id() */ + +/* This function will free a request id */ +static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id) +{ + tw_dev->free_queue[tw_dev->free_tail] = request_id; + tw_dev->state[request_id] = TW_S_FINISHED; + tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; +} /* End twl_free_request_id() */ + +/* This function will complete an aen request from the isr */ +static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Command_Apache_Header *header; + unsigned short aen; + int retval = 1; + + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; + tw_dev->posted_request_count--; + aen = le16_to_cpu(header->status_block.error); + full_command_packet = tw_dev->command_packet_virt[request_id]; + command_packet = &full_command_packet->command.oldcommand; + + /* First check for internal completion of set param for time sync */ + if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) { + /* Keep reading the queue in case there are more aen's */ + if (twl_aen_read_queue(tw_dev, request_id)) + goto out2; + else { + retval = 0; + goto out; + } + } + + switch (aen) { + case TW_AEN_QUEUE_EMPTY: + /* Quit reading the queue if this is the last one */ + break; + case TW_AEN_SYNC_TIME_WITH_HOST: + twl_aen_sync_time(tw_dev, request_id); + retval = 0; + goto out; + default: + twl_aen_queue_event(tw_dev, header); + + /* If there are more aen's, keep reading the queue */ + if (twl_aen_read_queue(tw_dev, request_id)) + goto out2; + else { + retval = 0; + goto out; + } + } + retval = 0; +out2: + tw_dev->state[request_id] = TW_S_COMPLETED; + twl_free_request_id(tw_dev, request_id); + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); +out: + return retval; +} /* End twl_aen_complete() */ + +/* This function will poll for a response */ +static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) +{ + unsigned long before; + dma_addr_t mfa; + u32 regh, regl; + u32 response; + int retval = 1; + int found = 0; + + before = jiffies; + + while (!found) { + if (sizeof(dma_addr_t) > 4) { + regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev)); + regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); + mfa = ((u64)regh << 32) | regl; + } else + mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); + + response = (u32)mfa; + + if (TW_RESID_OUT(response) == request_id) + found = 1; + + if (time_after(jiffies, before + HZ * seconds)) + goto out; + + msleep(50); + } + retval = 0; +out: + return retval; +} /* End twl_poll_response() */ + +/* This function will drain the aen queue */ +static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) +{ + int request_id = 0; + char cdb[TW_MAX_CDB_LEN]; + TW_SG_Entry_ISO sglist[1]; + 
int finished = 0, count = 0; + TW_Command_Full *full_command_packet; + TW_Command_Apache_Header *header; + unsigned short aen; + int first_reset = 0, queue = 0, retval = 1; + + if (no_check_reset) + first_reset = 0; + else + first_reset = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + + /* Initialize cdb */ + memset(&cdb, 0, TW_MAX_CDB_LEN); + cdb[0] = REQUEST_SENSE; /* opcode */ + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ + + /* Initialize sglist */ + memset(&sglist, 0, sizeof(TW_SG_Entry_ISO)); + sglist[0].length = TW_SECTOR_SIZE; + sglist[0].address = tw_dev->generic_buffer_phys[request_id]; + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + do { + /* Send command to the board */ + if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense"); + goto out; + } + + /* Now poll for completion */ + if (twl_poll_response(tw_dev, request_id, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue"); + tw_dev->posted_request_count--; + goto out; + } + + tw_dev->posted_request_count--; + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; + aen = le16_to_cpu(header->status_block.error); + queue = 0; + count++; + + switch (aen) { + case TW_AEN_QUEUE_EMPTY: + if (first_reset != 1) + goto out; + else + finished = 1; + break; + case TW_AEN_SOFT_RESET: + if (first_reset == 0) + first_reset = 1; + else + queue = 1; + break; + case TW_AEN_SYNC_TIME_WITH_HOST: + break; + default: + queue = 1; + } + + /* Now queue an event info */ + if (queue) + twl_aen_queue_event(tw_dev, header); + } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN)); + + if (count == TW_MAX_AEN_DRAIN) + goto out; + + retval = 0; +out: + tw_dev->state[request_id] = TW_S_INITIAL; + return retval; +} /* End twl_aen_drain_queue() */ + +/* This function will allocate memory and check if it is correctly aligned */ +static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) +{ + int i; + dma_addr_t dma_handle; + unsigned long *cpu_addr; + int retval = 1; + + cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle); + if (!cpu_addr) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); + goto out; + } + + memset(cpu_addr, 0, size*TW_Q_LENGTH); + + for (i = 0; i < TW_Q_LENGTH; i++) { + switch(which) { + case 0: + tw_dev->command_packet_phys[i] = dma_handle+(i*size); + tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size)); + break; + case 1: + tw_dev->generic_buffer_phys[i] = dma_handle+(i*size); + tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size)); + break; + case 2: + tw_dev->sense_buffer_phys[i] = dma_handle+(i*size); + tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size)); + break; + } + } + retval = 0; +out: + return retval; +} /* End twl_allocate_memory() */ + +/* This function will load the request id and various sgls for ioctls */ +static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length) +{ + TW_Command *oldcommand; + TW_Command_Apache *newcommand; + TW_SG_Entry_ISO *sgl; + unsigned int pae = 0; + + if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4)) + pae = 1; + + if 
(TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { + newcommand = &full_command_packet->command.newcommand; + newcommand->request_id__lunl = + cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id)); + if (length) { + newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); + newcommand->sg_list[0].length = TW_CPU_TO_SGL(length); + } + newcommand->sgl_entries__lunh = + cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0)); + } else { + oldcommand = &full_command_packet->command.oldcommand; + oldcommand->request_id = request_id; + + if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) { + /* Load the sg list */ + sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0)); + sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); + sgl->length = TW_CPU_TO_SGL(length); + oldcommand->size += pae; + oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0; + } + } +} /* End twl_load_sgl() */ + +/* This function handles ioctl for the character device + This interface is used by smartmontools open source software */ +static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +{ + long timeout; + unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; + dma_addr_t dma_handle; + int request_id = 0; + TW_Ioctl_Driver_Command driver_command; + TW_Ioctl_Buf_Apache *tw_ioctl; + TW_Command_Full *full_command_packet; + TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)]; + int retval = -EFAULT; + void __user *argp = (void __user *)arg; + + /* Only let one of these through at a time */ + if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { + retval = -EINTR; + goto out; + } + + /* First copy down the driver command */ + if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command))) + goto out2; + + /* Check data buffer size */ + if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) { + retval = -EINVAL; + goto out2; + } + + /* Hardware can only do multiple of 512 byte transfers */ + data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511; + + /* Now allocate ioctl buf memory */ + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL); + if (!cpu_addr) { + retval = -ENOMEM; + goto out2; + } + + tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr; + + /* Now copy down the entire ioctl */ + if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1)) + goto out3; + + /* See which ioctl we are doing */ + switch (cmd) { + case TW_IOCTL_FIRMWARE_PASS_THROUGH: + spin_lock_irqsave(tw_dev->host->host_lock, flags); + twl_get_request_id(tw_dev, &request_id); + + /* Flag internal command */ + tw_dev->srb[request_id] = NULL; + + /* Flag chrdev ioctl */ + tw_dev->chrdev_request_id = request_id; + + full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command; + + /* Load request id and sglist for both command types */ + twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted); + + memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full)); + + /* Now post the command packet to the controller */ + twl_post_command_packet(tw_dev, request_id); + spin_unlock_irqrestore(tw_dev->host->host_lock, 
flags); + + timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ; + + /* Now wait for command to complete */ + timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout); + + /* We timed out, and didn't get an interrupt */ + if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) { + /* Now we need to reset the board */ + printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n", + tw_dev->host->host_no, TW_DRIVER, 0x6, + cmd); + retval = -EIO; + twl_reset_device_extension(tw_dev, 1); + goto out3; + } + + /* Now copy in the command packet response */ + memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full)); + + /* Now complete the io */ + spin_lock_irqsave(tw_dev->host->host_lock, flags); + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_COMPLETED; + twl_free_request_id(tw_dev, request_id); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + break; + default: + retval = -ENOTTY; + goto out3; + } + + /* Now copy the entire response to userspace */ + if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0) + retval = 0; +out3: + /* Now free ioctl buf memory */ + dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle); +out2: + mutex_unlock(&tw_dev->ioctl_lock); +out: + return retval; +} /* End twl_chrdev_ioctl() */ + +/* This function handles open for the character device */ +static int twl_chrdev_open(struct inode *inode, struct file *file) +{ + unsigned int minor_number; + int retval = -ENODEV; + + if (!capable(CAP_SYS_ADMIN)) { + retval = -EACCES; + goto out; + } + + cycle_kernel_lock(); + minor_number = iminor(inode); + if (minor_number >= twl_device_extension_count) + goto out; + retval = 0; +out: + return retval; +} /* End twl_chrdev_open() */ + +/* File operations struct for character device */ +static const struct file_operations twl_fops = { + .owner = THIS_MODULE, + .ioctl = twl_chrdev_ioctl, + .open = twl_chrdev_open, + .release = NULL +}; + +/* This function passes sense data from firmware to scsi layer */ +static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host) +{ + TW_Command_Apache_Header *header; + TW_Command_Full *full_command_packet; + unsigned short error; + char *error_str; + int retval = 1; + + header = tw_dev->sense_buffer_virt[i]; + full_command_packet = tw_dev->command_packet_virt[request_id]; + + /* Get embedded firmware error string */ + error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]); + + /* Don't print error for Logical unit not supported during rollcall */ + error = le16_to_cpu(header->status_block.error); + if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) { + if (print_host) + printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n", + tw_dev->host->host_no, + TW_MESSAGE_SOURCE_CONTROLLER_ERROR, + header->status_block.error, + error_str, + header->err_specific_desc); + else + printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n", + TW_MESSAGE_SOURCE_CONTROLLER_ERROR, + header->status_block.error, + error_str, + header->err_specific_desc); + } + + if (copy_sense) { + memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH); + tw_dev->srb[request_id]->result = 
(full_command_packet->command.newcommand.status << 1); + goto out; + } +out: + return retval; +} /* End twl_fill_sense() */ + +/* This function will free up device extension resources */ +static void twl_free_device_extension(TW_Device_Extension *tw_dev) +{ + if (tw_dev->command_packet_virt[0]) + pci_free_consistent(tw_dev->tw_pci_dev, + sizeof(TW_Command_Full)*TW_Q_LENGTH, + tw_dev->command_packet_virt[0], + tw_dev->command_packet_phys[0]); + + if (tw_dev->generic_buffer_virt[0]) + pci_free_consistent(tw_dev->tw_pci_dev, + TW_SECTOR_SIZE*TW_Q_LENGTH, + tw_dev->generic_buffer_virt[0], + tw_dev->generic_buffer_phys[0]); + + if (tw_dev->sense_buffer_virt[0]) + pci_free_consistent(tw_dev->tw_pci_dev, + sizeof(TW_Command_Apache_Header)* + TW_Q_LENGTH, + tw_dev->sense_buffer_virt[0], + tw_dev->sense_buffer_phys[0]); + + kfree(tw_dev->event_queue[0]); +} /* End twl_free_device_extension() */ + +/* This function will get parameter table entries from the firmware */ +static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) +{ + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Param_Apache *param; + void *retval = NULL; + + /* Setup the command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + command_packet = &full_command_packet->command.oldcommand; + + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = TW_COMMAND_SIZE; + command_packet->request_id = request_id; + command_packet->byte6_offset.block_count = cpu_to_le16(1); + + /* Now setup the param */ + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; + memset(param, 0, TW_SECTOR_SIZE); + param->table_id = cpu_to_le16(table_id | 0x8000); + param->parameter_id = cpu_to_le16(parameter_id); + param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes); + + command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); + command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE); + + /* Post the command packet to the board */ + twl_post_command_packet(tw_dev, request_id); + + /* Poll for completion */ + if (twl_poll_response(tw_dev, request_id, 30)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param") + else + retval = (void *)&(param->data[0]); + + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_INITIAL; + + return retval; +} /* End twl_get_param() */ + +/* This function will send an initconnection command to controller */ +static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits, + u32 set_features, unsigned short current_fw_srl, + unsigned short current_fw_arch_id, + unsigned short current_fw_branch, + unsigned short current_fw_build, + unsigned short *fw_on_ctlr_srl, + unsigned short *fw_on_ctlr_arch_id, + unsigned short *fw_on_ctlr_branch, + unsigned short *fw_on_ctlr_build, + u32 *init_connect_result) +{ + TW_Command_Full *full_command_packet; + TW_Initconnect *tw_initconnect; + int request_id = 0, retval = 1; + + /* Initialize InitConnection command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + full_command_packet->header.header_desc.size_header = 128; + + tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand; + tw_initconnect->opcode__reserved = 
TW_OPRES_IN(0, TW_OP_INIT_CONNECTION); + tw_initconnect->request_id = request_id; + tw_initconnect->message_credits = cpu_to_le16(message_credits); + tw_initconnect->features = set_features; + + /* Turn on 64-bit sgl support if we need to */ + tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0; + + tw_initconnect->features = cpu_to_le32(tw_initconnect->features); + + if (set_features & TW_EXTENDED_INIT_CONNECT) { + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED; + tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl); + tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id); + tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch); + tw_initconnect->fw_build = cpu_to_le16(current_fw_build); + } else + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE; + + /* Send command packet to the board */ + twl_post_command_packet(tw_dev, request_id); + + /* Poll for completion */ + if (twl_poll_response(tw_dev, request_id, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection"); + } else { + if (set_features & TW_EXTENDED_INIT_CONNECT) { + *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl); + *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id); + *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch); + *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build); + *init_connect_result = le32_to_cpu(tw_initconnect->result); + } + retval = 0; + } + + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_INITIAL; + + return retval; +} /* End twl_initconnection() */ + +/* This function will initialize the fields of a device extension */ +static int twl_initialize_device_extension(TW_Device_Extension *tw_dev) +{ + int i, retval = 1; + + /* Initialize command packet buffers */ + if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed"); + goto out; + } + + /* Initialize generic buffer */ + if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed"); + goto out; + } + + /* Allocate sense buffers */ + if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed"); + goto out; + } + + /* Allocate event info space */ + tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL); + if (!tw_dev->event_queue[0]) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed"); + goto out; + } + + for (i = 0; i < TW_Q_LENGTH; i++) { + tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event))); + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->error_sequence_id = 1; + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + + mutex_init(&tw_dev->ioctl_lock); + init_waitqueue_head(&tw_dev->ioctl_wqueue); + + retval = 0; +out: + return retval; +} /* End twl_initialize_device_extension() */ + +/* This function will perform a pci-dma unmap */ +static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) +{ + struct scsi_cmnd *cmd = tw_dev->srb[request_id]; + + if (cmd->SCp.phase == TW_PHASE_SGLIST) + scsi_dma_unmap(cmd); +} /* End twl_unmap_scsi_data() */ + +/* This function will handle attention interrupts */ +static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) +{ + int retval 
= 1; + u32 request_id, doorbell; + + /* Read doorbell status */ + doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev)); + + /* Check for controller errors */ + if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing"); + goto out; + } + + /* Check if we need to perform an AEN drain */ + if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) { + if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) { + twl_get_request_id(tw_dev, &request_id); + if (twl_aen_read_queue(tw_dev, request_id)) { + tw_dev->state[request_id] = TW_S_COMPLETED; + twl_free_request_id(tw_dev, request_id); + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); + } + } + } + + retval = 0; +out: + /* Clear doorbell interrupt */ + TWL_CLEAR_DB_INTERRUPT(tw_dev); + + /* Make sure the clear was flushed by reading it back */ + readl(TWL_HOBDBC_REG_ADDR(tw_dev)); + + return retval; +} /* End twl_handle_attention_interrupt() */ + +/* Interrupt service routine */ +static irqreturn_t twl_interrupt(int irq, void *dev_instance) +{ + TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance; + int i, handled = 0, error = 0; + dma_addr_t mfa = 0; + u32 reg, regl, regh, response, request_id = 0; + struct scsi_cmnd *cmd; + TW_Command_Full *full_command_packet; + + spin_lock(tw_dev->host->host_lock); + + /* Read host interrupt status */ + reg = readl(TWL_HISTAT_REG_ADDR(tw_dev)); + + /* Check if this is our interrupt, otherwise bail */ + if (!(reg & TWL_HISTATUS_VALID_INTERRUPT)) + goto twl_interrupt_bail; + + handled = 1; + + /* If we are resetting, bail */ + if (test_bit(TW_IN_RESET, &tw_dev->flags)) + goto twl_interrupt_bail; + + /* Attention interrupt */ + if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) { + if (twl_handle_attention_interrupt(tw_dev)) { + TWL_MASK_INTERRUPTS(tw_dev); + goto twl_interrupt_bail; + } + } + + /* Response interrupt */ + while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) { + if (sizeof(dma_addr_t) > 4) { + regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev)); + regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); + mfa = ((u64)regh << 32) | regl; + } else + mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); + + error = 0; + response = (u32)mfa; + + /* Check for command packet error */ + if (!TW_NOTMFA_OUT(response)) { + for (i=0;i<TW_Q_LENGTH;i++) { + if (tw_dev->sense_buffer_phys[i] == mfa) { + request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id); + if (tw_dev->srb[request_id] != NULL) + error = twl_fill_sense(tw_dev, i, request_id, 1, 1); + else { + /* Skip ioctl error prints */ + if (request_id != tw_dev->chrdev_request_id) + error = twl_fill_sense(tw_dev, i, request_id, 0, 1); + else + memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header)); + } + + /* Now re-post the sense buffer */ + writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev)); + writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev)); + break; + } + } + } else + request_id = TW_RESID_OUT(response); + + full_command_packet = tw_dev->command_packet_virt[request_id]; + + /* Check for correct state */ + if (tw_dev->state[request_id] != TW_S_POSTED) { + if (tw_dev->srb[request_id] != NULL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted"); + TWL_MASK_INTERRUPTS(tw_dev); + goto twl_interrupt_bail; + } + } + + /* Check for internal command completion */ + if (tw_dev->srb[request_id] == NULL) { + if (request_id != tw_dev->chrdev_request_id) { + if (twl_aen_complete(tw_dev, request_id)) +
TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt"); + } else { + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + wake_up(&tw_dev->ioctl_wqueue); + } + } else { + cmd = tw_dev->srb[request_id]; + + if (!error) + cmd->result = (DID_OK << 16); + + /* Report residual bytes for single sgl */ + if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) { + if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id])) + scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length); + } + + /* Now complete the io */ + tw_dev->state[request_id] = TW_S_COMPLETED; + twl_free_request_id(tw_dev, request_id); + tw_dev->posted_request_count--; + tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); + twl_unmap_scsi_data(tw_dev, request_id); + } + + /* Check for another response interrupt */ + reg = readl(TWL_HISTAT_REG_ADDR(tw_dev)); + } + +twl_interrupt_bail: + spin_unlock(tw_dev->host->host_lock); + return IRQ_RETVAL(handled); +} /* End twl_interrupt() */ + +/* This function will poll for a register change */ +static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds) +{ + unsigned long before; + int retval = 1; + u32 reg_value; + + reg_value = readl(reg); + before = jiffies; + + while ((reg_value & value) != result) { + reg_value = readl(reg); + if (time_after(jiffies, before + HZ * seconds)) + goto out; + msleep(50); + } + retval = 0; +out: + return retval; +} /* End twl_poll_register() */ + +/* This function will reset a controller */ +static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset) +{ + int retval = 1; + int i = 0; + u32 status = 0; + unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0; + unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0; + u32 init_connect_result = 0; + int tries = 0; + int do_soft_reset = soft_reset; + + while (tries < TW_MAX_RESET_TRIES) { + /* Do a soft reset if one is needed */ + if (do_soft_reset) { + TWL_SOFT_RESET(tw_dev); + + /* Make sure controller is in a good state */ + if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence"); + tries++; + continue; + } + if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence"); + tries++; + continue; + } + } + + /* Initconnect */ + if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, + TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL, + TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH, + TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl, + &fw_on_ctlr_arch_id, &fw_on_ctlr_branch, + &fw_on_ctlr_build, &init_connect_result)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL"); + do_soft_reset = 1; + tries++; + continue; + } + + /* Load sense buffers */ + while (i < TW_Q_LENGTH) { + writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev)); + writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev)); + + /* Check status for over-run after each write */ + status = readl(TWL_STATUS_REG_ADDR(tw_dev)); + if (!(status & TWL_STATUS_OVERRUN_SUBMIT)) + i++; + } + + /* Now check status */ + status = readl(TWL_STATUS_REG_ADDR(tw_dev)); + if (status) { + 
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers"); + do_soft_reset = 1; + tries++; + continue; + } + + /* Drain the AEN queue */ + if (twl_aen_drain_queue(tw_dev, soft_reset)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + /* Load rest of compatibility struct */ + strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION)); + tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL; + tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH; + tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD; + tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL; + tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH; + tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD; + tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl; + tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch; + tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build; + + /* If we got here, controller is in a good state */ + retval = 0; + goto out; + } +out: + return retval; +} /* End twl_reset_sequence() */ + +/* This function will reset a device extension */ +static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset) +{ + int i = 0, retval = 1; + unsigned long flags = 0; + + /* Block SCSI requests while we are resetting */ + if (ioctl_reset) + scsi_block_requests(tw_dev->host); + + set_bit(TW_IN_RESET, &tw_dev->flags); + TWL_MASK_INTERRUPTS(tw_dev); + TWL_CLEAR_DB_INTERRUPT(tw_dev); + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + + /* Abort all requests that are in progress */ + for (i = 0; i < TW_Q_LENGTH; i++) { + if ((tw_dev->state[i] != TW_S_FINISHED) && + (tw_dev->state[i] != TW_S_INITIAL) && + (tw_dev->state[i] != TW_S_COMPLETED)) { + if (tw_dev->srb[i]) { + tw_dev->srb[i]->result = (DID_RESET << 16); + tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); + twl_unmap_scsi_data(tw_dev, i); + } + } + } + + /* Reset queues and counts */ + for (i = 0; i < TW_Q_LENGTH; i++) { + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->posted_request_count = 0; + + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + if (twl_reset_sequence(tw_dev, 1)) + goto out; + + TWL_UNMASK_INTERRUPTS(tw_dev); + + clear_bit(TW_IN_RESET, &tw_dev->flags); + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + + retval = 0; +out: + if (ioctl_reset) + scsi_unblock_requests(tw_dev->host); + return retval; +} /* End twl_reset_device_extension() */ + +/* This funciton returns unit geometry in cylinders/heads/sectors */ +static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) +{ + int heads, sectors; + TW_Device_Extension *tw_dev; + + tw_dev = (TW_Device_Extension *)sdev->host->hostdata; + + if (capacity >= 0x200000) { + heads = 255; + sectors = 63; + } else { + heads = 64; + sectors = 32; + } + + geom[0] = heads; + geom[1] = sectors; + geom[2] = sector_div(capacity, heads * sectors); /* cylinders */ + + return 0; +} /* End twl_scsi_biosparam() */ + +/* This is the new scsi eh reset function */ +static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt) +{ + TW_Device_Extension *tw_dev = NULL; + int retval = FAILED; + + tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + tw_dev->num_resets++; + + sdev_printk(KERN_WARNING, SCpnt->device, + 
"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n", + TW_DRIVER, 0x2c, SCpnt->cmnd[0]); + + /* Make sure we are not issuing an ioctl or resetting from ioctl */ + mutex_lock(&tw_dev->ioctl_lock); + + /* Now reset the card and some of the device extension data */ + if (twl_reset_device_extension(tw_dev, 0)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset"); + goto out; + } + + retval = SUCCESS; +out: + mutex_unlock(&tw_dev->ioctl_lock); + return retval; +} /* End twl_scsi_eh_reset() */ + +/* This is the main scsi queue function to handle scsi opcodes */ +static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +{ + int request_id, retval; + TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + /* If we are resetting due to timed out ioctl, report as busy */ + if (test_bit(TW_IN_RESET, &tw_dev->flags)) { + retval = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + /* Save done function into scsi_cmnd struct */ + SCpnt->scsi_done = done; + + /* Get a free request id */ + twl_get_request_id(tw_dev, &request_id); + + /* Save the scsi command for use by the ISR */ + tw_dev->srb[request_id] = SCpnt; + + /* Initialize phase to zero */ + SCpnt->SCp.phase = TW_PHASE_INITIAL; + + retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); + if (retval) { + tw_dev->state[request_id] = TW_S_COMPLETED; + twl_free_request_id(tw_dev, request_id); + SCpnt->result = (DID_ERROR << 16); + done(SCpnt); + retval = 0; + } +out: + return retval; +} /* End twl_scsi_queue() */ + +/* This function tells the controller to shut down */ +static void __twl_shutdown(TW_Device_Extension *tw_dev) +{ + /* Disable interrupts */ + TWL_MASK_INTERRUPTS(tw_dev); + + /* Free up the IRQ */ + free_irq(tw_dev->tw_pci_dev->irq, tw_dev); + + printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no); + + /* Tell the card we are shutting down */ + if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed"); + } else { + printk(KERN_WARNING "3w-sas: Shutdown complete.\n"); + } + + /* Clear doorbell interrupt just before exit */ + TWL_CLEAR_DB_INTERRUPT(tw_dev); +} /* End __twl_shutdown() */ + +/* Wrapper for __twl_shutdown */ +static void twl_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev; + + if (!host) + return; + + tw_dev = (TW_Device_Extension *)host->hostdata; + + if (tw_dev->online) + __twl_shutdown(tw_dev); +} /* End twl_shutdown() */ + +/* This function configures unit settings when a unit is coming on-line */ +static int twl_slave_configure(struct scsi_device *sdev) +{ + /* Force 60 second timeout */ + blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); + + return 0; +} /* End twl_slave_configure() */ + +/* scsi_host_template initializer */ +static struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "3w-sas", + .queuecommand = twl_scsi_queue, + .eh_host_reset_handler = twl_scsi_eh_reset, + .bios_param = twl_scsi_biosparam, + .change_queue_depth = twl_change_queue_depth, + .can_queue = TW_Q_LENGTH-2, + .slave_configure = twl_slave_configure, + .this_id = -1, + .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH, + .max_sectors = TW_MAX_SECTORS, + .cmd_per_lun = TW_MAX_CMDS_PER_LUN, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = twl_host_attrs, + .emulated = 1 +}; + +/* This function will probe 
and initialize a card */ +static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) +{ + struct Scsi_Host *host = NULL; + TW_Device_Extension *tw_dev; + int retval = -ENODEV; + int *ptr_phycount, phycount=0; + + retval = pci_enable_device(pdev); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device"); + goto out_disable_device; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask"); + retval = -ENODEV; + goto out_disable_device; + } + + host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); + if (!host) { + TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension"); + retval = -ENOMEM; + goto out_disable_device; + } + tw_dev = shost_priv(host); + + /* Save values to device extension */ + tw_dev->host = host; + tw_dev->tw_pci_dev = pdev; + + if (twl_initialize_device_extension(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension"); + goto out_free_device_extension; + } + + /* Request IO regions */ + retval = pci_request_regions(pdev, "3w-sas"); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region"); + goto out_free_device_extension; + } + + /* Save base address, use region 1 */ + tw_dev->base_addr = pci_iomap(pdev, 1, 0); + if (!tw_dev->base_addr) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap"); + goto out_release_mem_region; + } + + /* Disable interrupts on the card */ + TWL_MASK_INTERRUPTS(tw_dev); + + /* Initialize the card */ + if (twl_reset_sequence(tw_dev, 0)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe"); + goto out_iounmap; + } + + /* Set host specific parameters */ + host->max_id = TW_MAX_UNITS; + host->max_cmd_len = TW_MAX_CDB_LEN; + host->max_lun = TW_MAX_LUNS; + host->max_channel = 0; + + /* Register the card with the kernel SCSI layer */ + retval = scsi_add_host(host, &pdev->dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed"); + goto out_iounmap; + } + + pci_set_drvdata(pdev, host); + + printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n", + host->host_no, + (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE, + TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH), + (u64)pci_resource_start(pdev, 1), pdev->irq); + + ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE, + TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH); + if (ptr_phycount) + phycount = le32_to_cpu(*(int *)ptr_phycount); + + printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n", + host->host_no, + (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE, + TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH), + (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE, + TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH), + phycount); + + /* Try to enable MSI */ + if (use_msi && !pci_enable_msi(pdev)) + set_bit(TW_USING_MSI, &tw_dev->flags); + + /* Now setup the interrupt handler */ + retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ"); + goto out_remove_host; + } + + twl_device_extension_list[twl_device_extension_count] = tw_dev; + twl_device_extension_count++; 
+ + /* Re-enable interrupts on the card */ + TWL_UNMASK_INTERRUPTS(tw_dev); + + /* Finally, scan the host */ + scsi_scan_host(host); + + /* Add sysfs binary files */ + if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read"); + if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info"); + + if (twl_major == -1) { + if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0) + TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device"); + } + tw_dev->online = 1; + return 0; + +out_remove_host: + if (test_bit(TW_USING_MSI, &tw_dev->flags)) + pci_disable_msi(pdev); + scsi_remove_host(host); +out_iounmap: + iounmap(tw_dev->base_addr); +out_release_mem_region: + pci_release_regions(pdev); +out_free_device_extension: + twl_free_device_extension(tw_dev); + scsi_host_put(host); +out_disable_device: + pci_disable_device(pdev); + + return retval; +} /* End twl_probe() */ + +/* This function is called to remove a device */ +static void twl_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev; + + if (!host) + return; + + tw_dev = (TW_Device_Extension *)host->hostdata; + + if (!tw_dev->online) + return; + + /* Remove sysfs binary files */ + sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr); + sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr); + + scsi_remove_host(tw_dev->host); + + /* Unregister character device */ + if (twl_major >= 0) { + unregister_chrdev(twl_major, "twl"); + twl_major = -1; + } + + /* Shutdown the card */ + __twl_shutdown(tw_dev); + + /* Disable MSI if enabled */ + if (test_bit(TW_USING_MSI, &tw_dev->flags)) + pci_disable_msi(pdev); + + /* Free IO remapping */ + iounmap(tw_dev->base_addr); + + /* Free up the mem region */ + pci_release_regions(pdev); + + /* Free up device extension resources */ + twl_free_device_extension(tw_dev); + + scsi_host_put(tw_dev->host); + pci_disable_device(pdev); + twl_device_extension_count--; +} /* End twl_remove() */ + +#ifdef CONFIG_PM +/* This function is called on PCI suspend */ +static int twl_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no); + /* Disable interrupts */ + TWL_MASK_INTERRUPTS(tw_dev); + + free_irq(tw_dev->tw_pci_dev->irq, tw_dev); + + /* Tell the card we are shutting down */ + if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend"); + } else { + printk(KERN_WARNING "3w-sas: Suspend complete.\n"); + } + + /* Clear doorbell interrupt */ + TWL_CLEAR_DB_INTERRUPT(tw_dev); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} /* End twl_suspend() */ + +/* This function is called on PCI resume */ +static int twl_resume(struct pci_dev *pdev) +{ + int retval = 0; + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no); + pci_set_power_state(pdev, PCI_D0); + 
pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + + retval = pci_enable_device(pdev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume"); + return retval; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume"); + retval = -ENODEV; + goto out_disable_device; + } + + /* Initialize the card */ + if (twl_reset_sequence(tw_dev, 0)) { + retval = -ENODEV; + goto out_disable_device; + } + + /* Now setup the interrupt handler */ + retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume"); + retval = -ENODEV; + goto out_disable_device; + } + + /* Now enable MSI if enabled */ + if (test_bit(TW_USING_MSI, &tw_dev->flags)) + pci_enable_msi(pdev); + + /* Re-enable interrupts on the card */ + TWL_UNMASK_INTERRUPTS(tw_dev); + + printk(KERN_WARNING "3w-sas: Resume complete.\n"); + return 0; + +out_disable_device: + scsi_remove_host(host); + pci_disable_device(pdev); + + return retval; +} /* End twl_resume() */ +#endif + +/* PCI Devices supported by this driver */ +static struct pci_device_id twl_pci_tbl[] __devinitdata = { + { PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) }, + { } +}; +MODULE_DEVICE_TABLE(pci, twl_pci_tbl); + +/* pci_driver initializer */ +static struct pci_driver twl_driver = { + .name = "3w-sas", + .id_table = twl_pci_tbl, + .probe = twl_probe, + .remove = twl_remove, +#ifdef CONFIG_PM + .suspend = twl_suspend, + .resume = twl_resume, +#endif + .shutdown = twl_shutdown +}; + +/* This function is called on driver initialization */ +static int __init twl_init(void) +{ + printk(KERN_INFO "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION); + + return pci_register_driver(&twl_driver); +} /* End twl_init() */ + +/* This function is called on driver exit */ +static void __exit twl_exit(void) +{ + pci_unregister_driver(&twl_driver); +} /* End twl_exit() */ + +module_init(twl_init); +module_exit(twl_exit); + diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h new file mode 100644 index 000000000000..d474892701d4 --- /dev/null +++ b/drivers/scsi/3w-sas.h @@ -0,0 +1,396 @@ +/* + 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux. + + Written By: Adam Radford + + Copyright (C) 2009 LSI Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Bugs/Comments/Suggestions should be mailed to: + linuxraid@lsi.com + + For more information, goto: + http://www.lsi.com +*/ + +#ifndef _3W_SAS_H +#define _3W_SAS_H + +/* AEN severity table */ +static char *twl_aen_severity_table[] = +{ + "None", "ERROR", "WARNING", "INFO", "DEBUG", NULL +}; + +/* Liberator register offsets */ +#define TWL_STATUS 0x0 /* Status */ +#define TWL_HIBDB 0x20 /* Inbound doorbell */ +#define TWL_HISTAT 0x30 /* Host interrupt status */ +#define TWL_HIMASK 0x34 /* Host interrupt mask */ +#define TWL_HOBDB 0x9C /* Outbound doorbell */ +#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */ +#define TWL_SCRPD3 0xBC /* Scratchpad */ +#define TWL_HIBQPL 0xC0 /* Host inbound Q low */ +#define TWL_HIBQPH 0xC4 /* Host inbound Q high */ +#define TWL_HOBQPL 0xC8 /* Host outbound Q low */ +#define TWL_HOBQPH 0xCC /* Host outbound Q high */ +#define TWL_HISTATUS_VALID_INTERRUPT 0xC +#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4 +#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8 +#define TWL_STATUS_OVERRUN_SUBMIT 0x2000 +#define TWL_ISSUE_SOFT_RESET 0x100 +#define TWL_CONTROLLER_READY 0x2000 +#define TWL_DOORBELL_CONTROLLER_ERROR 0x200000 +#define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000 +#define TWL_PULL_MODE 0x1 + +/* Command packet opcodes used by the driver */ +#define TW_OP_INIT_CONNECTION 0x1 +#define TW_OP_GET_PARAM 0x12 +#define TW_OP_SET_PARAM 0x13 +#define TW_OP_EXECUTE_SCSI 0x10 + +/* Asynchronous Event Notification (AEN) codes used by the driver */ +#define TW_AEN_QUEUE_EMPTY 0x0000 +#define TW_AEN_SOFT_RESET 0x0001 +#define TW_AEN_SYNC_TIME_WITH_HOST 0x031 +#define TW_AEN_SEVERITY_ERROR 0x1 +#define TW_AEN_SEVERITY_DEBUG 0x4 +#define TW_AEN_NOT_RETRIEVED 0x1 + +/* Command state defines */ +#define TW_S_INITIAL 0x1 /* Initial state */ +#define TW_S_STARTED 0x2 /* Id in use */ +#define TW_S_POSTED 0x4 /* Posted to the controller */ +#define TW_S_COMPLETED 0x8 /* Completed by isr */ +#define TW_S_FINISHED 0x10 /* I/O completely done */ + +/* Compatibility defines */ +#define TW_9750_ARCH_ID 10 +#define TW_CURRENT_DRIVER_SRL 40 +#define TW_CURRENT_DRIVER_BUILD 0 +#define TW_CURRENT_DRIVER_BRANCH 0 + +/* Phase defines */ +#define TW_PHASE_INITIAL 0 +#define TW_PHASE_SGLIST 2 + +/* Misc defines */ +#define TW_SECTOR_SIZE 512 +#define TW_MAX_UNITS 32 +#define TW_INIT_MESSAGE_CREDITS 0x100 +#define TW_INIT_COMMAND_PACKET_SIZE 0x3 +#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6 +#define 
TW_EXTENDED_INIT_CONNECT 0x2 +#define TW_BASE_FW_SRL 24 +#define TW_BASE_FW_BRANCH 0 +#define TW_BASE_FW_BUILD 1 +#define TW_Q_LENGTH 256 +#define TW_Q_START 0 +#define TW_MAX_SLOT 32 +#define TW_MAX_RESET_TRIES 2 +#define TW_MAX_CMDS_PER_LUN 254 +#define TW_MAX_AEN_DRAIN 255 +#define TW_IN_RESET 2 +#define TW_USING_MSI 3 +#define TW_IN_ATTENTION_LOOP 4 +#define TW_MAX_SECTORS 256 +#define TW_MAX_CDB_LEN 16 +#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */ +#define TW_IOCTL_CHRDEV_FREE -1 +#define TW_COMMAND_OFFSET 128 /* 128 bytes */ +#define TW_VERSION_TABLE 0x0402 +#define TW_TIMEKEEP_TABLE 0x040A +#define TW_INFORMATION_TABLE 0x0403 +#define TW_PARAM_FWVER 3 +#define TW_PARAM_FWVER_LENGTH 16 +#define TW_PARAM_BIOSVER 4 +#define TW_PARAM_BIOSVER_LENGTH 16 +#define TW_PARAM_MODEL 8 +#define TW_PARAM_MODEL_LENGTH 16 +#define TW_PARAM_PHY_SUMMARY_TABLE 1 +#define TW_PARAM_PHYCOUNT 2 +#define TW_PARAM_PHYCOUNT_LENGTH 1 +#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools +#define TW_ALLOCATION_LENGTH 128 +#define TW_SENSE_DATA_LENGTH 18 +#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a +#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d +#define TW_ERROR_UNIT_OFFLINE 0x128 +#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3 +#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4 +#define TW_DRIVER 6 +#ifndef PCI_DEVICE_ID_3WARE_9750 +#define PCI_DEVICE_ID_3WARE_9750 0x1010 +#endif + +/* Bitmask macros to eliminate bitfields */ + +/* opcode: 5, reserved: 3 */ +#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f)) +#define TW_OP_OUT(x) (x & 0x1f) + +/* opcode: 5, sgloffset: 3 */ +#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f)) +#define TW_SGL_OUT(x) ((x >> 5) & 0x7) + +/* severity: 3, reserved: 5 */ +#define TW_SEV_OUT(x) (x & 0x7) + +/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */ +#define TW_RESID_OUT(x) ((x >> 16) & 0xffff) +#define TW_NOTMFA_OUT(x) (x & 0x1) + +/* request_id: 12, lun: 4 */ +#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff)) +#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf) + +/* Register access macros */ +#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS) +#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL) +#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH) +#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB) +#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC) +#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK) +#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT) +#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH) +#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL) +#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB) +#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3) +#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev))) +#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev))) +#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev))) +#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev))) + +/* Macros */ +#define TW_PRINTK(h,a,b,c) { \ +if (h) \ +printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \ +else \ +printk(KERN_WARNING "3w-sas: 
ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \ +} +#define TW_MAX_LUNS 16 +#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4) +#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92) +#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94) +#define TW_PADDING_LENGTH_LIBERATOR 136 +#define TW_PADDING_LENGTH_LIBERATOR_OLD 132 +#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x)) + +#pragma pack(1) + +/* SGL entry */ +typedef struct TAG_TW_SG_Entry_ISO { + dma_addr_t address; + dma_addr_t length; +} TW_SG_Entry_ISO; + +/* Old Command Packet with ISO SGL */ +typedef struct TW_Command { + unsigned char opcode__sgloffset; + unsigned char size; + unsigned char request_id; + unsigned char unit__hostid; + /* Second DWORD */ + unsigned char status; + unsigned char flags; + union { + unsigned short block_count; + unsigned short parameter_count; + } byte6_offset; + union { + struct { + u32 lba; + TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD]; + unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD]; + } io; + struct { + TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD]; + u32 padding; + unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD]; + } param; + } byte8_offset; +} TW_Command; + +/* New Command Packet with ISO SGL */ +typedef struct TAG_TW_Command_Apache { + unsigned char opcode__reserved; + unsigned char unit; + unsigned short request_id__lunl; + unsigned char status; + unsigned char sgl_offset; + unsigned short sgl_entries__lunh; + unsigned char cdb[16]; + TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH]; + unsigned char padding[TW_PADDING_LENGTH_LIBERATOR]; +} TW_Command_Apache; + +/* New command packet header */ +typedef struct TAG_TW_Command_Apache_Header { + unsigned char sense_data[TW_SENSE_DATA_LENGTH]; + struct { + char reserved[4]; + unsigned short error; + unsigned char padding; + unsigned char severity__reserved; + } status_block; + unsigned char err_specific_desc[98]; + struct { + unsigned char size_header; + unsigned short request_id; + unsigned char size_sense; + } header_desc; +} TW_Command_Apache_Header; + +/* This struct is a union of the 2 command packets */ +typedef struct TAG_TW_Command_Full { + TW_Command_Apache_Header header; + union { + TW_Command oldcommand; + TW_Command_Apache newcommand; + } command; +} TW_Command_Full; + +/* Initconnection structure */ +typedef struct TAG_TW_Initconnect { + unsigned char opcode__reserved; + unsigned char size; + unsigned char request_id; + unsigned char res2; + unsigned char status; + unsigned char flags; + unsigned short message_credits; + u32 features; + unsigned short fw_srl; + unsigned short fw_arch_id; + unsigned short fw_branch; + unsigned short fw_build; + u32 result; +} TW_Initconnect; + +/* Event info structure */ +typedef struct TAG_TW_Event +{ + unsigned int sequence_id; + unsigned int time_stamp_sec; + unsigned short aen_code; + unsigned char severity; + unsigned char retrieved; + unsigned char repeat_count; + unsigned char parameter_len; + unsigned char parameter_data[98]; +} TW_Event; + +typedef struct TAG_TW_Ioctl_Driver_Command { + unsigned int control_code; + unsigned int status; + unsigned int unique_id; + unsigned int sequence_id; + unsigned int os_specific; + unsigned int buffer_length; +} TW_Ioctl_Driver_Command; + +typedef struct TAG_TW_Ioctl_Apache { + TW_Ioctl_Driver_Command driver_command; + char padding[488]; + TW_Command_Full firmware_command; + char data_buffer[1]; +} TW_Ioctl_Buf_Apache; + +/* GetParam descriptor */ +typedef 
struct { + unsigned short table_id; + unsigned short parameter_id; + unsigned short parameter_size_bytes; + unsigned short actual_parameter_size_bytes; + unsigned char data[1]; +} TW_Param_Apache; + +/* Compatibility information structure */ +typedef struct TAG_TW_Compatibility_Info +{ + char driver_version[32]; + unsigned short working_srl; + unsigned short working_branch; + unsigned short working_build; + unsigned short driver_srl_high; + unsigned short driver_branch_high; + unsigned short driver_build_high; + unsigned short driver_srl_low; + unsigned short driver_branch_low; + unsigned short driver_build_low; + unsigned short fw_on_ctlr_srl; + unsigned short fw_on_ctlr_branch; + unsigned short fw_on_ctlr_build; +} TW_Compatibility_Info; + +#pragma pack() + +typedef struct TAG_TW_Device_Extension { + void __iomem *base_addr; + unsigned long *generic_buffer_virt[TW_Q_LENGTH]; + dma_addr_t generic_buffer_phys[TW_Q_LENGTH]; + TW_Command_Full *command_packet_virt[TW_Q_LENGTH]; + dma_addr_t command_packet_phys[TW_Q_LENGTH]; + TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH]; + dma_addr_t sense_buffer_phys[TW_Q_LENGTH]; + struct pci_dev *tw_pci_dev; + struct scsi_cmnd *srb[TW_Q_LENGTH]; + unsigned char free_queue[TW_Q_LENGTH]; + unsigned char free_head; + unsigned char free_tail; + int state[TW_Q_LENGTH]; + unsigned int posted_request_count; + unsigned int max_posted_request_count; + unsigned int max_sgl_entries; + unsigned int sgl_entries; + unsigned int num_resets; + unsigned int sector_count; + unsigned int max_sector_count; + unsigned int aen_count; + struct Scsi_Host *host; + long flags; + TW_Event *event_queue[TW_Q_LENGTH]; + unsigned char error_index; + unsigned int error_sequence_id; + int chrdev_request_id; + wait_queue_head_t ioctl_wqueue; + struct mutex ioctl_lock; + TW_Compatibility_Info tw_compat_info; + char online; +} TW_Device_Extension; + +#endif /* _3W_SAS_H */ + diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 1895259fff0f..b4d8d63a34b2 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -399,6 +399,17 @@ config SCSI_3W_9XXX Please read the comments at the top of . +config SCSI_3W_SAS + tristate "3ware 97xx SAS/SATA-RAID support" + depends on PCI && SCSI + help + This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards. + + + + Please read the comments at the top of + . + config SCSI_7000FASST tristate "7000FASST SCSI support" depends on ISA && SCSI && ISA_DMA_API diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 5026bdc7b2b7..280d3c657d60 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -114,6 +114,7 @@ obj-$(CONFIG_SCSI_MESH) += mesh.o obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o +obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o obj-$(CONFIG_SCSI_PPA) += ppa.o obj-$(CONFIG_SCSI_IMM) += imm.o obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o -- cgit v1.2.3-59-g8ed1b From 03b147083a2f9a2a3fbbd2505fa88ffa3c6ab194 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Wed, 23 Sep 2009 16:15:35 +0200 Subject: [SCSI] scsi_lib: fix potential NULL dereference Stanse found a potential NULL dereference in scsi_kill_request. Instead of triggering BUG() in 'if (unlikely(cmd == NULL))' branch, the kernel will Oops earlier on cmd dereference. Move the dereferences after the if. 
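As a minimal sketch of the pattern being fixed (the function and variable names below are illustrative only; the real change is to scsi_kill_request() in the diff that follows), the point is simply that the first dereference must come after the NULL check:

	/* Illustrative only -- not the scsi_lib.c code itself. */
	static void kill_request_buggy(struct request *req)
	{
		struct scsi_cmnd *cmd = req->special;
		struct scsi_device *sdev = cmd->device;	/* oopses here if cmd is NULL */

		if (unlikely(cmd == NULL))
			BUG();				/* never reached when cmd is NULL */
		/* ... use sdev ... */
	}

	static void kill_request_fixed(struct request *req)
	{
		struct scsi_cmnd *cmd = req->special;
		struct scsi_device *sdev;

		if (unlikely(cmd == NULL))
			BUG();				/* now trips as intended */
		sdev = cmd->device;			/* dereference only after the check */
		/* ... use sdev ... */
	}
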
Signed-off-by: Jiri Slaby Signed-off-by: James Bottomley --- drivers/scsi/scsi_lib.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 108655230b59..e495d3813948 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1359,9 +1359,9 @@ static int scsi_lld_busy(struct request_queue *q) static void scsi_kill_request(struct request *req, struct request_queue *q) { struct scsi_cmnd *cmd = req->special; - struct scsi_device *sdev = cmd->device; - struct scsi_target *starget = scsi_target(sdev); - struct Scsi_Host *shost = sdev->host; + struct scsi_device *sdev; + struct scsi_target *starget; + struct Scsi_Host *shost; blk_start_request(req); @@ -1371,6 +1371,9 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) BUG(); } + sdev = cmd->device; + starget = scsi_target(sdev); + shost = sdev->host; scsi_init_cmd_errh(cmd); cmd->result = DID_NO_CONNECT << 16; atomic_inc(&cmd->device->iorequest_cnt); -- cgit v1.2.3-59-g8ed1b From 65d430fa99cbd0e88d09a3343f697c51fc8a7009 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Fri, 30 Oct 2009 17:59:29 +0100 Subject: [SCSI] scsi_transport_fc: Introduce helper function for blocking scsi_eh Move the duplicated code from FC LLDs to SCSI FC transport class. Acked-by: James Smart Acked-by: Giridhar Malavali Acked-by: Abhijeet Joglekar Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic_scsi.c | 20 ++------------------ drivers/scsi/lpfc/lpfc_scsi.c | 30 ++++-------------------------- drivers/scsi/qla2xxx/qla_os.c | 25 ++++--------------------- drivers/scsi/scsi_transport_fc.c | 26 ++++++++++++++++++++++++++ include/scsi/scsi_transport_fc.h | 1 + 5 files changed, 37 insertions(+), 65 deletions(-) diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index b5d17385939b..8d26d7a9f01b 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1225,22 +1225,6 @@ void fnic_terminate_rport_io(struct fc_rport *rport) } -static void fnic_block_error_handler(struct scsi_cmnd *sc) -{ - struct Scsi_Host *shost = sc->device->host; - struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); - unsigned long flags; - - spin_lock_irqsave(shost->host_lock, flags); - while (rport->port_state == FC_PORTSTATE_BLOCKED) { - spin_unlock_irqrestore(shost->host_lock, flags); - msleep(1000); - spin_lock_irqsave(shost->host_lock, flags); - } - spin_unlock_irqrestore(shost->host_lock, flags); - -} - /* * This function is exported to SCSI for sending abort cmnds. * A SCSI IO is represented by a io_req in the driver. 
@@ -1260,7 +1244,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) DECLARE_COMPLETION_ONSTACK(tm_done); /* Wait for rport to unblock */ - fnic_block_error_handler(sc); + fc_block_scsi_eh(sc); /* Get local-port, check ready and link up */ lp = shost_priv(sc->device->host); @@ -1542,7 +1526,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) DECLARE_COMPLETION_ONSTACK(tm_done); /* Wait for rport to unblock */ - fnic_block_error_handler(sc); + fc_block_scsi_eh(sc); /* Get local-port, check ready and link up */ lp = shost_priv(sc->device->host); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c88f59f0ce30..e25179193a82 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -2916,28 +2916,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) return 0; } -/** - * lpfc_block_error_handler - Routine to block error handler - * @cmnd: Pointer to scsi_cmnd data structure. - * - * This routine blocks execution till fc_rport state is not FC_PORSTAT_BLCOEKD. - **/ -static void -lpfc_block_error_handler(struct scsi_cmnd *cmnd) -{ - struct Scsi_Host *shost = cmnd->device->host; - struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); - - spin_lock_irq(shost->host_lock); - while (rport->port_state == FC_PORTSTATE_BLOCKED) { - spin_unlock_irq(shost->host_lock); - msleep(1000); - spin_lock_irq(shost->host_lock); - } - spin_unlock_irq(shost->host_lock); - return; -} - /** * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point * @cmnd: Pointer to scsi_cmnd data structure. @@ -2961,7 +2939,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) int ret = SUCCESS; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); - lpfc_block_error_handler(cmnd); + fc_block_scsi_eh(cmnd); lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; BUG_ON(!lpfc_cmd); @@ -3259,7 +3237,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_scsi_event_header scsi_event; int status; - lpfc_block_error_handler(cmnd); + fc_block_scsi_eh(cmnd); status = lpfc_chk_tgt_mapped(vport, cmnd); if (status == FAILED) { @@ -3318,7 +3296,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_scsi_event_header scsi_event; int status; - lpfc_block_error_handler(cmnd); + fc_block_scsi_eh(cmnd); status = lpfc_chk_tgt_mapped(vport, cmnd); if (status == FAILED) { @@ -3384,7 +3362,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); - lpfc_block_error_handler(cmnd); + fc_block_scsi_eh(cmnd); /* * Since the driver manages a single bus device, reset all diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d69744a62fe4..41669357b186 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -728,23 +728,6 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport) spin_unlock_irqrestore(&ha->hardware_lock, flags); } -static void -qla2x00_block_error_handler(struct scsi_cmnd *cmnd) -{ - struct Scsi_Host *shost = cmnd->device->host; - struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); - unsigned long flags; - - spin_lock_irqsave(shost->host_lock, flags); - while (rport->port_state == FC_PORTSTATE_BLOCKED) { - spin_unlock_irqrestore(shost->host_lock, flags); - msleep(1000); - spin_lock_irqsave(shost->host_lock, flags); - } - spin_unlock_irqrestore(shost->host_lock, flags); - return; -} - /************************************************************************** * 
qla2xxx_eh_abort * @@ -774,7 +757,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) struct req_que *req = vha->req; srb_t *spt; - qla2x00_block_error_handler(cmd); + fc_block_scsi_eh(cmd); if (!CMD_SP(cmd)) return SUCCESS; @@ -905,7 +888,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; int err; - qla2x00_block_error_handler(cmd); + fc_block_scsi_eh(cmd); if (!fcport) return FAILED; @@ -985,7 +968,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) unsigned long serial; srb_t *sp = (srb_t *) CMD_SP(cmd); - qla2x00_block_error_handler(cmd); + fc_block_scsi_eh(cmd); id = cmd->device->id; lun = cmd->device->lun; @@ -1048,7 +1031,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) srb_t *sp = (srb_t *) CMD_SP(cmd); scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - qla2x00_block_error_handler(cmd); + fc_block_scsi_eh(cmd); id = cmd->device->id; lun = cmd->device->lun; diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index f436e033adaf..3ce56b3b2cd7 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -27,6 +27,7 @@ */ #include #include +#include #include #include #include @@ -3144,6 +3145,31 @@ fc_scsi_scan_rport(struct work_struct *work) spin_unlock_irqrestore(shost->host_lock, flags); } +/** + * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport + * @cmnd: SCSI command that scsi_eh is trying to recover + * + * This routine can be called from a FC LLD scsi_eh callback. It + * blocks the scsi_eh thread until the fc_rport leaves the + * FC_PORTSTATE_BLOCKED. This is necessary to avoid the scsi_eh + * failing recovery actions for blocked rports which would lead to + * offlined SCSI devices. + */ +void fc_block_scsi_eh(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + while (rport->port_state == FC_PORTSTATE_BLOCKED) { + spin_unlock_irqrestore(shost->host_lock, flags); + msleep(1000); + spin_lock_irqsave(shost->host_lock, flags); + } + spin_unlock_irqrestore(shost->host_lock, flags); +} +EXPORT_SYMBOL(fc_block_scsi_eh); /** * fc_vport_setup - allocates and creates a FC virtual port. diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index fc50bd64aa4e..8e86a94faf06 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -807,5 +807,6 @@ void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel, struct fc_vport_identifiers *); int fc_vport_terminate(struct fc_vport *vport); +void fc_block_scsi_eh(struct scsi_cmnd *cmnd); #endif /* SCSI_TRANSPORT_FC_H */ -- cgit v1.2.3-59-g8ed1b From 2e76f7670b33a3b0bdf015ed1459e4b417a40ce0 Mon Sep 17 00:00:00 2001 From: Abhijeet Joglekar Date: Tue, 3 Nov 2009 11:45:37 -0800 Subject: [SCSI] fnic: Allocate OS interrupt resources just before enabling interrupts The OS interrupt vectors were getting allocated before the interrupt resources were mapped from hardware. For Legacy interrupts, since they are shared with other devices, as soon as an interrupt is registered with the OS, it can fire while the fnic isr resource is still unmapped. This can cause crash because of access to unmapped resources. 
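As a rough sketch of the safe ordering the patch moves to (the helper names below are made up, not fnic's actual functions), the handler is registered only after everything it may touch is ready, and interrupts are unmasked last:

	/* Illustrative probe ordering for a shared legacy interrupt line.
	 * With IRQF_SHARED the ISR can run as soon as request_irq() returns,
	 * so every resource it reads must already be mapped and allocated. */
	static int my_probe(struct pci_dev *pdev)
	{
		int err;

		err = my_map_bars_and_alloc_rings(pdev);	/* 1. map/alloc first */
		if (err)
			return err;

		err = request_irq(pdev->irq, my_isr, IRQF_SHARED, "mydrv", pdev);
		if (err)					/* 2. register ISR last */
			goto err_free;

		my_unmask_interrupts(pdev);			/* 3. then enable */
		return 0;

	err_free:
		my_free_rings(pdev);
		return err;
	}
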
For MSIX and MSI, since interrupts are not shared with other devices, this problem didnt happen, because the interrupt is enabled as the last step before returning from _probe. For Legacy however, since the interrupt is shared, the handler can be called as soon as it is registered. Solution is to register interrupt handlers with OS as last step before enabling device interrupts. Signed-off-by: Abhijeet Joglekar Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic_main.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index b0d425ab30ab..fc61f17025ce 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -572,19 +572,12 @@ static int __devinit fnic_probe(struct pci_dev *pdev, goto err_out_dev_close; } - err = fnic_request_intr(fnic); - if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Unable to request irq.\n"); - goto err_out_clear_intr; - } - err = fnic_alloc_vnic_resources(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Failed to alloc vNIC resources, " "aborting.\n"); - goto err_out_free_intr; + goto err_out_clear_intr; } @@ -729,6 +722,14 @@ static int __devinit fnic_probe(struct pci_dev *pdev, fc_fabric_login(lp); vnic_dev_enable(fnic->vdev); + + err = fnic_request_intr(fnic); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Unable to request irq.\n"); + goto err_out_free_exch_mgr; + } + for (i = 0; i < fnic->intr_count; i++) vnic_intr_unmask(&fnic->intr[i]); @@ -753,8 +754,6 @@ err_out_free_ioreq_pool: mempool_destroy(fnic->io_req_pool); err_out_free_resources: fnic_free_vnic_resources(fnic); -err_out_free_intr: - fnic_free_intr(fnic); err_out_clear_intr: fnic_clear_intr_mode(fnic); err_out_dev_close: @@ -828,8 +827,8 @@ static void __devexit fnic_remove(struct pci_dev *pdev) scsi_remove_host(fnic->lport->host); fc_exch_mgr_free(fnic->lport); vnic_dev_notify_unset(fnic->vdev); - fnic_free_vnic_resources(fnic); fnic_free_intr(fnic); + fnic_free_vnic_resources(fnic); fnic_clear_intr_mode(fnic); vnic_dev_close(fnic->vdev); vnic_dev_unregister(fnic->vdev); -- cgit v1.2.3-59-g8ed1b From 2171c225f641c5402e4c47180d791a612278040e Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 3 Nov 2009 11:45:42 -0800 Subject: [SCSI] fcoe: Increase FCOE_MAX_LUN to 0xFFFF (65535) The maximum number of LUNs was far too low. This value is what most other FC HBAs are using. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index c578082aef8b..a123552847e5 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -32,7 +32,7 @@ #define FCOE_NAME "fcoe" #define FCOE_VENDOR "Open-FCoE.org" -#define FCOE_MAX_LUN 255 +#define FCOE_MAX_LUN 0xFFFF #define FCOE_MAX_FCP_TARGET 256 #define FCOE_MAX_OUTSTANDING_COMMANDS 1024 -- cgit v1.2.3-59-g8ed1b From 1a7b75ae719754c77ccd4d18b0d258ae5db38a25 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 3 Nov 2009 11:45:47 -0800 Subject: [SCSI] libfc: Move non-common routines and prototypes out of libfc.h This patch moves all non-common routines and function prototypes out of libfc.h and into the appropriate .c files. It makes these routines 'static' when necessary and removes any unnecessary EXPORT_SYMBOL statements. 
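A condensed sketch of that conversion follows (assuming the usual libfc ops-table hookup; the body is elided and this is not the literal fc_exch.c code):

	/* before: global symbol, prototyped in libfc.h and exported */
	int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp);
	EXPORT_SYMBOL(fc_seq_send);

	/* after: file-local to fc_exch.c, reached through the function template */
	static int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
			       struct fc_frame *fp)
	{
		/* ... unchanged sending logic ... */
		return 0;
	}

	/* fc_exch_init() fills in lp->tt.seq_send = fc_seq_send when the LLD has
	 * not overridden it, so other files call lp->tt.seq_send() instead of
	 * referencing the symbol directly. */
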
A result of moving the fc_exch_seq_send, fc_seq_els_rsp_send, fc_exch_alloc and fc_seq_start_next prototypes out of libfc.h is that they were no longer being imported into fc_exch.c when libfc.h was included. This caused errors where routines in fc_exch.c were looking for undefined symbols. To fix this this patch reorganizes fc_seq_alloc, fc_seq_start_next and fc_seq_start_next_locked. This move also made it so that fc_seq_start_next_locked did not need to be prototyped at the top of fc_exch.c. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 329 +++++++++++++++++++++++-------------------- include/scsi/libfc.h | 49 ------- 2 files changed, 177 insertions(+), 201 deletions(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 170cdf4bac97..659bb05287f3 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -107,7 +107,6 @@ static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason, enum fc_els_rjt_explan); static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *); static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *); -static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp); /* * Internal implementation notes. @@ -272,7 +271,6 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, fh->fh_seq_cnt = htons(ep->seq.cnt); } - /* * Release a reference to an exchange. * If the refcnt goes to zero and the exchange is complete, it is freed. @@ -372,7 +370,104 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) spin_unlock_bh(&ep->ex_lock); } -int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) +/** + * send a frame using existing sequence and exchange. + */ +static int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, + struct fc_frame *fp) +{ + struct fc_exch *ep; + struct fc_frame_header *fh = fc_frame_header_get(fp); + int error; + u32 f_ctl; + + ep = fc_seq_exch(sp); + WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); + + f_ctl = ntoh24(fh->fh_f_ctl); + fc_exch_setup_hdr(ep, fp, f_ctl); + + /* + * update sequence count if this frame is carrying + * multiple FC frames when sequence offload is enabled + * by LLD. + */ + if (fr_max_payload(fp)) + sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)), + fr_max_payload(fp)); + else + sp->cnt++; + + /* + * Send the frame. + */ + error = lp->tt.frame_send(lp, fp); + + /* + * Update the exchange and sequence flags, + * assuming all frames for the sequence have been sent. + * We can only be called to send once for each sequence. + */ + spin_lock_bh(&ep->ex_lock); + ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ + if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT)) + ep->esb_stat &= ~ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); + return error; +} + +/** + * fc_seq_alloc() - Allocate a sequence. + * @ep: Exchange pointer + * @seq_id: Sequence ID to allocate a sequence for + * + * We don't support multiple originated sequences on the same exchange. + * By implication, any previously originated sequence on this exchange + * is complete, and we reallocate the same sequence. 
+ */ +static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) +{ + struct fc_seq *sp; + + sp = &ep->seq; + sp->ssb_stat = 0; + sp->cnt = 0; + sp->id = seq_id; + return sp; +} + +static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) +{ + struct fc_exch *ep = fc_seq_exch(sp); + + sp = fc_seq_alloc(ep, ep->seq_id++); + FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", + ep->f_ctl, sp->id); + return sp; +} + +/** + * Allocate a new sequence on the same exchange as the supplied sequence. + * This will never return NULL. + */ +static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) +{ + struct fc_exch *ep = fc_seq_exch(sp); + + spin_lock_bh(&ep->ex_lock); + sp = fc_seq_start_next_locked(sp); + spin_unlock_bh(&ep->ex_lock); + + return sp; +} + +/** + * This function is for seq_exch_abort function pointer in + * struct libfc_function_template, see comment block on + * seq_exch_abort for description of this function. + */ +static int fc_seq_exch_abort(const struct fc_seq *req_sp, + unsigned int timer_msec) { struct fc_seq *sp; struct fc_exch *ep; @@ -472,24 +567,6 @@ done: fc_exch_release(ep); } -/* - * Allocate a sequence. - * - * We don't support multiple originated sequences on the same exchange. - * By implication, any previously originated sequence on this exchange - * is complete, and we reallocate the same sequence. - */ -static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) -{ - struct fc_seq *sp; - - sp = &ep->seq; - sp->ssb_stat = 0; - sp->cnt = 0; - sp->id = seq_id; - return sp; -} - /** * fc_exch_em_alloc() - allocate an exchange from a specified EM. * @lport: ptr to the local port @@ -570,7 +647,8 @@ err: * EM is selected having either a NULL match function pointer * or call to match function returning true. */ -struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) +static struct fc_exch *fc_exch_alloc(struct fc_lport *lport, + struct fc_frame *fp) { struct fc_exch_mgr_anchor *ema; struct fc_exch *ep; @@ -584,7 +662,6 @@ struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) } return NULL; } -EXPORT_SYMBOL(fc_exch_alloc); /* * Lookup and hold an exchange. @@ -607,7 +684,13 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) return ep; } -void fc_exch_done(struct fc_seq *sp) + +/** + * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and + * the memory allocated for the related objects may be freed. + * @sp: Sequence pointer + */ +static void fc_exch_done(struct fc_seq *sp) { struct fc_exch *ep = fc_seq_exch(sp); int rc; @@ -618,7 +701,6 @@ void fc_exch_done(struct fc_seq *sp) if (!rc) fc_exch_delete(ep); } -EXPORT_SYMBOL(fc_exch_done); /* * Allocate a new exchange as responder. @@ -821,76 +903,15 @@ static void fc_exch_set_addr(struct fc_exch *ep, } } -static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) -{ - struct fc_exch *ep = fc_seq_exch(sp); - - sp = fc_seq_alloc(ep, ep->seq_id++); - FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", - ep->f_ctl, sp->id); - return sp; -} -/* - * Allocate a new sequence on the same exchange as the supplied sequence. - * This will never return NULL. +/** + * fc_seq_els_rsp_send() - Send ELS response using mainly infomation + * in exchange and sequence in EM layer. 
+ * @sp: Sequence pointer + * @els_cmd: ELS command + * @els_data: ELS data */ -struct fc_seq *fc_seq_start_next(struct fc_seq *sp) -{ - struct fc_exch *ep = fc_seq_exch(sp); - - spin_lock_bh(&ep->ex_lock); - sp = fc_seq_start_next_locked(sp); - spin_unlock_bh(&ep->ex_lock); - - return sp; -} -EXPORT_SYMBOL(fc_seq_start_next); - -int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp) -{ - struct fc_exch *ep; - struct fc_frame_header *fh = fc_frame_header_get(fp); - int error; - u32 f_ctl; - - ep = fc_seq_exch(sp); - WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); - - f_ctl = ntoh24(fh->fh_f_ctl); - fc_exch_setup_hdr(ep, fp, f_ctl); - - /* - * update sequence count if this frame is carrying - * multiple FC frames when sequence offload is enabled - * by LLD. - */ - if (fr_max_payload(fp)) - sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)), - fr_max_payload(fp)); - else - sp->cnt++; - - /* - * Send the frame. - */ - error = lp->tt.frame_send(lp, fp); - - /* - * Update the exchange and sequence flags, - * assuming all frames for the sequence have been sent. - * We can only be called to send once for each sequence. - */ - spin_lock_bh(&ep->ex_lock); - ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ - if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT)) - ep->esb_stat &= ~ESB_ST_SEQ_INIT; - spin_unlock_bh(&ep->ex_lock); - return error; -} -EXPORT_SYMBOL(fc_seq_send); - -void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, - struct fc_seq_els_data *els_data) +static void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, + struct fc_seq_els_data *els_data) { switch (els_cmd) { case ELS_LS_RJT: @@ -909,7 +930,6 @@ void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd); } } -EXPORT_SYMBOL(fc_seq_els_rsp_send); /* * Send a sequence, which is also the last sequence in the exchange. @@ -1662,6 +1682,68 @@ cleanup: fc_exch_release(aborted_ep); } + +/** + * This function is for exch_seq_send function pointer in + * struct libfc_function_template, see comment block on + * exch_seq_send for description of this function. 
+ */ +static struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, + struct fc_frame *fp, + void (*resp)(struct fc_seq *, + struct fc_frame *fp, + void *arg), + void (*destructor)(struct fc_seq *, + void *), + void *arg, u32 timer_msec) +{ + struct fc_exch *ep; + struct fc_seq *sp = NULL; + struct fc_frame_header *fh; + int rc = 1; + + ep = fc_exch_alloc(lp, fp); + if (!ep) { + fc_frame_free(fp); + return NULL; + } + ep->esb_stat |= ESB_ST_SEQ_INIT; + fh = fc_frame_header_get(fp); + fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id)); + ep->resp = resp; + ep->destructor = destructor; + ep->arg = arg; + ep->r_a_tov = FC_DEF_R_A_TOV; + ep->lp = lp; + sp = &ep->seq; + + ep->fh_type = fh->fh_type; /* save for possbile timeout handling */ + ep->f_ctl = ntoh24(fh->fh_f_ctl); + fc_exch_setup_hdr(ep, fp, ep->f_ctl); + sp->cnt++; + + if (ep->xid <= lp->lro_xid) + fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); + + if (unlikely(lp->tt.frame_send(lp, fp))) + goto err; + + if (timer_msec) + fc_exch_timer_set_locked(ep, timer_msec); + ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ + + if (ep->f_ctl & FC_FC_SEQ_INIT) + ep->esb_stat &= ~ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); + return sp; +err: + rc = fc_exch_done_locked(ep); + spin_unlock_bh(&ep->ex_lock); + if (!rc) + fc_exch_delete(ep); + return NULL; +} + /* * Send ELS RRQ - Reinstate Recovery Qualifier. * This tells the remote port to stop blocking the use of @@ -1902,63 +1984,6 @@ void fc_exch_mgr_free(struct fc_lport *lport) } EXPORT_SYMBOL(fc_exch_mgr_free); - -struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, - struct fc_frame *fp, - void (*resp)(struct fc_seq *, - struct fc_frame *fp, - void *arg), - void (*destructor)(struct fc_seq *, void *), - void *arg, u32 timer_msec) -{ - struct fc_exch *ep; - struct fc_seq *sp = NULL; - struct fc_frame_header *fh; - int rc = 1; - - ep = fc_exch_alloc(lp, fp); - if (!ep) { - fc_frame_free(fp); - return NULL; - } - ep->esb_stat |= ESB_ST_SEQ_INIT; - fh = fc_frame_header_get(fp); - fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id)); - ep->resp = resp; - ep->destructor = destructor; - ep->arg = arg; - ep->r_a_tov = FC_DEF_R_A_TOV; - ep->lp = lp; - sp = &ep->seq; - - ep->fh_type = fh->fh_type; /* save for possbile timeout handling */ - ep->f_ctl = ntoh24(fh->fh_f_ctl); - fc_exch_setup_hdr(ep, fp, ep->f_ctl); - sp->cnt++; - - if (ep->xid <= lp->lro_xid) - fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); - - if (unlikely(lp->tt.frame_send(lp, fp))) - goto err; - - if (timer_msec) - fc_exch_timer_set_locked(ep, timer_msec); - ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ - - if (ep->f_ctl & FC_FC_SEQ_INIT) - ep->esb_stat &= ~ESB_ST_SEQ_INIT; - spin_unlock_bh(&ep->ex_lock); - return sp; -err: - rc = fc_exch_done_locked(ep); - spin_unlock_bh(&ep->ex_lock); - if (!rc) - fc_exch_delete(ep); - return NULL; -} -EXPORT_SYMBOL(fc_exch_seq_send); - /* * Receive a frame */ diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 9617f9365e45..f207b6cac06f 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -1011,55 +1011,6 @@ void fc_exch_mgr_free(struct fc_lport *lport); */ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp); -/* - * This function is for exch_seq_send function pointer in - * struct libfc_function_template, see comment block on - * exch_seq_send for description of this function. 
- */ -struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, - struct fc_frame *fp, - void (*resp)(struct fc_seq *sp, - struct fc_frame *fp, - void *arg), - void (*destructor)(struct fc_seq *sp, - void *arg), - void *arg, u32 timer_msec); - -/* - * send a frame using existing sequence and exchange. - */ -int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp); - -/* - * Send ELS response using mainly infomation - * in exchange and sequence in EM layer. - */ -void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, - struct fc_seq_els_data *els_data); - -/* - * This function is for seq_exch_abort function pointer in - * struct libfc_function_template, see comment block on - * seq_exch_abort for description of this function. - */ -int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec); - -/* - * Indicate that an exchange/sequence tuple is complete and the memory - * allocated for the related objects may be freed. - */ -void fc_exch_done(struct fc_seq *sp); - -/* - * Allocate a new exchange and sequence pair. - */ -struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp); -/* - * Start a new sequence on the same exchange as the supplied sequence. - */ -struct fc_seq *fc_seq_start_next(struct fc_seq *sp); - - /* * Reset all EMs of a lport, releasing its all sequences and * exchanges. If sid is non-zero, then reset only exchanges -- cgit v1.2.3-59-g8ed1b From 255f6386b816b2bc0c251af0ee4985ad5a8461b7 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 3 Nov 2009 11:45:52 -0800 Subject: [SCSI] libfc: Remove fc_fcp_complete This function is never used, let's remove it. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 17 ----------------- include/scsi/libfc.h | 8 -------- 2 files changed, 25 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 479af9352a42..3ab08f8dfb25 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1874,23 +1874,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) fc_fcp_pkt_release(fsp); } -/** - * fc_fcp_complete() - complete processing of a fcp packet - * @fsp: fcp packet - * - * This function may sleep if a fsp timer is pending. - * The host lock must not be held by caller. - */ -void fc_fcp_complete(struct fc_fcp_pkt *fsp) -{ - if (fc_fcp_lock_pkt(fsp)) - return; - - fc_fcp_complete_locked(fsp); - fc_fcp_unlock_pkt(fsp); -} -EXPORT_SYMBOL(fc_fcp_complete); - /** * fc_eh_abort() - Abort a command * @sc_cmd: scsi command to abort diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index f207b6cac06f..db2175da2da5 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -887,14 +887,6 @@ int fc_fcp_init(struct fc_lport *); int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)); -/* - * complete processing of a fcp packet - * - * This function may sleep if a fsp timer is pending. - * The host lock must not be held by caller. - */ -void fc_fcp_complete(struct fc_fcp_pkt *fsp); - /* * Send an ABTS frame to the target device. The sc_cmd argument * is a pointer to the SCSI command to be aborted. -- cgit v1.2.3-59-g8ed1b From 8866a5d9075b7129194576f5f810e85a693c40ba Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 3 Nov 2009 11:45:58 -0800 Subject: [SCSI] libfc: Add libfc/fc_libfc.[ch] for libfc internal routines include/scsi/libfc.h is currently loaded with common code shared between libfc's sub-modules as well as shared between libfc and fcoe. 
Previous patches attempted to move out non-common code. This patch creates two files for common libfc routines that will not be shared with fcoe, fnic or any other LLDs. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/Makefile | 1 + drivers/scsi/libfc/fc_disc.c | 2 + drivers/scsi/libfc/fc_exch.c | 2 + drivers/scsi/libfc/fc_fcp.c | 8 +--- drivers/scsi/libfc/fc_libfc.c | 35 +++++++++++++++ drivers/scsi/libfc/fc_libfc.h | 102 ++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/libfc/fc_lport.c | 2 + drivers/scsi/libfc/fc_rport.c | 2 + include/scsi/libfc.h | 79 -------------------------------- 9 files changed, 147 insertions(+), 86 deletions(-) create mode 100644 drivers/scsi/libfc/fc_libfc.c create mode 100644 drivers/scsi/libfc/fc_libfc.h diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile index 55f982de3a9a..2be549c1db77 100644 --- a/drivers/scsi/libfc/Makefile +++ b/drivers/scsi/libfc/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_LIBFC) += libfc.o libfc-objs := \ + fc_libfc.o \ fc_disc.o \ fc_exch.o \ fc_elsct.o \ diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index d4cb3f9b1a0d..a4bdec28fef5 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -40,6 +40,8 @@ #include +#include "fc_libfc.h" + #define FC_DISC_RETRY_LIMIT 3 /* max retries */ #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 659bb05287f3..ee6031e24c14 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -32,6 +32,8 @@ #include #include +#include "fc_libfc.h" + u16 fc_cpu_mask; /* cpu mask for possible cpus */ EXPORT_SYMBOL(fc_cpu_mask); static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 3ab08f8dfb25..8a31ced98bd0 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -39,13 +39,7 @@ #include #include -MODULE_AUTHOR("Open-FCoE.org"); -MODULE_DESCRIPTION("libfc"); -MODULE_LICENSE("GPL v2"); - -unsigned int fc_debug_logging; -module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); +#include "fc_libfc.h" static struct kmem_cache *scsi_pkt_cachep; diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c new file mode 100644 index 000000000000..e64ea870a4c8 --- /dev/null +++ b/drivers/scsi/libfc/fc_libfc.c @@ -0,0 +1,35 @@ +/* + * Copyright(c) 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Maintained at www.Open-FCoE.org + */ + +#include +#include +#include +#include + +#include + +#include "fc_libfc.h" + +MODULE_AUTHOR("Open-FCoE.org"); +MODULE_DESCRIPTION("libfc"); +MODULE_LICENSE("GPL v2"); + +unsigned int fc_debug_logging; +module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h new file mode 100644 index 000000000000..388fae4364af --- /dev/null +++ b/drivers/scsi/libfc/fc_libfc.h @@ -0,0 +1,102 @@ +/* + * Copyright(c) 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#ifndef _FC_LIBFC_H_ +#define _FC_LIBFC_H_ + +#define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */ +#define FC_LPORT_LOGGING 0x02 /* lport layer logging */ +#define FC_DISC_LOGGING 0x04 /* discovery layer logging */ +#define FC_RPORT_LOGGING 0x08 /* rport layer logging */ +#define FC_FCP_LOGGING 0x10 /* I/O path logging */ +#define FC_EM_LOGGING 0x20 /* Exchange Manager logging */ +#define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */ +#define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */ + +extern unsigned int fc_debug_logging; + +#define FC_CHECK_LOGGING(LEVEL, CMD) \ +do { \ + if (unlikely(fc_debug_logging & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ +} while (0) + +#define FC_LIBFC_DBG(fmt, args...) \ + FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \ + printk(KERN_INFO "libfc: " fmt, ##args)) + +#define FC_LPORT_DBG(lport, fmt, args...) \ + FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ + printk(KERN_INFO "host%u: lport %6x: " fmt, \ + (lport)->host->host_no, \ + fc_host_port_id((lport)->host), ##args)) + +#define FC_DISC_DBG(disc, fmt, args...) \ + FC_CHECK_LOGGING(FC_DISC_LOGGING, \ + printk(KERN_INFO "host%u: disc: " fmt, \ + (disc)->lport->host->host_no, \ + ##args)) + +#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ + FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ + printk(KERN_INFO "host%u: rport %6x: " fmt, \ + (lport)->host->host_no, \ + (port_id), ##args)) + +#define FC_RPORT_DBG(rdata, fmt, args...) \ + FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args) + +#define FC_FCP_DBG(pkt, fmt, args...) \ + FC_CHECK_LOGGING(FC_FCP_LOGGING, \ + printk(KERN_INFO "host%u: fcp: %6x: " fmt, \ + (pkt)->lp->host->host_no, \ + pkt->rport->port_id, ##args)) + +#define FC_EXCH_DBG(exch, fmt, args...) \ + FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ + printk(KERN_INFO "host%u: xid %4x: " fmt, \ + (exch)->lp->host->host_no, \ + exch->xid, ##args)) + +#define FC_SCSI_DBG(lport, fmt, args...) 
\ + FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ + printk(KERN_INFO "host%u: scsi: " fmt, \ + (lport)->host->host_no, ##args)) + +/* + * Set up direct-data placement for this I/O request + */ +void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid); + +/* + * Module setup functions + */ +int fc_setup_exch_mgr(void); +void fc_destroy_exch_mgr(void); +int fc_setup_rport(void); +void fc_destroy_rport(void); + +/* + * Internal libfc functions + */ +const char *fc_els_resp_type(struct fc_frame *); + +#endif /* _FC_LIBFC_H_ */ diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 536492ae6a88..f7f20a46e494 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -95,6 +95,8 @@ #include #include +#include "fc_libfc.h" + /* Fabric IDs to use for point-to-point mode, chosen on whims. */ #define FC_LOCAL_PTP_FID_LO 0x010101 #define FC_LOCAL_PTP_FID_HI 0x010102 diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 324e156b5d07..622285c81fef 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -55,6 +55,8 @@ #include #include +#include "fc_libfc.h" + struct workqueue_struct *rport_event_queue; static void fc_rport_enter_plogi(struct fc_rport_priv *); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index db2175da2da5..690f8296e633 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -34,67 +34,6 @@ #include -#define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */ -#define FC_LPORT_LOGGING 0x02 /* lport layer logging */ -#define FC_DISC_LOGGING 0x04 /* discovery layer logging */ -#define FC_RPORT_LOGGING 0x08 /* rport layer logging */ -#define FC_FCP_LOGGING 0x10 /* I/O path logging */ -#define FC_EM_LOGGING 0x20 /* Exchange Manager logging */ -#define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */ -#define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */ - -extern unsigned int fc_debug_logging; - -#define FC_CHECK_LOGGING(LEVEL, CMD) \ -do { \ - if (unlikely(fc_debug_logging & LEVEL)) \ - do { \ - CMD; \ - } while (0); \ -} while (0) - -#define FC_LIBFC_DBG(fmt, args...) \ - FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \ - printk(KERN_INFO "libfc: " fmt, ##args)) - -#define FC_LPORT_DBG(lport, fmt, args...) \ - FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ - printk(KERN_INFO "host%u: lport %6x: " fmt, \ - (lport)->host->host_no, \ - fc_host_port_id((lport)->host), ##args)) - -#define FC_DISC_DBG(disc, fmt, args...) \ - FC_CHECK_LOGGING(FC_DISC_LOGGING, \ - printk(KERN_INFO "host%u: disc: " fmt, \ - (disc)->lport->host->host_no, \ - ##args)) - -#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ - FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ - printk(KERN_INFO "host%u: rport %6x: " fmt, \ - (lport)->host->host_no, \ - (port_id), ##args)) - -#define FC_RPORT_DBG(rdata, fmt, args...) \ - FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args) - -#define FC_FCP_DBG(pkt, fmt, args...) \ - FC_CHECK_LOGGING(FC_FCP_LOGGING, \ - printk(KERN_INFO "host%u: fcp: %6x: " fmt, \ - (pkt)->lp->host->host_no, \ - pkt->rport->port_id, ##args)) - -#define FC_EXCH_DBG(exch, fmt, args...) \ - FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ - printk(KERN_INFO "host%u: xid %4x: " fmt, \ - (exch)->lp->host->host_no, \ - exch->xid, ##args)) - -#define FC_SCSI_DBG(lport, fmt, args...) 
\ - FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ - printk(KERN_INFO "host%u: scsi: " fmt, \ - (lport)->host->host_no, ##args)) - /* * libfc error codes */ @@ -923,11 +862,6 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type); */ void fc_fcp_destroy(struct fc_lport *); -/* - * Set up direct-data placement for this I/O request - */ -void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid); - /* * ELS/CT interface *****************************/ @@ -1020,17 +954,4 @@ void fc_get_host_port_state(struct Scsi_Host *shost); void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout); struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *); -/* - * module setup functions. - */ -int fc_setup_exch_mgr(void); -void fc_destroy_exch_mgr(void); -int fc_setup_rport(void); -void fc_destroy_rport(void); - -/* - * Internal libfc functions. - */ -const char *fc_els_resp_type(struct fc_frame *); - #endif /* _LIBFC_H_ */ -- cgit v1.2.3-59-g8ed1b From 93e6d5ab9969a9200752658677eafd96772302f0 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 3 Nov 2009 11:46:03 -0800 Subject: [SCSI] libfc: Move libfc_init and libfc_exit to fc_libfc.c These routines are for the libfc kernel module and should be in the libfc .c file. Moving the libfc __init routine into fc_libfc.c caused the creation of the fc_setup_fcp() and fc_destroy_fcp() routines so that scsi_pkt_cachep was not exposed outside of fc_fcp.c. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 63 ++++++++++++++++--------------------------- drivers/scsi/libfc/fc_libfc.c | 39 +++++++++++++++++++++++++++ drivers/scsi/libfc/fc_libfc.h | 2 ++ 3 files changed, 64 insertions(+), 40 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 8a31ced98bd0..866f78ac4ec2 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -41,7 +41,7 @@ #include "fc_libfc.h" -static struct kmem_cache *scsi_pkt_cachep; +struct kmem_cache *scsi_pkt_cachep; /* SRB state definitions */ #define FC_SRB_FREE 0 /* cmd is free */ @@ -2072,6 +2072,28 @@ void fc_fcp_destroy(struct fc_lport *lp) } EXPORT_SYMBOL(fc_fcp_destroy); +int fc_setup_fcp() +{ + int rc = 0; + + scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt", + sizeof(struct fc_fcp_pkt), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!scsi_pkt_cachep) { + printk(KERN_ERR "libfc: Unable to allocate SRB cache, " + "module load failed!"); + rc = -ENOMEM; + } + + return rc; +} + +void fc_destroy_fcp() +{ + if (scsi_pkt_cachep) + kmem_cache_destroy(scsi_pkt_cachep); +} + int fc_fcp_init(struct fc_lport *lp) { int rc; @@ -2104,42 +2126,3 @@ free_internal: return rc; } EXPORT_SYMBOL(fc_fcp_init); - -static int __init libfc_init(void) -{ - int rc; - - scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt", - sizeof(struct fc_fcp_pkt), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (scsi_pkt_cachep == NULL) { - printk(KERN_ERR "libfc: Unable to allocate SRB cache, " - "module load failed!"); - return -ENOMEM; - } - - rc = fc_setup_exch_mgr(); - if (rc) - goto destroy_pkt_cache; - - rc = fc_setup_rport(); - if (rc) - goto destroy_em; - - return rc; -destroy_em: - fc_destroy_exch_mgr(); -destroy_pkt_cache: - kmem_cache_destroy(scsi_pkt_cachep); - return rc; -} - -static void __exit libfc_exit(void) -{ - kmem_cache_destroy(scsi_pkt_cachep); - fc_destroy_exch_mgr(); - fc_destroy_rport(); -} - -module_init(libfc_init); -module_exit(libfc_exit); diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c index e64ea870a4c8..01418ae8cb84 100644 --- 
a/drivers/scsi/libfc/fc_libfc.c +++ b/drivers/scsi/libfc/fc_libfc.c @@ -33,3 +33,42 @@ MODULE_LICENSE("GPL v2"); unsigned int fc_debug_logging; module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + +/** + * libfc_init() - Initialize libfc.ko + */ +static int __init libfc_init(void) +{ + int rc = 0; + + rc = fc_setup_fcp(); + if (rc) + return rc; + + rc = fc_setup_exch_mgr(); + if (rc) + goto destroy_pkt_cache; + + rc = fc_setup_rport(); + if (rc) + goto destroy_em; + + return rc; +destroy_em: + fc_destroy_exch_mgr(); +destroy_pkt_cache: + fc_destroy_fcp(); + return rc; +} +module_init(libfc_init); + +/** + * libfc_exit() - Tear down libfc.ko + */ +static void __exit libfc_exit(void) +{ + fc_destroy_fcp(); + fc_destroy_exch_mgr(); + fc_destroy_rport(); +} +module_exit(libfc_exit); diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h index 388fae4364af..0530149ac174 100644 --- a/drivers/scsi/libfc/fc_libfc.h +++ b/drivers/scsi/libfc/fc_libfc.h @@ -93,6 +93,8 @@ int fc_setup_exch_mgr(void); void fc_destroy_exch_mgr(void); int fc_setup_rport(void); void fc_destroy_rport(void); +int fc_setup_fcp(void); +void fc_destroy_fcp(void); /* * Internal libfc functions -- cgit v1.2.3-59-g8ed1b From 86221969e20a2f60ce104160dc836a964974673b Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:46:08 -0800 Subject: [SCSI] libfc: changes to libfc_host_alloc to consolidate initialization with allocation I'd like to keep basic initialization together with allocation, which means this can't just be a tail-call to scsi_host_alloc. This is needed to create a generic libfc host allocation routine for NPIV VN_Ports, which will share the exchange ID space (through sharing exchange manager structures) with the parent lport. In order to clone the exchange manager list when the lport is allocated, the list head must be initialized earlier. Also, update fnic to use the libfc_host_alloc so that later changes do not break it. 
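A hypothetical LLD probe path illustrates the new allocation flow (the names below are illustrative, not from the patch): libfc_host_alloc() now hands back the fc_lport with lport->host and lport->ema_list already initialized, so the driver no longer calls scsi_host_alloc()/shost_priv() or sets lport->host by hand.

/* Hypothetical LLD, for illustration only */
struct my_port {
	struct fc_lport *lport;
	/* ... driver-private state ... */
};

static int my_probe(struct scsi_host_template *sht)
{
	struct fc_lport *lport;
	struct my_port *port;

	lport = libfc_host_alloc(sht, sizeof(*port));
	if (!lport)
		return -ENOMEM;

	port = lport_priv(lport);	/* private area behind struct fc_lport */
	port->lport = lport;

	/* lport->host and lport->ema_list are already valid here, so an
	 * NPIV-aware caller may clone the EM list before scsi_add_host(). */
	return 0;
}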
(contribution by Joe Eykholt) Signed-off-by: Chris Leech Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 8 +++----- drivers/scsi/fnic/fnic_main.c | 10 ++++------ drivers/scsi/libfc/fc_lport.c | 1 - include/scsi/libfc.h | 15 ++++++++++++--- 4 files changed, 19 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 4efbc17a7d7f..8ca488de492d 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -515,8 +515,6 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, int rc = 0; /* lport scsi host config */ - lp->host = shost; - lp->host->max_lun = FCOE_MAX_LUN; lp->host->max_id = FCOE_MAX_FCP_TARGET; lp->host->max_channel = 0; @@ -734,14 +732,14 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, FCOE_NETDEV_DBG(netdev, "Create Interface\n"); - shost = libfc_host_alloc(&fcoe_shost_template, + lport = libfc_host_alloc(&fcoe_shost_template, sizeof(struct fcoe_port)); - if (!shost) { + if (!lport) { FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); rc = -ENOMEM; goto out; } - lport = shost_priv(shost); + shost = lport->host; port = lport_priv(lport); port->lport = lport; port->fcoe = fcoe; diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index fc61f17025ce..018cc427504a 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -424,15 +424,13 @@ static int __devinit fnic_probe(struct pci_dev *pdev, * Allocate SCSI Host and set up association between host, * local port, and fnic */ - host = scsi_host_alloc(&fnic_host_template, - sizeof(struct fc_lport) + sizeof(struct fnic)); - if (!host) { - printk(KERN_ERR PFX "Unable to alloc SCSI host\n"); + lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic)); + if (!lp) { + printk(KERN_ERR PFX "Unable to alloc libfc local port\n"); err = -ENOMEM; goto err_out; } - lp = shost_priv(host); - lp->host = host; + host = lp->host; fnic = lport_priv(lp); fnic->lport = lp; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index f7f20a46e494..41650d336289 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1505,7 +1505,6 @@ int fc_lport_init(struct fc_lport *lport) if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; - INIT_LIST_HEAD(&lport->ema_list); return 0; } EXPORT_SYMBOL(fc_lport_init); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 690f8296e633..ed3057b4e78d 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -739,12 +739,21 @@ static inline void *lport_priv(const struct fc_lport *lp) * @sht: ptr to the scsi host templ * @priv_size: size of private data after fc_lport * - * Returns: ptr to Scsi_Host + * Returns: libfc lport */ -static inline struct Scsi_Host * +static inline struct fc_lport * libfc_host_alloc(struct scsi_host_template *sht, int priv_size) { - return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size); + struct fc_lport *lport; + struct Scsi_Host *shost; + + shost = scsi_host_alloc(sht, sizeof(*lport) + priv_size); + if (!shost) + return NULL; + lport = shost_priv(shost); + lport->host = shost; + INIT_LIST_HEAD(&lport->ema_list); + return lport; } /* -- cgit v1.2.3-59-g8ed1b From 174e1ebffd30a7599b889900089f7acef944cc6b Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:46:14 -0800 Subject: [SCSI] libfc: add some generic NPIV 
support routines to libfc Adds a function to create a new VN_Port instances, which share the EM list with the N_Port, VN_Port lookup by fabric ID when responding to a new request (otherwise the exchange lookup from the N_Ports EM list is trusted to return an exchange with a cached lport value for the correct VN_Port), a pointer to a fc_vport structure for VN_Ports, and flags to indicate if an N_Port supports NPIV and if the switch/fabric allows it. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/Makefile | 3 +- drivers/scsi/libfc/fc_exch.c | 29 +++++++++++++++ drivers/scsi/libfc/fc_npiv.c | 86 ++++++++++++++++++++++++++++++++++++++++++++ include/scsi/libfc.h | 20 +++++++++++ 4 files changed, 137 insertions(+), 1 deletion(-) create mode 100644 drivers/scsi/libfc/fc_npiv.c diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile index 2be549c1db77..4bb23ac86a5c 100644 --- a/drivers/scsi/libfc/Makefile +++ b/drivers/scsi/libfc/Makefile @@ -10,4 +10,5 @@ libfc-objs := \ fc_frame.o \ fc_lport.o \ fc_rport.o \ - fc_fcp.o + fc_fcp.o \ + fc_npiv.o diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index ee6031e24c14..751a485685d9 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1134,6 +1134,15 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, u32 f_ctl; enum fc_pf_rjt_reason reject; + /* We can have the wrong fc_lport at this point with NPIV, which is a + * problem now that we know a new exchange needs to be allocated + */ + lp = fc_vport_id_lookup(lp, ntoh24(fh->fh_d_id)); + if (!lp) { + fc_frame_free(fp); + return; + } + fr_seq(fp) = NULL; reject = fc_seq_lookup_recip(lp, mp, fp); if (reject == FC_RJT_NONE) { @@ -1900,6 +1909,26 @@ void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) } EXPORT_SYMBOL(fc_exch_mgr_del); +/** + * fc_exch_mgr_list_clone() - share all exchange manager objects + * @src: source lport to clone exchange managers from + * @dst: new lport that takes references to all the exchange managers + */ +int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst) +{ + struct fc_exch_mgr_anchor *ema, *tmp; + + list_for_each_entry(ema, &src->ema_list, ema_list) { + if (!fc_exch_mgr_add(dst, ema->mp, ema->match)) + goto err; + } + return 0; +err: + list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list) + fc_exch_mgr_del(ema); + return -ENOMEM; +} + struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, enum fc_class class, u16 min_xid, u16 max_xid, diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c new file mode 100644 index 000000000000..39f02c09a8d9 --- /dev/null +++ b/drivers/scsi/libfc/fc_npiv.c @@ -0,0 +1,86 @@ +/* + * Copyright(c) 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Maintained at www.Open-FCoE.org + */ + +/* + * NPIV VN_Port helper functions for libfc + */ + +#include + +/** + * fc_vport_create() - Create a new NPIV vport instance + * @vport: fc_vport structure from scsi_transport_fc + * @privsize: driver private data size to allocate along with the Scsi_Host + */ + +struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port; + + vn_port = libfc_host_alloc(shost->hostt, privsize); + if (!vn_port) + goto err_out; + if (fc_exch_mgr_list_clone(n_port, vn_port)) + goto err_put; + + vn_port->vport = vport; + vport->dd_data = vn_port; + + mutex_lock(&n_port->lp_mutex); + list_add_tail(&vn_port->list, &n_port->vports); + mutex_unlock(&n_port->lp_mutex); + + return vn_port; + +err_put: + scsi_host_put(vn_port->host); +err_out: + return NULL; +} +EXPORT_SYMBOL(libfc_vport_create); + +/** + * fc_vport_id_lookup() - find NPIV lport that matches a given fabric ID + * @n_port: Top level N_Port which may have multiple NPIV VN_Ports + * @port_id: Fabric ID to find a match for + * + * Returns: matching lport pointer or NULL if there is no match + */ +struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id) +{ + struct fc_lport *lport = NULL; + struct fc_lport *vn_port; + + if (fc_host_port_id(n_port->host) == port_id) + return n_port; + + mutex_lock(&n_port->lp_mutex); + list_for_each_entry(vn_port, &n_port->vports, list) { + if (fc_host_port_id(vn_port->host) == port_id) { + lport = vn_port; + break; + } + } + mutex_unlock(&n_port->lp_mutex); + + return lport; +} + diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index ed3057b4e78d..2c6d55de8ccd 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -640,6 +640,8 @@ struct fc_lport { /* Associations */ struct Scsi_Host *host; struct list_head ema_list; + struct list_head vports; /* child vports if N_Port */ + struct fc_vport *vport; /* parent vport if VN_Port */ struct fc_rport_priv *dns_rp; struct fc_rport_priv *ptp_rp; void *scsi_priv; @@ -664,6 +666,8 @@ struct fc_lport { u32 seq_offload:1; /* seq offload supported */ u32 crc_offload:1; /* crc offload supported */ u32 lro_enabled:1; /* large receive offload */ + u32 does_npiv:1; /* supports multiple vports */ + u32 npiv_enabled:1; /* switch/fabric allows NPIV */ u32 mfs; /* max FC payload size */ unsigned int service_params; unsigned int e_d_tov; @@ -753,6 +757,7 @@ libfc_host_alloc(struct scsi_host_template *sht, int priv_size) lport = shost_priv(shost); lport->host = shost; INIT_LIST_HEAD(&lport->ema_list); + INIT_LIST_HEAD(&lport->vports); return lport; } @@ -805,6 +810,15 @@ int fc_lport_reset(struct fc_lport *); */ int fc_set_mfs(struct fc_lport *lp, u32 mfs); +/* + * Allocate a new lport struct for an NPIV VN_Port + */ +struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize); + +/* + * Find an NPIV VN_Port by port ID + */ +struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id); /* * REMOTE PORT LAYER @@ -911,6 +925,12 @@ struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, */ void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema); +/* + * Clone an exchange manager list, getting reference holds for each EM. + * This is for use with NPIV and sharing the X_ID space between VN_Ports. + */ +int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst); + /* * Allocates an Exchange Manager (EM). 
* -- cgit v1.2.3-59-g8ed1b From 8faecddb212d502b1b77936498b9a82b13c4ff44 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:46:19 -0800 Subject: [SCSI] libfc: vport link handling and fc_vport state managment NPIV vports are managed in libfc by changing their virtual link state when the parent N_Ports internal state changes. The vport link is only online when the N_Port is in a ready state (logged into the fabric). vport_state is updated as needed in this patch as well, currently the states LINKDOWN, INITIALIZING, ACTIVE, DSIABLED, and NO_FABRIC_SUPP are used. This also changes the fc_host port_state handling to differentiate between LINKDOWN and OFFLINE. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 72 ++++++++++++++++++++++++++++++++--------- drivers/scsi/libfc/fc_npiv.c | 75 +++++++++++++++++++++++++++++++++++++++++++ include/scsi/libfc.h | 8 +++++ 3 files changed, 140 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 41650d336289..46897cf23ea6 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -224,10 +224,18 @@ void fc_get_host_port_state(struct Scsi_Host *shost) { struct fc_lport *lp = shost_priv(shost); - if (lp->link_up) - fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + mutex_lock(&lp->lp_mutex); + if (!lp->link_up) + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; else - fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + switch (lp->state) { + case LPORT_ST_READY: + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + break; + default: + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + } + mutex_unlock(&lp->lp_mutex); } EXPORT_SYMBOL(fc_get_host_port_state); @@ -493,40 +501,62 @@ int fc_fabric_login(struct fc_lport *lport) EXPORT_SYMBOL(fc_fabric_login); /** - * fc_linkup() - Handler for transport linkup events + * __fc_linkup() - Handler for transport linkup events * @lport: The lport whose link is up + * + * Locking: must be called with the lp_mutex held */ -void fc_linkup(struct fc_lport *lport) +void __fc_linkup(struct fc_lport *lport) { - printk(KERN_INFO "libfc: Link up on port (%6x)\n", - fc_host_port_id(lport->host)); - - mutex_lock(&lport->lp_mutex); if (!lport->link_up) { lport->link_up = 1; if (lport->state == LPORT_ST_RESET) fc_lport_enter_flogi(lport); } +} + +/** + * fc_linkup() - Handler for transport linkup events + * @lport: The lport whose link is up + */ +void fc_linkup(struct fc_lport *lport) +{ + printk(KERN_INFO "libfc: Link up on port (%6x)\n", + fc_host_port_id(lport->host)); + + mutex_lock(&lport->lp_mutex); + __fc_linkup(lport); mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_linkup); /** - * fc_linkdown() - Handler for transport linkdown events + * __fc_linkdown() - Handler for transport linkdown events * @lport: The lport whose link is down + * + * Locking: must be called with the lp_mutex held */ -void fc_linkdown(struct fc_lport *lport) +void __fc_linkdown(struct fc_lport *lport) { - mutex_lock(&lport->lp_mutex); - printk(KERN_INFO "libfc: Link down on port (%6x)\n", - fc_host_port_id(lport->host)); - if (lport->link_up) { lport->link_up = 0; fc_lport_enter_reset(lport); lport->tt.fcp_cleanup(lport); } +} + +/** + * fc_linkdown() - Handler for transport linkdown events + * @lport: The lport whose link is down + */ +void fc_linkdown(struct fc_lport *lport) +{ + printk(KERN_INFO "libfc: Link down on port (%6x)\n", + fc_host_port_id(lport->host)); + + 
mutex_lock(&lport->lp_mutex); + __fc_linkdown(lport); mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_linkdown); @@ -654,6 +684,9 @@ static void fc_lport_enter_ready(struct fc_lport *lport) fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_READY); + if (lport->vport) + fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE); + fc_vports_linkchange(lport); if (!lport->ptp_rp) lport->tt.disc_start(fc_lport_disc_callback, lport); @@ -868,7 +901,14 @@ static void fc_lport_enter_reset(struct fc_lport *lport) FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", fc_lport_state(lport)); + if (lport->vport) { + if (lport->link_up) + fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); + else + fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN); + } fc_lport_state_enter(lport, LPORT_ST_RESET); + fc_vports_linkchange(lport); fc_lport_reset_locked(lport); if (lport->link_up) fc_lport_enter_flogi(lport); @@ -887,6 +927,7 @@ static void fc_lport_enter_disabled(struct fc_lport *lport) fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_DISABLED); + fc_vports_linkchange(lport); fc_lport_reset_locked(lport); } @@ -1333,6 +1374,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport) fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_LOGO); + fc_vports_linkchange(lport); fp = fc_frame_alloc(lport, sizeof(*logo)); if (!fp) { diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c index 39f02c09a8d9..c68f6c7341c2 100644 --- a/drivers/scsi/libfc/fc_npiv.c +++ b/drivers/scsi/libfc/fc_npiv.c @@ -84,3 +84,78 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id) return lport; } +/* + * When setting the link state of vports during an lport state change, it's + * necessary to hold the lp_mutex of both the N_Port and the VN_Port. + * This tells the lockdep engine to treat the nested locking of the VN_Port + * as a different lock class. 
+ */ +enum libfc_lport_mutex_class { + LPORT_MUTEX_NORMAL = 0, + LPORT_MUTEX_VN_PORT = 1, +}; + +/** + * __fc_vport_setlink() - update link and status on a VN_Port + * @n_port: parent N_Port + * @vn_port: VN_Port to update + * + * Locking: must be called with both the N_Port and VN_Port lp_mutex held + */ +static void __fc_vport_setlink(struct fc_lport *n_port, + struct fc_lport *vn_port) +{ + struct fc_vport *vport = vn_port->vport; + + if (vn_port->state == LPORT_ST_DISABLED) + return; + + if (n_port->state == LPORT_ST_READY) { + if (n_port->npiv_enabled) { + fc_vport_set_state(vport, FC_VPORT_INITIALIZING); + __fc_linkup(vn_port); + } else { + fc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); + __fc_linkdown(vn_port); + } + } else { + fc_vport_set_state(vport, FC_VPORT_LINKDOWN); + __fc_linkdown(vn_port); + } +} + +/** + * fc_vport_setlink() - update link and status on a VN_Port + * @vn_port: virtual port to update + */ +void fc_vport_setlink(struct fc_lport *vn_port) +{ + struct fc_vport *vport = vn_port->vport; + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + + mutex_lock(&n_port->lp_mutex); + mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT); + __fc_vport_setlink(n_port, vn_port); + mutex_unlock(&vn_port->lp_mutex); + mutex_unlock(&n_port->lp_mutex); +} +EXPORT_SYMBOL(fc_vport_setlink); + +/** + * fc_vports_linkchange() - change the link state of all vports + * @n_port: Parent N_Port that has changed state + * + * Locking: called with the n_port lp_mutex held + */ +void fc_vports_linkchange(struct fc_lport *n_port) +{ + struct fc_lport *vn_port; + + list_for_each_entry(vn_port, &n_port->vports, list) { + mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT); + __fc_vport_setlink(n_port, vn_port); + mutex_unlock(&vn_port->lp_mutex); + } +} + diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 2c6d55de8ccd..dfeb1ee4f03f 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -788,11 +788,13 @@ int fc_fabric_login(struct fc_lport *lp); /* * The link is up for the given local port. */ +void __fc_linkup(struct fc_lport *); void fc_linkup(struct fc_lport *); /* * Link is down for the given local port. */ +void __fc_linkdown(struct fc_lport *); void fc_linkdown(struct fc_lport *); /* @@ -820,6 +822,12 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize); */ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id); +/* + * NPIV VN_Port link state management + */ +void fc_vport_setlink(struct fc_lport *vn_port); +void fc_vports_linkchange(struct fc_lport *n_port); + /* * REMOTE PORT LAYER *****************************/ -- cgit v1.2.3-59-g8ed1b From db36c06cc6802d03bcba08982377f7c03a3cda7f Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:46:24 -0800 Subject: [SCSI] libfc, libfcoe: FDISC ELS for NPIV Add FDISC ELS handling to libfc and libfcoe, treat it the same as FLOGI where appropriate. Add checking for NPIV support in the FLOGI LS_ACC service parameters. 
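How the overloaded sp_features word is interpreted on each side of fabric login can be summarized in a short sketch (illustrative helper, not from the patch; the bit definitions are the fc_els.h ones touched below):

/* Illustrative helper (not from the patch) showing how the same
 * sp_features word is used in each direction: the FLOGI request
 * advertises NPIV capability, and the fabric's LS_ACC reports whether
 * multiple N_Port_IDs were granted.
 */
static void example_npiv_feature_bits(struct fc_lport *lport,
				      struct fc_els_flogi *flogi_req,
				      struct fc_els_flogi *flogi_acc)
{
	/* outgoing FLOGI: advertise multiple-N_Port_ID support */
	if (lport->does_npiv)
		flogi_req->fl_csp.sp_features = htons(FC_SP_FT_NPIV);

	/* incoming LS_ACC: the same word carries the fabric's answer */
	lport->npiv_enabled = !!(ntohs(flogi_acc->fl_csp.sp_features) &
				 FC_SP_FT_NPIV_ACC);
}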
Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 6 +++--- drivers/scsi/libfc/fc_lport.c | 6 +++++- include/scsi/fc/fc_els.h | 4 +++- include/scsi/fc_encode.h | 29 +++++++++++++++++++++++++++++ 4 files changed, 40 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 11ae5c94608b..d8ea04a29199 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -449,7 +449,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, memset(mac, 0, sizeof(mac)); mac->fd_desc.fip_dtype = FIP_DT_MAC; mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; - if (dtype != FIP_DT_FLOGI) + if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN); else if (fip->spma) memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); @@ -865,8 +865,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) goto drop; els_op = *(u8 *)(fh + 1); - if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP && - fip->flogi_oxid == ntohs(fh->fh_ox_id) && + if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) && + sub == FIP_SC_REP && fip->flogi_oxid == ntohs(fh->fh_ox_id) && els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) { fip->flogi_oxid = FC_XID_UNKNOWN; fip->update_mac(fip, fip->data_src_addr, granted_mac); diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 46897cf23ea6..ccba67ca68a1 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1449,6 +1449,9 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); if (csp_flags & FC_SP_FT_EDTR) e_d_tov /= 1000000; + + lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC); + if ((csp_flags & FC_SP_FT_FPORT) == 0) { if (e_d_tov > lport->e_d_tov) lport->e_d_tov = e_d_tov; @@ -1498,7 +1501,8 @@ void fc_lport_enter_flogi(struct fc_lport *lport) if (!fp) return fc_lport_error(lport, fp); - if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI, + if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, + lport->vport ? ELS_FDISC : ELS_FLOGI, fc_lport_flogi_resp, lport, lport->e_d_tov)) fc_lport_error(lport, NULL); } diff --git a/include/scsi/fc/fc_els.h b/include/scsi/fc/fc_els.h index 195ca014d3ce..b0872afe2d30 100644 --- a/include/scsi/fc/fc_els.h +++ b/include/scsi/fc/fc_els.h @@ -248,10 +248,12 @@ struct fc_els_csp { /* * sp_features */ -#define FC_SP_FT_CIRO 0x8000 /* continuously increasing rel. off. */ +#define FC_SP_FT_NPIV 0x8000 /* multiple N_Port_ID support (FLOGI) */ +#define FC_SP_FT_CIRO 0x8000 /* continuously increasing rel off (PLOGI) */ #define FC_SP_FT_CLAD 0x8000 /* clean address (in FLOGI LS_ACC) */ #define FC_SP_FT_RAND 0x4000 /* random relative offset */ #define FC_SP_FT_VAL 0x2000 /* valid vendor version level */ +#define FC_SP_FT_NPIV_ACC 0x2000 /* NPIV assignment (FLOGI LS_ACC) */ #define FC_SP_FT_FPORT 0x1000 /* F port (1) vs. 
N port (0) */ #define FC_SP_FT_ABB 0x0800 /* alternate BB_credit management */ #define FC_SP_FT_EDTR 0x0400 /* E_D_TOV Resolution is nanoseconds */ diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index 27dad703824f..c93ca3ece1a0 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -198,6 +198,31 @@ static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp) sp->sp_bb_data = htons((u16) lport->mfs); cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); + if (lport->does_npiv) + sp->sp_features = htons(FC_SP_FT_NPIV); +} + +/** + * fc_fdisc_fill - Fill in a fdisc request frame. + */ +static inline void fc_fdisc_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_csp *sp; + struct fc_els_cssp *cp; + struct fc_els_flogi *fdisc; + + fdisc = fc_frame_payload_get(fp, sizeof(*fdisc)); + memset(fdisc, 0, sizeof(*fdisc)); + fdisc->fl_cmd = (u8) ELS_FDISC; + put_unaligned_be64(lport->wwpn, &fdisc->fl_wwpn); + put_unaligned_be64(lport->wwnn, &fdisc->fl_wwnn); + sp = &fdisc->fl_csp; + sp->sp_hi_ver = 0x20; + sp->sp_lo_ver = 0x20; + sp->sp_bb_cred = htons(10); /* this gets set by gateway */ + sp->sp_bb_data = htons((u16) lport->mfs); + cp = &fdisc->fl_cssp[3 - 1]; /* class 3 parameters */ + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); } /** @@ -296,6 +321,10 @@ static inline int fc_els_fill(struct fc_lport *lport, fc_flogi_fill(lport, fp); break; + case ELS_FDISC: + fc_fdisc_fill(lport, fp); + break; + case ELS_LOGO: fc_logo_fill(lport, fp); break; -- cgit v1.2.3-59-g8ed1b From 11b561886643d4e23d0fd58c205d830a448dd0a2 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:46:29 -0800 Subject: [SCSI] libfcoe, fcoe: libfcoe NPIV support The FIP code in libfcoe needed several changes to support NPIV 1) dst_src_addr needs to be managed per-n_port-ID for FPMA fabrics with NPIV enabled. Managing the MAC address is now handled in fcoe, with some slight changes to update_mac() and a new get_src_addr() function pointer. 2) The libfc elsct_send() hook is used to setup FCoE specific response handlers for FIP encapsulated ELS exchanges. This lets the FCoE specific handling know which VN_Port the exchange is for, and doesn't require tracking OX_IDs. It might be possible to roll back to the full FIP frame in these, but for now I've just stashed the contents of the MAC address descriptor in the skb context block for later use. Also, because fcoe_elsct_send() just passes control on to fc_elsct_send(), all transmits still come through the normal frame_send() path. 3) The NPIV changes added a mutex hold in the keep alive sending, the lport mutex is protecting the vport list. We can't take a mutex from a timer, so move the FIP keep alive logic to the link work struct. 
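Point 3 follows the standard kernel pattern for code that needs a mutex but is driven by a timer: the timer callback runs in atomic context, so it only schedules a work item, and the mutex-protected walk of the vport list happens later in process context. A generic sketch of that pattern (names are illustrative; this is not the libfcoe code itself):

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct example_ctlr {			/* illustrative stand-in */
	struct timer_list timer;
	struct work_struct link_work;
	struct mutex lock;
};

static void example_link_work(struct work_struct *work)
{
	struct example_ctlr *ctlr =
		container_of(work, struct example_ctlr, link_work);

	mutex_lock(&ctlr->lock);	/* safe: process context */
	/* ... walk the vport list, send keep-alives ... */
	mutex_unlock(&ctlr->lock);
}

static void example_timer(unsigned long arg)
{
	struct example_ctlr *ctlr = (struct example_ctlr *)arg;

	schedule_work(&ctlr->link_work);	/* atomic context: defer only */
}

static void example_init(struct example_ctlr *ctlr)
{
	mutex_init(&ctlr->lock);
	INIT_WORK(&ctlr->link_work, example_link_work);
	setup_timer(&ctlr->timer, example_timer, (unsigned long)ctlr);
}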
Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 147 ++++++++++++++++++++++++++++++++++++------ drivers/scsi/fcoe/fcoe.h | 1 + drivers/scsi/fcoe/libfcoe.c | 82 +++++++++++++---------- drivers/scsi/libfc/fc_elsct.c | 3 +- drivers/scsi/libfc/fc_lport.c | 6 +- include/scsi/fc_frame.h | 3 + include/scsi/libfc.h | 10 +++ include/scsi/libfcoe.h | 15 +++-- 8 files changed, 207 insertions(+), 60 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 8ca488de492d..a64c398c981e 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -226,7 +226,8 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, } static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb); -static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new); +static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr); +static u8 *fcoe_get_src_mac(struct fc_lport *lport); static void fcoe_destroy_work(struct work_struct *work); /** @@ -254,6 +255,7 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) fcoe_ctlr_init(&fcoe->ctlr); fcoe->ctlr.send = fcoe_fip_send; fcoe->ctlr.update_mac = fcoe_update_src_mac; + fcoe->ctlr.get_src_addr = fcoe_get_src_mac; fcoe_interface_setup(fcoe, netdev); @@ -286,8 +288,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) /* Delete secondary MAC addresses */ memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); dev_unicast_delete(netdev, flogi_maddr); - if (!is_zero_ether_addr(fip->data_src_addr)) - dev_unicast_delete(netdev, fip->data_src_addr); if (fip->spma) dev_unicast_delete(netdev, fip->ctl_src_addr); dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); @@ -369,25 +369,37 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) /** * fcoe_update_src_mac() - Update Ethernet MAC filters. - * @fip: FCoE controller. - * @old: Unicast MAC address to delete if the MAC is non-zero. - * @new: Unicast MAC address to add. + * @lport: libfc lport + * @addr: Unicast MAC address to add. * * Remove any previously-set unicast MAC filter. * Add secondary FCoE MAC address filter for our OUI. 
*/ -static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new) +static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr) { - struct fcoe_interface *fcoe; + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->fcoe; - fcoe = fcoe_from_ctlr(fip); rtnl_lock(); - if (!is_zero_ether_addr(old)) - dev_unicast_delete(fcoe->netdev, old); - dev_unicast_add(fcoe->netdev, new); + if (!is_zero_ether_addr(port->data_src_addr)) + dev_unicast_delete(fcoe->netdev, port->data_src_addr); + if (!is_zero_ether_addr(addr)) + dev_unicast_add(fcoe->netdev, addr); + memcpy(port->data_src_addr, addr, ETH_ALEN); rtnl_unlock(); } +/** + * fcoe_get_src_mac() - return the Ethernet source address for an lport + * @lport: libfc lport + */ +static u8 *fcoe_get_src_mac(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + + return port->data_src_addr; +} + /** * fcoe_lport_config() - sets up the fc_lport * @lp: ptr to the fc_lport @@ -650,6 +662,11 @@ static void fcoe_if_destroy(struct fc_lport *lport) /* Free existing transmit skbs */ fcoe_clean_pending_queue(lport); + rtnl_lock(); + if (!is_zero_ether_addr(port->data_src_addr)) + dev_unicast_delete(netdev, port->data_src_addr); + rtnl_unlock(); + /* receives may not be stopped until after this */ fcoe_interface_put(fcoe); @@ -706,10 +723,16 @@ static int fcoe_ddp_done(struct fc_lport *lp, u16 xid) return 0; } +static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, + u32 did, struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, struct fc_frame *, void *), + void *arg, u32 timeout); + static struct libfc_function_template fcoe_libfc_fcn_templ = { .frame_send = fcoe_xmit, .ddp_setup = fcoe_ddp_setup, .ddp_done = fcoe_ddp_done, + .elsct_send = fcoe_elsct_send, }; /** @@ -1226,7 +1249,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) } if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && - fcoe_ctlr_els_send(&fcoe->ctlr, skb)) + fcoe_ctlr_els_send(&fcoe->ctlr, lp, skb)) return 0; sof = fr_sof(fp); @@ -1291,7 +1314,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); else - memcpy(eh->h_source, fcoe->ctlr.data_src_addr, ETH_ALEN); + memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); hp = (struct fcoe_hdr *)(eh + 1); memset(hp, 0, sizeof(*hp)); @@ -1464,11 +1487,6 @@ int fcoe_percpu_receive_thread(void *arg) } fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; } - if (unlikely(port->fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN) && - fcoe_ctlr_recv_flogi(&port->fcoe->ctlr, fp, mac)) { - fc_frame_free(fp); - continue; - } fc_exch_recv(lp, fp); } return 0; @@ -2061,3 +2079,94 @@ static void __exit fcoe_exit(void) fcoe_if_exit(); } module_exit(fcoe_exit); + +/** + * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler + * @seq: active sequence in the FLOGI or FDISC exchange + * @fp: response frame, or error encoded in a pointer (timeout) + * @arg: pointer the the fcoe_ctlr structure + * + * This handles MAC address managment for FCoE, then passes control on to + * the libfc FLOGI response handler. 
+ */ +static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + u8 *mac; + + if (IS_ERR(fp)) + goto done; + + mac = fr_cb(fp)->granted_mac; + if (is_zero_ether_addr(mac)) { + /* pre-FIP */ + mac = eth_hdr(&fp->skb)->h_source; + if (fcoe_ctlr_recv_flogi(fip, lport, fp, mac)) { + fc_frame_free(fp); + return; + } + } else { + /* FIP, libfcoe has already seen it */ + fip->update_mac(lport, fr_cb(fp)->granted_mac); + } +done: + fc_lport_flogi_resp(seq, fp, lport); +} + +/** + * fcoe_logo_resp() - FCoE specific LOGO response handler + * @seq: active sequence in the LOGO exchange + * @fp: response frame, or error encoded in a pointer (timeout) + * @arg: pointer the the fcoe_ctlr structure + * + * This handles MAC address managment for FCoE, then passes control on to + * the libfc LOGO response handler. + */ +static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + static u8 zero_mac[ETH_ALEN] = { 0 }; + + if (!IS_ERR(fp)) + fip->update_mac(lport, zero_mac); + fc_lport_logo_resp(seq, fp, lport); +} + +/** + * fcoe_elsct_send - FCoE specific ELS handler + * + * This does special case handling of FIP encapsualted ELS exchanges for FCoE, + * using FCoE specific response handlers and passing the FIP controller as + * the argument (the lport is still available from the exchange). + * + * Most of the work here is just handed off to the libfc routine. + */ +static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, + u32 did, struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, struct fc_frame *, void *), + void *arg, u32 timeout) +{ + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_ctlr *fip = &fcoe->ctlr; + struct fc_frame_header *fh = fc_frame_header_get(fp); + + switch (op) { + case ELS_FLOGI: + case ELS_FDISC: + return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp, + fip, timeout); + case ELS_LOGO: + /* only hook onto fabric logouts, not port logouts */ + if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) + break; + return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp, + fip, timeout); + } + return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); +} + diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index a123552847e5..99dfa7c2aeaa 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -104,6 +104,7 @@ struct fcoe_port { u8 fcoe_pending_queue_active; struct timer_list timer; /* queue timer */ struct work_struct destroy_work; /* to prevent rtnl deadlocks */ + u8 data_src_addr[ETH_ALEN]; }; #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index d8ea04a29199..6a93ba96569f 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -322,6 +322,7 @@ EXPORT_SYMBOL(fcoe_ctlr_link_down); /** * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF. * @fip: FCoE controller. + * @lport: libfc fc_lport to send from * @ports: 0 for controller keep-alive, 1 for port keep-alive. * @sa: source MAC address. * @@ -332,7 +333,9 @@ EXPORT_SYMBOL(fcoe_ctlr_link_down); * The source MAC is the assigned mapped source address. * The destination is the FCF's F-port. 
*/ -static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) +static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, + struct fc_lport *lport, + int ports, u8 *sa) { struct sk_buff *skb; struct fip_kal { @@ -374,16 +377,14 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) kal->mac.fd_desc.fip_dtype = FIP_DT_MAC; kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW; memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); - if (ports) { vn = (struct fip_vn_desc *)(kal + 1); vn->fd_desc.fip_dtype = FIP_DT_VN_ID; vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW; - memcpy(vn->fd_mac, fip->data_src_addr, ETH_ALEN); + memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN); hton24(vn->fd_fc_id, fc_host_port_id(lp->host)); put_unaligned_be64(lp->wwpn, &vn->fd_wwpn); } - skb_put(skb, len); skb->protocol = htons(ETH_P_FIP); skb_reset_mac_header(skb); @@ -394,6 +395,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) /** * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it. * @fip: FCoE controller. + * @lport: libfc fc_lport to use for the source address * @dtype: FIP descriptor type for the frame. * @skb: FCoE ELS frame including FC header but no FCoE headers. * @@ -405,7 +407,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) * Headroom includes the FIP encapsulation description, FIP header, and * Ethernet header. The tailroom is for the FIP MAC descriptor. */ -static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, +static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, u8 dtype, struct sk_buff *skb) { struct fip_encaps_head { @@ -450,7 +452,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, mac->fd_desc.fip_dtype = FIP_DT_MAC; mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) - memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN); + memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN); else if (fip->spma) memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); @@ -463,6 +465,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, /** * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate. * @fip: FCoE controller. + * @lport: libfc fc_lport to send from * @skb: FCoE ELS frame including FC header but no FCoE headers. * * Returns a non-zero error code if the frame should not be sent. @@ -471,11 +474,13 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, * The caller must check that the length is a multiple of 4. * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). */ -int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) +int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, + struct sk_buff *skb) { struct fc_frame_header *fh; u16 old_xid; u8 op; + u8 mac[ETH_ALEN]; fh = (struct fc_frame_header *)skb->data; op = *(u8 *)(fh + 1); @@ -530,14 +535,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) * FLOGI. 
*/ fip->flogi_oxid = FC_XID_UNKNOWN; - fc_fcoe_set_mac(fip->data_src_addr, fh->fh_s_id); + fc_fcoe_set_mac(mac, fh->fh_d_id); + fip->update_mac(lport, mac); return 0; default: if (fip->state != FIP_ST_ENABLED) goto drop; return 0; } - if (fcoe_ctlr_encaps(fip, op, skb)) + if (fcoe_ctlr_encaps(fip, lport, op, skb)) goto drop; fip->send(fip, skb); return -EINPROGRESS; @@ -796,7 +802,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fc_lport *lp = fip->lp; struct fip_header *fiph; - struct fc_frame *fp; + struct fc_frame *fp = (struct fc_frame *)skb; struct fc_frame_header *fh = NULL; struct fip_desc *desc; struct fip_encaps *els; @@ -835,6 +841,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) "in FIP ELS\n"); goto drop; } + memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); break; case FIP_DT_FLOGI: case FIP_DT_FDISC: @@ -865,13 +872,10 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) goto drop; els_op = *(u8 *)(fh + 1); - if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) && - sub == FIP_SC_REP && fip->flogi_oxid == ntohs(fh->fh_ox_id) && - els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) { + if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP && + fip->flogi_oxid == ntohs(fh->fh_ox_id) && + els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) fip->flogi_oxid = FC_XID_UNKNOWN; - fip->update_mac(fip, fip->data_src_addr, granted_mac); - memcpy(fip->data_src_addr, granted_mac, ETH_ALEN); - } /* * Convert skb into an fc_frame containing only the ELS. @@ -958,7 +962,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, if (dlen < sizeof(*vp)) return; if (compare_ether_addr(vp->fd_mac, - fip->data_src_addr) == 0 && + fip->get_src_addr(lp)) == 0 && get_unaligned_be64(&vp->fd_wwpn) == lp->wwpn && ntoh24(vp->fd_fc_id) == fc_host_port_id(lp->host)) desc_mask &= ~BIT(FIP_DT_VN_ID); @@ -1113,8 +1117,6 @@ static void fcoe_ctlr_timeout(unsigned long arg) struct fcoe_fcf *sel; struct fcoe_fcf *fcf; unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); - u8 send_ctlr_ka; - u8 send_port_ka; spin_lock_bh(&fip->lock); if (fip->state == FIP_ST_DISABLED) { @@ -1153,12 +1155,10 @@ static void fcoe_ctlr_timeout(unsigned long arg) schedule_work(&fip->link_work); } - send_ctlr_ka = 0; - send_port_ka = 0; if (sel) { if (time_after_eq(jiffies, fip->ctlr_ka_time)) { fip->ctlr_ka_time = jiffies + sel->fka_period; - send_ctlr_ka = 1; + fip->send_ctlr_ka = 1; } if (time_after(next_timer, fip->ctlr_ka_time)) next_timer = fip->ctlr_ka_time; @@ -1166,7 +1166,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) if (time_after_eq(jiffies, fip->port_ka_time)) { fip->port_ka_time += jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); - send_port_ka = 1; + fip->send_port_ka = 1; } if (time_after(next_timer, fip->port_ka_time)) next_timer = fip->port_ka_time; @@ -1176,12 +1176,9 @@ static void fcoe_ctlr_timeout(unsigned long arg) msecs_to_jiffies(FCOE_CTLR_START_DELAY); mod_timer(&fip->timer, next_timer); } + if (fip->send_ctlr_ka || fip->send_port_ka) + schedule_work(&fip->link_work); spin_unlock_bh(&fip->lock); - - if (send_ctlr_ka) - fcoe_ctlr_send_keep_alive(fip, 0, fip->ctl_src_addr); - if (send_port_ka) - fcoe_ctlr_send_keep_alive(fip, 1, fip->data_src_addr); } /** @@ -1196,6 +1193,8 @@ static void fcoe_ctlr_timeout(unsigned long arg) static void fcoe_ctlr_link_work(struct work_struct *work) { struct fcoe_ctlr *fip; + struct fc_lport *vport; + u8 *mac; int link; int 
last_link; @@ -1212,6 +1211,22 @@ static void fcoe_ctlr_link_work(struct work_struct *work) else fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT); } + + if (fip->send_ctlr_ka) { + fip->send_ctlr_ka = 0; + fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr); + } + if (fip->send_port_ka) { + fip->send_port_ka = 0; + mutex_lock(&fip->lp->lp_mutex); + mac = fip->get_src_addr(fip->lp); + fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac); + list_for_each_entry(vport, &fip->lp->vports, list) { + mac = fip->get_src_addr(vport); + fcoe_ctlr_send_keep_alive(fip, vport, 1, mac); + } + mutex_unlock(&fip->lp->lp_mutex); + } } /** @@ -1236,6 +1251,7 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work) /** * fcoe_ctlr_recv_flogi() - snoop Pre-FIP receipt of FLOGI response or request. * @fip: FCoE controller. + * @lport: libfc fc_lport instance received on * @fp: FC frame. * @sa: Ethernet source MAC address from received FCoE frame. * @@ -1248,7 +1264,8 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work) * * Return non-zero if the frame should not be delivered to libfc. */ -int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) +int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, + struct fc_frame *fp, u8 *sa) { struct fc_frame_header *fh; u8 op; @@ -1283,11 +1300,9 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) fip->map_dest = 0; } fip->flogi_oxid = FC_XID_UNKNOWN; - memcpy(mac, fip->data_src_addr, ETH_ALEN); - fc_fcoe_set_mac(fip->data_src_addr, fh->fh_d_id); + fc_fcoe_set_mac(mac, fh->fh_d_id); + fip->update_mac(lport, mac); spin_unlock_bh(&fip->lock); - - fip->update_mac(fip, mac, fip->data_src_addr); } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { /* * Save source MAC for point-to-point responses. @@ -1370,3 +1385,4 @@ int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt) return 0; } EXPORT_SYMBOL_GPL(fcoe_libfc_config); + diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index 92984587ff4d..aae54fe3b299 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -31,7 +31,7 @@ /* * fc_elsct_send - sends ELS/CT frame */ -static struct fc_seq *fc_elsct_send(struct fc_lport *lport, +struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did, struct fc_frame *fp, unsigned int op, @@ -63,6 +63,7 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); } +EXPORT_SYMBOL(fc_elsct_send); int fc_elsct_init(struct fc_lport *lport) { diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index ccba67ca68a1..807f5b3e4efe 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1320,7 +1320,7 @@ static void fc_lport_timeout(struct work_struct *work) * held, but it will lock, call an _enter_* function or fc_lport_error * and then unlock the lport. */ -static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, +void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg) { struct fc_lport *lport = lp_arg; @@ -1357,6 +1357,7 @@ out: err: mutex_unlock(&lport->lp_mutex); } +EXPORT_SYMBOL(fc_lport_logo_resp); /** * fc_rport_enter_logo() - Logout of the fabric @@ -1397,7 +1398,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport) * held, but it will lock, call an _enter_* function or fc_lport_error * and then unlock the lport. 
*/ -static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, +void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg) { struct fc_lport *lport = lp_arg; @@ -1480,6 +1481,7 @@ out: err: mutex_unlock(&lport->lp_mutex); } +EXPORT_SYMBOL(fc_lport_flogi_resp); /** * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h index 148126dcf9e9..ab2f8d41761b 100644 --- a/include/scsi/fc_frame.h +++ b/include/scsi/fc_frame.h @@ -28,6 +28,8 @@ #include #include +#include + /* * The fc_frame interface is used to pass frame data between functions. * The frame includes the data buffer, length, and SOF / EOF delimiter types. @@ -67,6 +69,7 @@ struct fcoe_rcv_info { enum fc_sof fr_sof; /* start of frame delimiter */ enum fc_eof fr_eof; /* end of frame delimiter */ u8 fr_flags; /* flags - see below */ + u8 granted_mac[ETH_ALEN]; /* FCoE MAC address */ }; diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index dfeb1ee4f03f..dad66ce8673d 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -900,6 +900,16 @@ void fc_fcp_destroy(struct fc_lport *); * Initializes ELS/CT interface */ int fc_elsct_init(struct fc_lport *lp); +struct fc_seq *fc_elsct_send(struct fc_lport *lport, + u32 did, + struct fc_frame *fp, + unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *fp, + void *arg), + void *arg, u32 timer_msec); +void fc_lport_flogi_resp(struct fc_seq *, struct fc_frame *, void *); +void fc_lport_logo_resp(struct fc_seq *, struct fc_frame *, void *); /* diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index b2410605b740..8ef5e209c216 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -74,11 +74,13 @@ enum fip_state { * @last_link: last link state reported to libfc. * @map_dest: use the FC_MAP mode for destination MAC addresses. * @spma: supports SPMA server-provided MACs mode + * @send_ctlr_ka: need to send controller keep alive + * @send_port_ka: need to send port keep alives * @dest_addr: MAC address of the selected FC forwarder. * @ctl_src_addr: the native MAC address of our local port. - * @data_src_addr: the assigned MAC address for the local port after FLOGI. * @send: LLD-supplied function to handle sending of FIP Ethernet frames. * @update_mac: LLD-supplied function to handle changes to MAC addresses. + * @get_src_addr: LLD-supplied function to supply a source MAC address. * @lock: lock protecting this structure. * * This structure is used by all FCoE drivers. 
It contains information @@ -106,12 +108,14 @@ struct fcoe_ctlr { u8 last_link; u8 map_dest; u8 spma; + u8 send_ctlr_ka; + u8 send_port_ka; u8 dest_addr[ETH_ALEN]; u8 ctl_src_addr[ETH_ALEN]; - u8 data_src_addr[ETH_ALEN]; void (*send)(struct fcoe_ctlr *, struct sk_buff *); - void (*update_mac)(struct fcoe_ctlr *, u8 *old, u8 *new); + void (*update_mac)(struct fc_lport *, u8 *addr); + u8 * (*get_src_addr)(struct fc_lport *); spinlock_t lock; }; @@ -155,9 +159,10 @@ void fcoe_ctlr_init(struct fcoe_ctlr *); void fcoe_ctlr_destroy(struct fcoe_ctlr *); void fcoe_ctlr_link_up(struct fcoe_ctlr *); int fcoe_ctlr_link_down(struct fcoe_ctlr *); -int fcoe_ctlr_els_send(struct fcoe_ctlr *, struct sk_buff *); +int fcoe_ctlr_els_send(struct fcoe_ctlr *, struct fc_lport *, struct sk_buff *); void fcoe_ctlr_recv(struct fcoe_ctlr *, struct sk_buff *); -int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_frame *fp, u8 *sa); +int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *lport, + struct fc_frame *fp, u8 *sa); /* libfcoe funcs */ u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); -- cgit v1.2.3-59-g8ed1b From e9084bb8b4414dc1cfb840ac5a86fac23fccd013 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:46:34 -0800 Subject: [SCSI] fcoe: add a separate scsi transport template for NPIV vports Right now it's exactly the same as the physical port template, and there is no way to create a port on anything other than the netdev. When the vport_create entry point gets hooked up it will create lports on top of vport devices, which will use this. Rename scsi_transport_fcoe_sw to fcoe_transport_template to be more clear with naming now that there are two templates. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 54 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 47 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index a64c398c981e..d37d5739799c 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -90,7 +90,8 @@ static struct notifier_block fcoe_notifier = { .notifier_call = fcoe_device_notification, }; -static struct scsi_transport_template *scsi_transport_fcoe_sw; +static struct scsi_transport_template *fcoe_transport_template; +static struct scsi_transport_template *fcoe_vport_transport_template; struct fc_function_template fcoe_transport_function = { .show_host_node_name = 1, @@ -125,6 +126,39 @@ struct fc_function_template fcoe_transport_function = { .terminate_rport_io = fc_rport_terminate_io, }; +struct fc_function_template fcoe_vport_transport_function = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = fc_get_host_stats, + .issue_fc_host_lip = fcoe_reset, + + 
.terminate_rport_io = fc_rport_terminate_io, +}; + static struct scsi_host_template fcoe_shost_template = { .module = THIS_MODULE, .name = "FCoE Driver", @@ -530,7 +564,10 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, lp->host->max_lun = FCOE_MAX_LUN; lp->host->max_id = FCOE_MAX_FCP_TARGET; lp->host->max_channel = 0; - lp->host->transportt = scsi_transport_fcoe_sw; + if (lp->vport) + lp->host->transportt = fcoe_vport_transport_template; + else + lp->host->transportt = fcoe_transport_template; /* add the new host to the SCSI-ml */ rc = scsi_add_host(lp->host, dev); @@ -836,10 +873,11 @@ out: static int __init fcoe_if_init(void) { /* attach to scsi transport */ - scsi_transport_fcoe_sw = - fc_attach_transport(&fcoe_transport_function); + fcoe_transport_template = fc_attach_transport(&fcoe_transport_function); + fcoe_vport_transport_template = + fc_attach_transport(&fcoe_vport_transport_function); - if (!scsi_transport_fcoe_sw) { + if (!fcoe_transport_template) { printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); return -ENODEV; } @@ -854,8 +892,10 @@ static int __init fcoe_if_init(void) */ int __exit fcoe_if_exit(void) { - fc_release_transport(scsi_transport_fcoe_sw); - scsi_transport_fcoe_sw = NULL; + fc_release_transport(fcoe_transport_template); + fc_release_transport(fcoe_vport_transport_template); + fcoe_transport_template = NULL; + fcoe_vport_transport_template = NULL; return 0; } -- cgit v1.2.3-59-g8ed1b From 9a05753b23c171b6a45e393ec2b9bc034d31bec8 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:46:40 -0800 Subject: [SCSI] fcoe: NPIV vport create/destroy Add NPIV vport create and destroy handlers and register them with the FC transport. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 162 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 139 insertions(+), 23 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index d37d5739799c..f23cdb38d5c3 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -93,6 +93,10 @@ static struct notifier_block fcoe_notifier = { static struct scsi_transport_template *fcoe_transport_template; static struct scsi_transport_template *fcoe_vport_transport_template; +static int fcoe_vport_destroy(struct fc_vport *vport); +static int fcoe_vport_create(struct fc_vport *vport, bool disabled); +static int fcoe_vport_disable(struct fc_vport *vport, bool disable); + struct fc_function_template fcoe_transport_function = { .show_host_node_name = 1, .show_host_port_name = 1, @@ -124,6 +128,10 @@ struct fc_function_template fcoe_transport_function = { .issue_fc_host_lip = fcoe_reset, .terminate_rport_io = fc_rport_terminate_io, + + .vport_create = fcoe_vport_create, + .vport_delete = fcoe_vport_destroy, + .vport_disable = fcoe_vport_disable, }; struct fc_function_template fcoe_vport_transport_function = { @@ -450,6 +458,7 @@ static int fcoe_lport_config(struct fc_lport *lp) lp->r_a_tov = 2 * 2 * 1000; lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lp->does_npiv = 1; fc_lport_init_stats(lp); @@ -536,11 +545,13 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) port->fcoe_pending_queue_active = 0; setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp); - wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0); - fc_set_wwnn(lp, wwnn); - /* XXX - 3rd arg needs to be vlan id */ - wwpn = 
fcoe_wwn_from_mac(netdev->dev_addr, 2, 0); - fc_set_wwpn(lp, wwpn); + if (!lp->vport) { + wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0); + fc_set_wwnn(lp, wwnn); + /* XXX - 3rd arg needs to be vlan id */ + wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0); + fc_set_wwpn(lp, wwpn); + } return 0; } @@ -576,6 +587,10 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, "error on scsi_add_host\n"); return rc; } + + if (!lp->vport) + fc_host_max_npiv_vports(lp->host) = USHORT_MAX; + sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", FCOE_NAME, FCOE_VERSION, fcoe_netdev(lp)->name); @@ -776,24 +791,35 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = { * fcoe_if_create() - this function creates the fcoe port * @fcoe: fcoe_interface structure to create an fc_lport instance on * @parent: device pointer to be the parent in sysfs for the SCSI host + * @npiv: is this a vport? * * Creates fc_lport struct and scsi_host for lport, configures lport. * * Returns : The allocated fc_lport or an error pointer */ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, - struct device *parent) + struct device *parent, int npiv) { int rc; struct fc_lport *lport = NULL; struct fcoe_port *port; struct Scsi_Host *shost; struct net_device *netdev = fcoe->netdev; + /* + * parent is only a vport if npiv is 1, + * but we'll only use vport in that case so go ahead and set it + */ + struct fc_vport *vport = dev_to_vport(parent); FCOE_NETDEV_DBG(netdev, "Create Interface\n"); - lport = libfc_host_alloc(&fcoe_shost_template, - sizeof(struct fcoe_port)); + if (!npiv) { + lport = libfc_host_alloc(&fcoe_shost_template, + sizeof(struct fcoe_port)); + } else { + lport = libfc_vport_create(vport, + sizeof(struct fcoe_port)); + } if (!lport) { FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); rc = -ENOMEM; @@ -813,6 +839,13 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, goto out_host_put; } + if (npiv) { + FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n", + vport->node_name, vport->port_name); + fc_set_wwnn(lport, vport->node_name); + fc_set_wwpn(lport, vport->port_name); + } + /* configure lport network properties */ rc = fcoe_netdev_config(lport, netdev); if (rc) { @@ -837,21 +870,24 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, goto out_lp_destroy; } - /* - * fcoe_em_alloc() and fcoe_hostlist_add() both - * need to be atomic with respect to other changes to the hostlist - * since fcoe_em_alloc() looks for an existing EM - * instance on host list updated by fcoe_hostlist_add(). - * - * This is currently handled through the fcoe_config_mutex begin held. - */ + if (!npiv) { + /* + * fcoe_em_alloc() and fcoe_hostlist_add() both + * need to be atomic with respect to other changes to the + * hostlist since fcoe_em_alloc() looks for an existing EM + * instance on host list updated by fcoe_hostlist_add(). + * + * This is currently handled through the fcoe_config_mutex + * begin held. 
+ */ - /* lport exch manager allocation */ - rc = fcoe_em_config(lport); - if (rc) { - FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the " - "interface\n"); - goto out_lp_destroy; + /* lport exch manager allocation */ + rc = fcoe_em_config(lport); + if (rc) { + FCOE_NETDEV_DBG(netdev, "Could not configure the EM " + "for the interface\n"); + goto out_lp_destroy; + } } fcoe_interface_get(fcoe); @@ -1806,7 +1842,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) goto out_putdev; } - lport = fcoe_if_create(fcoe, &netdev->dev); + lport = fcoe_if_create(fcoe, &netdev->dev, 0); if (IS_ERR(lport)) { printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", netdev->name); @@ -2113,6 +2149,9 @@ static void __exit fcoe_exit(void) /* flush any asyncronous interface destroys, * this should happen after the netdev notifier is unregistered */ flush_scheduled_work(); + /* That will flush out all the N_Ports on the hostlist, but now we + * may have NPIV VN_Ports scheduled for destruction */ + flush_scheduled_work(); /* detach from scsi transport * must happen after all destroys are done, therefor after the flush */ @@ -2210,3 +2249,80 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); } +/** + * fcoe_vport_create() - create an fc_host/scsi_host for a vport + * @vport: fc_vport object to create a new fc_host for + * @disabled: start the new fc_host in a disabled state by default? + * + * Returns: 0 for success + */ +static int fcoe_vport_create(struct fc_vport *vport, bool disabled) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fcoe_port *port = lport_priv(n_port); + struct fcoe_interface *fcoe = port->fcoe; + struct net_device *netdev = fcoe->netdev; + struct fc_lport *vn_port; + + mutex_lock(&fcoe_config_mutex); + vn_port = fcoe_if_create(fcoe, &vport->dev, 1); + mutex_unlock(&fcoe_config_mutex); + + if (IS_ERR(vn_port)) { + printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n", + netdev->name); + return -EIO; + } + + if (disabled) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + } else { + vn_port->boot_time = jiffies; + fc_fabric_login(vn_port); + fc_vport_setlink(vn_port); + } + return 0; +} + +/** + * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport + * @vport: fc_vport object that is being destroyed + * + * Returns: 0 for success + */ +static int fcoe_vport_destroy(struct fc_vport *vport) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port = vport->dd_data; + struct fcoe_port *port = lport_priv(vn_port); + + mutex_lock(&n_port->lp_mutex); + list_del(&vn_port->list); + mutex_unlock(&n_port->lp_mutex); + schedule_work(&port->destroy_work); + return 0; +} + +/** + * fcoe_vport_disable() - change vport state + * @vport: vport to bring online/offline + * @disable: should the vport be disabled? 
+ */ +static int fcoe_vport_disable(struct fc_vport *vport, bool disable) +{ + struct fc_lport *lport = vport->dd_data; + + if (disable) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + fc_fabric_logoff(lport); + } else { + lport->boot_time = jiffies; + fc_fabric_login(lport); + fc_vport_setlink(lport); + } + + return 0; +} + -- cgit v1.2.3-59-g8ed1b From 28cc0e31d874af05244da421e05565f2ba72fd5c Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:46:46 -0800 Subject: [SCSI] libfc: RPN_ID is obsolete and unnecessary RPN_ID has been obsolete per FC-GS-5 for several years. The port name is registered implicitly as part of FLOGI, and it is undesirable for ports to change a registered port name using RPN_ID while logged into the fabric. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 95 ++----------------------------------------- include/scsi/libfc.h | 1 - 2 files changed, 3 insertions(+), 93 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 807f5b3e4efe..47577e4a2e87 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -108,7 +108,6 @@ static void fc_lport_error(struct fc_lport *, struct fc_frame *); static void fc_lport_enter_reset(struct fc_lport *); static void fc_lport_enter_flogi(struct fc_lport *); static void fc_lport_enter_dns(struct fc_lport *); -static void fc_lport_enter_rpn_id(struct fc_lport *); static void fc_lport_enter_rft_id(struct fc_lport *); static void fc_lport_enter_scr(struct fc_lport *); static void fc_lport_enter_ready(struct fc_lport *); @@ -118,7 +117,6 @@ static const char *fc_lport_state_names[] = { [LPORT_ST_DISABLED] = "disabled", [LPORT_ST_FLOGI] = "FLOGI", [LPORT_ST_DNS] = "dNS", - [LPORT_ST_RPN_ID] = "RPN_ID", [LPORT_ST_RFT_ID] = "RFT_ID", [LPORT_ST_SCR] = "SCR", [LPORT_ST_READY] = "Ready", @@ -153,7 +151,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, case RPORT_EV_READY: if (lport->state == LPORT_ST_DNS) { lport->dns_rp = rdata; - fc_lport_enter_rpn_id(lport); + fc_lport_enter_rft_id(lport); } else { FC_LPORT_DBG(lport, "Received an READY event " "on port (%6x) for the directory " @@ -965,7 +963,6 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) case LPORT_ST_DISABLED: case LPORT_ST_READY: case LPORT_ST_RESET: - case LPORT_ST_RPN_ID: case LPORT_ST_RFT_ID: case LPORT_ST_SCR: case LPORT_ST_DNS: @@ -980,8 +977,8 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) /** * fc_lport_rft_id_resp() - Handle response to Register Fibre - * Channel Types by ID (RPN_ID) request - * @sp: current sequence in RPN_ID exchange + * Channel Types by ID (RFT_ID) request + * @sp: current sequence in RFT_ID exchange * @fp: response frame * @lp_arg: Fibre Channel host port instance * @@ -1032,60 +1029,6 @@ err: mutex_unlock(&lport->lp_mutex); } -/** - * fc_lport_rpn_id_resp() - Handle response to Register Port - * Name by ID (RPN_ID) request - * @sp: current sequence in RPN_ID exchange - * @fp: response frame - * @lp_arg: Fibre Channel host port instance - * - * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error - * and then unlock the lport. 
- */ -static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) -{ - struct fc_lport *lport = lp_arg; - struct fc_frame_header *fh; - struct fc_ct_hdr *ct; - - FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp)); - - if (fp == ERR_PTR(-FC_EX_CLOSED)) - return; - - mutex_lock(&lport->lp_mutex); - - if (lport->state != LPORT_ST_RPN_ID) { - FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state " - "%s\n", fc_lport_state(lport)); - if (IS_ERR(fp)) - goto err; - goto out; - } - - if (IS_ERR(fp)) { - fc_lport_error(lport, fp); - goto err; - } - - fh = fc_frame_header_get(fp); - ct = fc_frame_payload_get(fp, sizeof(*ct)); - if (fh && ct && fh->fh_type == FC_TYPE_CT && - ct->ct_fs_type == FC_FST_DIR && - ct->ct_fs_subtype == FC_NS_SUBTYPE && - ntohs(ct->ct_cmd) == FC_FS_ACC) - fc_lport_enter_rft_id(lport); - else - fc_lport_error(lport, fp); - -out: - fc_frame_free(fp); -err: - mutex_unlock(&lport->lp_mutex); -} - /** * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request * @sp: current sequence in SCR exchange @@ -1203,35 +1146,6 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) fc_lport_error(lport, fp); } -/** - * fc_rport_enter_rft_id() - Register port name with the name server - * @lport: Fibre Channel local port to register - * - * Locking Note: The lport lock is expected to be held before calling - * this routine. - */ -static void fc_lport_enter_rpn_id(struct fc_lport *lport) -{ - struct fc_frame *fp; - - FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n", - fc_lport_state(lport)); - - fc_lport_state_enter(lport, LPORT_ST_RPN_ID); - - fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + - sizeof(struct fc_ns_rn_id)); - if (!fp) { - fc_lport_error(lport, fp); - return; - } - - if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID, - fc_lport_rpn_id_resp, - lport, lport->e_d_tov)) - fc_lport_error(lport, NULL); -} - static struct fc_rport_operations fc_lport_rport_ops = { .event_callback = fc_lport_rport_callback, }; @@ -1293,9 +1207,6 @@ static void fc_lport_timeout(struct work_struct *work) case LPORT_ST_DNS: fc_lport_enter_dns(lport); break; - case LPORT_ST_RPN_ID: - fc_lport_enter_rpn_id(lport); - break; case LPORT_ST_RFT_ID: fc_lport_enter_rft_id(lport); break; diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index dad66ce8673d..75be713ea036 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -61,7 +61,6 @@ enum fc_lport_state { LPORT_ST_DISABLED = 0, LPORT_ST_FLOGI, LPORT_ST_DNS, - LPORT_ST_RPN_ID, LPORT_ST_RFT_ID, LPORT_ST_SCR, LPORT_ST_READY, -- cgit v1.2.3-59-g8ed1b From c9c7bd7a5e7321aa96289c9b48fdbcc828c105e6 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:46:51 -0800 Subject: [SCSI] libfc: RNN_ID may be required before RSNN_NN with some switches One could interpret FC-GS-5 to say that an explicit RNN_ID is required before RSNN_NN is allowed to succeed, which is why RNN_ID was not obsoleted along with RPN_ID acording to this document: ftp://ftp.t11.org/t11/member/fc/gs-5/05-546v2.pdf Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 91 ++++++++++++++++++++++++++++++++++++++++++- include/scsi/fc_encode.h | 4 +- include/scsi/libfc.h | 1 + 3 files changed, 93 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 47577e4a2e87..897b5a8487e2 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ 
b/drivers/scsi/libfc/fc_lport.c @@ -108,6 +108,7 @@ static void fc_lport_error(struct fc_lport *, struct fc_frame *); static void fc_lport_enter_reset(struct fc_lport *); static void fc_lport_enter_flogi(struct fc_lport *); static void fc_lport_enter_dns(struct fc_lport *); +static void fc_lport_enter_rnn_id(struct fc_lport *); static void fc_lport_enter_rft_id(struct fc_lport *); static void fc_lport_enter_scr(struct fc_lport *); static void fc_lport_enter_ready(struct fc_lport *); @@ -117,6 +118,7 @@ static const char *fc_lport_state_names[] = { [LPORT_ST_DISABLED] = "disabled", [LPORT_ST_FLOGI] = "FLOGI", [LPORT_ST_DNS] = "dNS", + [LPORT_ST_RNN_ID] = "RNN_ID", [LPORT_ST_RFT_ID] = "RFT_ID", [LPORT_ST_SCR] = "SCR", [LPORT_ST_READY] = "Ready", @@ -151,7 +153,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, case RPORT_EV_READY: if (lport->state == LPORT_ST_DNS) { lport->dns_rp = rdata; - fc_lport_enter_rft_id(lport); + fc_lport_enter_rnn_id(lport); } else { FC_LPORT_DBG(lport, "Received an READY event " "on port (%6x) for the directory " @@ -963,6 +965,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) case LPORT_ST_DISABLED: case LPORT_ST_READY: case LPORT_ST_RESET: + case LPORT_ST_RNN_ID: case LPORT_ST_RFT_ID: case LPORT_ST_SCR: case LPORT_ST_DNS: @@ -1029,6 +1032,60 @@ err: mutex_unlock(&lport->lp_mutex); } +/** + * fc_lport_rnn_id_resp() - Handle response to Register Node + * Name by ID (RNN_ID) request + * @sp: current sequence in RNN_ID exchange + * @fp: response frame + * @lp_arg: Fibre Channel host port instance + * + * Locking Note: This function will be called without the lport lock + * held, but it will lock, call an _enter_* function or fc_lport_error + * and then unlock the lport. + */ +static void fc_lport_rnn_id_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) +{ + struct fc_lport *lport = lp_arg; + struct fc_frame_header *fh; + struct fc_ct_hdr *ct; + + FC_LPORT_DBG(lport, "Received a RNN_ID %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + return; + + mutex_lock(&lport->lp_mutex); + + if (lport->state != LPORT_ST_RNN_ID) { + FC_LPORT_DBG(lport, "Received a RNN_ID response, but in state " + "%s\n", fc_lport_state(lport)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_lport_error(lport, fp); + goto err; + } + + fh = fc_frame_header_get(fp); + ct = fc_frame_payload_get(fp, sizeof(*ct)); + if (fh && ct && fh->fh_type == FC_TYPE_CT && + ct->ct_fs_type == FC_FST_DIR && + ct->ct_fs_subtype == FC_NS_SUBTYPE && + ntohs(ct->ct_cmd) == FC_FS_ACC) + fc_lport_enter_rft_id(lport); + else + fc_lport_error(lport, fp); + +out: + fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); +} + /** * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request * @sp: current sequence in SCR exchange @@ -1146,6 +1203,35 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) fc_lport_error(lport, fp); } +/** + * fc_rport_enter_rnn_id() - Register node name with the name server + * @lport: Fibre Channel local port to register + * + * Locking Note: The lport lock is expected to be held before calling + * this routine. 
+ */ +static void fc_lport_enter_rnn_id(struct fc_lport *lport) +{ + struct fc_frame *fp; + + FC_LPORT_DBG(lport, "Entered RNN_ID state from %s state\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_RNN_ID); + + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_rn_id)); + if (!fp) { + fc_lport_error(lport, fp); + return; + } + + if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RNN_ID, + fc_lport_rnn_id_resp, + lport, lport->e_d_tov)) + fc_lport_error(lport, fp); +} + static struct fc_rport_operations fc_lport_rport_ops = { .event_callback = fc_lport_rport_callback, }; @@ -1207,6 +1293,9 @@ static void fc_lport_timeout(struct work_struct *work) case LPORT_ST_DNS: fc_lport_enter_dns(lport); break; + case LPORT_ST_RNN_ID: + fc_lport_enter_rnn_id(lport); + break; case LPORT_ST_RFT_ID: fc_lport_enter_rft_id(lport); break; diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index c93ca3ece1a0..ad13cb1c3eec 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -128,12 +128,12 @@ static inline int fc_ct_fill(struct fc_lport *lport, ct->payload.rft.fts = lport->fcts; break; - case FC_NS_RPN_ID: + case FC_NS_RNN_ID: ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id)); hton24(ct->payload.rn.fr_fid.fp_fid, fc_host_port_id(lport->host)); ct->payload.rft.fts = lport->fcts; - put_unaligned_be64(lport->wwpn, &ct->payload.rn.fr_wwn); + put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn); break; default: diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 75be713ea036..3d22dfd67209 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -61,6 +61,7 @@ enum fc_lport_state { LPORT_ST_DISABLED = 0, LPORT_ST_FLOGI, LPORT_ST_DNS, + LPORT_ST_RNN_ID, LPORT_ST_RFT_ID, LPORT_ST_SCR, LPORT_ST_READY, -- cgit v1.2.3-59-g8ed1b From 5baa17c3e66fc2e414f501b2dd59b962dfc64919 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:46:56 -0800 Subject: [SCSI] libfc: Register Symbolic Node Name (RSNN_NN) Register the fc_host symbolic name as the symbolic node name with the fabric name server. 
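For reference, the CT request payload for this registration is built by the new FC_NS_RSNN_NN case added to fc_ct_fill() — excerpted verbatim from the fc_encode.h hunk below, nothing beyond what this patch adds:

	case FC_NS_RSNN_NN:
		ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn));
		put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn);
		strncpy(ct->payload.snn.fr_name,
			fc_host_symbolic_name(lport->host), 255);
		ct->payload.snn.fr_name_len =
			strnlen(ct->payload.snn.fr_name, 255);
		break;

The symbolic name string itself comes from fc_host_symbolic_name(), which the fcoe driver now fills with snprintf() bounded by FC_SYMBOLIC_NAME_SIZE rather than sprintf(), so the registered name cannot overrun the fc_host field.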
Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 6 +-- drivers/scsi/libfc/fc_lport.c | 91 +++++++++++++++++++++++++++++++++++++++++++ include/scsi/fc/fc_ns.h | 10 +++++ include/scsi/fc_encode.h | 10 +++++ include/scsi/libfc.h | 1 + 5 files changed, 115 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index f23cdb38d5c3..437eacf2732d 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -591,9 +591,9 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, if (!lp->vport) fc_host_max_npiv_vports(lp->host) = USHORT_MAX; - sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", - FCOE_NAME, FCOE_VERSION, - fcoe_netdev(lp)->name); + snprintf(fc_host_symbolic_name(lp->host), FC_SYMBOLIC_NAME_SIZE, + "%s v%s over %s", FCOE_NAME, FCOE_VERSION, + fcoe_netdev(lp)->name); return 0; } diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 897b5a8487e2..cc389c03f698 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -109,6 +109,7 @@ static void fc_lport_enter_reset(struct fc_lport *); static void fc_lport_enter_flogi(struct fc_lport *); static void fc_lport_enter_dns(struct fc_lport *); static void fc_lport_enter_rnn_id(struct fc_lport *); +static void fc_lport_enter_rsnn_nn(struct fc_lport *); static void fc_lport_enter_rft_id(struct fc_lport *); static void fc_lport_enter_scr(struct fc_lport *); static void fc_lport_enter_ready(struct fc_lport *); @@ -119,6 +120,7 @@ static const char *fc_lport_state_names[] = { [LPORT_ST_FLOGI] = "FLOGI", [LPORT_ST_DNS] = "dNS", [LPORT_ST_RNN_ID] = "RNN_ID", + [LPORT_ST_RSNN_NN] = "RSNN_NN", [LPORT_ST_RFT_ID] = "RFT_ID", [LPORT_ST_SCR] = "SCR", [LPORT_ST_READY] = "Ready", @@ -966,6 +968,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) case LPORT_ST_READY: case LPORT_ST_RESET: case LPORT_ST_RNN_ID: + case LPORT_ST_RSNN_NN: case LPORT_ST_RFT_ID: case LPORT_ST_SCR: case LPORT_ST_DNS: @@ -1032,6 +1035,60 @@ err: mutex_unlock(&lport->lp_mutex); } +/** + * fc_lport_rsnn_nn_resp() - Handle response to Register Symbolic Node Name + * by Node Name (RSNN_NN) request + * @sp: current sequence in RSNN_NN exchange + * @fp: response frame + * @lp_arg: Fibre Channel host port instance + * + * Locking Note: This function will be called without the lport lock + * held, but it will lock, call an _enter_* function or fc_lport_error + * and then unlock the lport. 
+ */ +static void fc_lport_rsnn_nn_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) +{ + struct fc_lport *lport = lp_arg; + struct fc_frame_header *fh; + struct fc_ct_hdr *ct; + + FC_LPORT_DBG(lport, "Received a RSNN_NN %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + return; + + mutex_lock(&lport->lp_mutex); + + if (lport->state != LPORT_ST_RSNN_NN) { + FC_LPORT_DBG(lport, "Received a RSNN_NN response, but in state " + "%s\n", fc_lport_state(lport)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_lport_error(lport, fp); + goto err; + } + + fh = fc_frame_header_get(fp); + ct = fc_frame_payload_get(fp, sizeof(*ct)); + if (fh && ct && fh->fh_type == FC_TYPE_CT && + ct->ct_fs_type == FC_FST_DIR && + ct->ct_fs_subtype == FC_NS_SUBTYPE && + ntohs(ct->ct_cmd) == FC_FS_ACC) + fc_lport_enter_rsnn_nn(lport); + else + fc_lport_error(lport, fp); + +out: + fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); +} + /** * fc_lport_rnn_id_resp() - Handle response to Register Node * Name by ID (RNN_ID) request @@ -1203,6 +1260,37 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) fc_lport_error(lport, fp); } +/** + * fc_rport_enter_rsnn_nn() - Register symbolic node name with the name server + * @lport: Fibre Channel local port to register + * + * Locking Note: The lport lock is expected to be held before calling + * this routine. + */ +static void fc_lport_enter_rsnn_nn(struct fc_lport *lport) +{ + struct fc_frame *fp; + size_t len; + + FC_LPORT_DBG(lport, "Entered RSNN_NN state from %s state\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_RSNN_NN); + + len = strnlen(fc_host_symbolic_name(lport->host), 255); + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_rsnn) + len); + if (!fp) { + fc_lport_error(lport, fp); + return; + } + + if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSNN_NN, + fc_lport_rsnn_nn_resp, + lport, lport->e_d_tov)) + fc_lport_error(lport, fp); +} + /** * fc_rport_enter_rnn_id() - Register node name with the name server * @lport: Fibre Channel local port to register @@ -1296,6 +1384,9 @@ static void fc_lport_timeout(struct work_struct *work) case LPORT_ST_RNN_ID: fc_lport_enter_rnn_id(lport); break; + case LPORT_ST_RSNN_NN: + fc_lport_enter_rsnn_nn(lport); + break; case LPORT_ST_RFT_ID: fc_lport_enter_rft_id(lport); break; diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h index 790d7b97d4bc..fa8283056325 100644 --- a/include/scsi/fc/fc_ns.h +++ b/include/scsi/fc/fc_ns.h @@ -47,6 +47,7 @@ enum fc_ns_req { FC_NS_RFT_ID = 0x0217, /* reg FC4 type for ID */ FC_NS_RPN_ID = 0x0212, /* reg port name for ID */ FC_NS_RNN_ID = 0x0213, /* reg node name for ID */ + FC_NS_RSNN_NN = 0x0239, /* reg symbolic node name */ }; /* @@ -156,4 +157,13 @@ struct fc_ns_rn_id { __be64 fr_wwn; /* node name or port name */ } __attribute__((__packed__)); +/* + * RSNN_NN request - register symbolic node name + */ +struct fc_ns_rsnn { + __be64 fr_wwn; /* node name */ + __u8 fr_name_len; + char fr_name[]; +} __attribute__((__packed__)); + #endif /* _FC_NS_H_ */ diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index ad13cb1c3eec..89981afba72d 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -33,6 +33,7 @@ struct fc_ct_req { struct fc_ns_rn_id rn; struct fc_ns_rft rft; struct fc_ns_fid fid; + struct fc_ns_rsnn snn; } payload; }; @@ -136,6 +137,15 @@ static inline int fc_ct_fill(struct fc_lport *lport, put_unaligned_be64(lport->wwnn, 
&ct->payload.rn.fr_wwn); break; + case FC_NS_RSNN_NN: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn)); + put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn); + strncpy(ct->payload.snn.fr_name, + fc_host_symbolic_name(lport->host), 255); + ct->payload.snn.fr_name_len = + strnlen(ct->payload.snn.fr_name, 255); + break; + default: return -EINVAL; } diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 3d22dfd67209..1a632069c402 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -62,6 +62,7 @@ enum fc_lport_state { LPORT_ST_FLOGI, LPORT_ST_DNS, LPORT_ST_RNN_ID, + LPORT_ST_RSNN_NN, LPORT_ST_RFT_ID, LPORT_ST_SCR, LPORT_ST_READY, -- cgit v1.2.3-59-g8ed1b From c9866a548024c33e30f35a14bbcb71ba78266383 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:47:01 -0800 Subject: [SCSI] libfc: Register Symbolic Port Name (RSPN_ID) Register the fc_host symbolic name as the symbolic port name with the fabric name server. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 90 +++++++++++++++++++++++++++++++++++++++++++ include/scsi/fc/fc_ns.h | 10 +++++ include/scsi/fc_encode.h | 11 ++++++ include/scsi/libfc.h | 1 + 4 files changed, 112 insertions(+) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index cc389c03f698..28a35da1493b 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -110,6 +110,7 @@ static void fc_lport_enter_flogi(struct fc_lport *); static void fc_lport_enter_dns(struct fc_lport *); static void fc_lport_enter_rnn_id(struct fc_lport *); static void fc_lport_enter_rsnn_nn(struct fc_lport *); +static void fc_lport_enter_rspn_id(struct fc_lport *); static void fc_lport_enter_rft_id(struct fc_lport *); static void fc_lport_enter_scr(struct fc_lport *); static void fc_lport_enter_ready(struct fc_lport *); @@ -121,6 +122,7 @@ static const char *fc_lport_state_names[] = { [LPORT_ST_DNS] = "dNS", [LPORT_ST_RNN_ID] = "RNN_ID", [LPORT_ST_RSNN_NN] = "RSNN_NN", + [LPORT_ST_RSPN_ID] = "RSPN_ID", [LPORT_ST_RFT_ID] = "RFT_ID", [LPORT_ST_SCR] = "SCR", [LPORT_ST_READY] = "Ready", @@ -969,6 +971,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) case LPORT_ST_RESET: case LPORT_ST_RNN_ID: case LPORT_ST_RSNN_NN: + case LPORT_ST_RSPN_ID: case LPORT_ST_RFT_ID: case LPORT_ST_SCR: case LPORT_ST_DNS: @@ -1035,6 +1038,59 @@ err: mutex_unlock(&lport->lp_mutex); } +/** + * fc_lport_rspn_id_resp() - Handle response to Register Symbolic Port Name + * by ID (RSPN_ID) request + * @sp: current sequence in RSPN_ID exchange + * @fp: response frame + * @lp_arg: Fibre Channel host port instance + * + * Locking Note: This function will be called without the lport lock + * held, but it will lock, call an _enter_* function or fc_lport_error + * and then unlock the lport. 
+ */ +static void fc_lport_rspn_id_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) +{ + struct fc_lport *lport = lp_arg; + struct fc_frame_header *fh; + struct fc_ct_hdr *ct; + + FC_LPORT_DBG(lport, "Received a RSPN_ID %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + return; + + mutex_lock(&lport->lp_mutex); + + if (lport->state != LPORT_ST_RSPN_ID) { + FC_LPORT_DBG(lport, "Received a RSPN_ID response, but in state " + "%s\n", fc_lport_state(lport)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_lport_error(lport, fp); + goto err; + } + + fh = fc_frame_header_get(fp); + ct = fc_frame_payload_get(fp, sizeof(*ct)); + if (fh && ct && fh->fh_type == FC_TYPE_CT && + ct->ct_fs_type == FC_FST_DIR && + ct->ct_fs_subtype == FC_NS_SUBTYPE && + ntohs(ct->ct_cmd) == FC_FS_ACC) + fc_lport_enter_rspn_id(lport); + else + fc_lport_error(lport, fp); + +out: + fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); +} /** * fc_lport_rsnn_nn_resp() - Handle response to Register Symbolic Node Name * by Node Name (RSNN_NN) request @@ -1260,6 +1316,37 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) fc_lport_error(lport, fp); } +/** + * fc_rport_enter_rspn_id() - Register symbolic port name with the name server + * @lport: Fibre Channel local port to register + * + * Locking Note: The lport lock is expected to be held before calling + * this routine. + */ +static void fc_lport_enter_rspn_id(struct fc_lport *lport) +{ + struct fc_frame *fp; + size_t len; + + FC_LPORT_DBG(lport, "Entered RSPN_ID state from %s state\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_RSPN_ID); + + len = strnlen(fc_host_symbolic_name(lport->host), 255); + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_rspn) + len); + if (!fp) { + fc_lport_error(lport, fp); + return; + } + + if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, + fc_lport_rspn_id_resp, + lport, lport->e_d_tov)) + fc_lport_error(lport, fp); +} + /** * fc_rport_enter_rsnn_nn() - Register symbolic node name with the name server * @lport: Fibre Channel local port to register @@ -1387,6 +1474,9 @@ static void fc_lport_timeout(struct work_struct *work) case LPORT_ST_RSNN_NN: fc_lport_enter_rsnn_nn(lport); break; + case LPORT_ST_RSPN_ID: + fc_lport_enter_rspn_id(lport); + break; case LPORT_ST_RFT_ID: fc_lport_enter_rft_id(lport); break; diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h index fa8283056325..3fd59a2cb81f 100644 --- a/include/scsi/fc/fc_ns.h +++ b/include/scsi/fc/fc_ns.h @@ -47,6 +47,7 @@ enum fc_ns_req { FC_NS_RFT_ID = 0x0217, /* reg FC4 type for ID */ FC_NS_RPN_ID = 0x0212, /* reg port name for ID */ FC_NS_RNN_ID = 0x0213, /* reg node name for ID */ + FC_NS_RSPN_ID = 0x0218, /* reg symbolic port name */ FC_NS_RSNN_NN = 0x0239, /* reg symbolic node name */ }; @@ -166,4 +167,13 @@ struct fc_ns_rsnn { char fr_name[]; } __attribute__((__packed__)); +/* + * RSPN_ID request - register symbolic port name + */ +struct fc_ns_rspn { + struct fc_ns_fid fr_fid; /* port ID object */ + __u8 fr_name_len; + char fr_name[]; +} __attribute__((__packed__)); + #endif /* _FC_NS_H_ */ diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index 89981afba72d..9afcbb94ec30 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -34,6 +34,7 @@ struct fc_ct_req { struct fc_ns_rft rft; struct fc_ns_fid fid; struct fc_ns_rsnn snn; + struct fc_ns_rspn spn; } payload; }; @@ -137,6 +138,16 @@ static inline int 
fc_ct_fill(struct fc_lport *lport, put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn); break; + case FC_NS_RSPN_ID: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn)); + hton24(ct->payload.spn.fr_fid.fp_fid, + fc_host_port_id(lport->host)); + strncpy(ct->payload.spn.fr_name, + fc_host_symbolic_name(lport->host), 255); + ct->payload.spn.fr_name_len = + strnlen(ct->payload.spn.fr_name, 255); + break; + case FC_NS_RSNN_NN: ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn)); put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 1a632069c402..8258edfa328c 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -63,6 +63,7 @@ enum fc_lport_state { LPORT_ST_DNS, LPORT_ST_RNN_ID, LPORT_ST_RSNN_NN, + LPORT_ST_RSPN_ID, LPORT_ST_RFT_ID, LPORT_ST_SCR, LPORT_ST_READY, -- cgit v1.2.3-59-g8ed1b From 7cccc157119be9b3f57e03a5ae197ba0a6a8a89f Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:47:07 -0800 Subject: [SCSI] libfc: combine name server registration response handlers They all do the same thing, so combine them into a single function. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 205 +++++++----------------------------------- 1 file changed, 30 insertions(+), 175 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 28a35da1493b..f67ca680eb63 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -985,9 +985,9 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) } /** - * fc_lport_rft_id_resp() - Handle response to Register Fibre - * Channel Types by ID (RFT_ID) request - * @sp: current sequence in RFT_ID exchange + * fc_lport_ns_resp() - Handle response to a name server + * registration exchange + * @sp: current sequence in exchange * @fp: response frame * @lp_arg: Fibre Channel host port instance * @@ -995,130 +995,23 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) * held, but it will lock, call an _enter_* function or fc_lport_error * and then unlock the lport. 
*/ -static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) +static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) { struct fc_lport *lport = lp_arg; struct fc_frame_header *fh; struct fc_ct_hdr *ct; - FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp)); + FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp)); if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); - if (lport->state != LPORT_ST_RFT_ID) { - FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state " - "%s\n", fc_lport_state(lport)); - if (IS_ERR(fp)) - goto err; - goto out; - } - - if (IS_ERR(fp)) { - fc_lport_error(lport, fp); - goto err; - } - - fh = fc_frame_header_get(fp); - ct = fc_frame_payload_get(fp, sizeof(*ct)); - - if (fh && ct && fh->fh_type == FC_TYPE_CT && - ct->ct_fs_type == FC_FST_DIR && - ct->ct_fs_subtype == FC_NS_SUBTYPE && - ntohs(ct->ct_cmd) == FC_FS_ACC) - fc_lport_enter_scr(lport); - else - fc_lport_error(lport, fp); -out: - fc_frame_free(fp); -err: - mutex_unlock(&lport->lp_mutex); -} - -/** - * fc_lport_rspn_id_resp() - Handle response to Register Symbolic Port Name - * by ID (RSPN_ID) request - * @sp: current sequence in RSPN_ID exchange - * @fp: response frame - * @lp_arg: Fibre Channel host port instance - * - * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error - * and then unlock the lport. - */ -static void fc_lport_rspn_id_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) -{ - struct fc_lport *lport = lp_arg; - struct fc_frame_header *fh; - struct fc_ct_hdr *ct; - - FC_LPORT_DBG(lport, "Received a RSPN_ID %s\n", fc_els_resp_type(fp)); - - if (fp == ERR_PTR(-FC_EX_CLOSED)) - return; - - mutex_lock(&lport->lp_mutex); - - if (lport->state != LPORT_ST_RSPN_ID) { - FC_LPORT_DBG(lport, "Received a RSPN_ID response, but in state " - "%s\n", fc_lport_state(lport)); - if (IS_ERR(fp)) - goto err; - goto out; - } - - if (IS_ERR(fp)) { - fc_lport_error(lport, fp); - goto err; - } - - fh = fc_frame_header_get(fp); - ct = fc_frame_payload_get(fp, sizeof(*ct)); - if (fh && ct && fh->fh_type == FC_TYPE_CT && - ct->ct_fs_type == FC_FST_DIR && - ct->ct_fs_subtype == FC_NS_SUBTYPE && - ntohs(ct->ct_cmd) == FC_FS_ACC) - fc_lport_enter_rspn_id(lport); - else - fc_lport_error(lport, fp); - -out: - fc_frame_free(fp); -err: - mutex_unlock(&lport->lp_mutex); -} -/** - * fc_lport_rsnn_nn_resp() - Handle response to Register Symbolic Node Name - * by Node Name (RSNN_NN) request - * @sp: current sequence in RSNN_NN exchange - * @fp: response frame - * @lp_arg: Fibre Channel host port instance - * - * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error - * and then unlock the lport. 
- */ -static void fc_lport_rsnn_nn_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) -{ - struct fc_lport *lport = lp_arg; - struct fc_frame_header *fh; - struct fc_ct_hdr *ct; - - FC_LPORT_DBG(lport, "Received a RSNN_NN %s\n", fc_els_resp_type(fp)); - - if (fp == ERR_PTR(-FC_EX_CLOSED)) - return; - - mutex_lock(&lport->lp_mutex); - - if (lport->state != LPORT_ST_RSNN_NN) { - FC_LPORT_DBG(lport, "Received a RSNN_NN response, but in state " - "%s\n", fc_lport_state(lport)); + if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFT_ID) { + FC_LPORT_DBG(lport, "Received a name server response, " + "but in state %s\n", fc_lport_state(lport)); if (IS_ERR(fp)) goto err; goto out; @@ -1131,68 +1024,30 @@ static void fc_lport_rsnn_nn_resp(struct fc_seq *sp, struct fc_frame *fp, fh = fc_frame_header_get(fp); ct = fc_frame_payload_get(fp, sizeof(*ct)); - if (fh && ct && fh->fh_type == FC_TYPE_CT && - ct->ct_fs_type == FC_FST_DIR && - ct->ct_fs_subtype == FC_NS_SUBTYPE && - ntohs(ct->ct_cmd) == FC_FS_ACC) - fc_lport_enter_rsnn_nn(lport); - else - fc_lport_error(lport, fp); - -out: - fc_frame_free(fp); -err: - mutex_unlock(&lport->lp_mutex); -} -/** - * fc_lport_rnn_id_resp() - Handle response to Register Node - * Name by ID (RNN_ID) request - * @sp: current sequence in RNN_ID exchange - * @fp: response frame - * @lp_arg: Fibre Channel host port instance - * - * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error - * and then unlock the lport. - */ -static void fc_lport_rnn_id_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) -{ - struct fc_lport *lport = lp_arg; - struct fc_frame_header *fh; - struct fc_ct_hdr *ct; - - FC_LPORT_DBG(lport, "Received a RNN_ID %s\n", fc_els_resp_type(fp)); - - if (fp == ERR_PTR(-FC_EX_CLOSED)) - return; - - mutex_lock(&lport->lp_mutex); - - if (lport->state != LPORT_ST_RNN_ID) { - FC_LPORT_DBG(lport, "Received a RNN_ID response, but in state " - "%s\n", fc_lport_state(lport)); - if (IS_ERR(fp)) - goto err; - goto out; - } - - if (IS_ERR(fp)) { - fc_lport_error(lport, fp); - goto err; - } - - fh = fc_frame_header_get(fp); - ct = fc_frame_payload_get(fp, sizeof(*ct)); if (fh && ct && fh->fh_type == FC_TYPE_CT && ct->ct_fs_type == FC_FST_DIR && ct->ct_fs_subtype == FC_NS_SUBTYPE && ntohs(ct->ct_cmd) == FC_FS_ACC) - fc_lport_enter_rft_id(lport); + switch (lport->state) { + case LPORT_ST_RNN_ID: + fc_lport_enter_rsnn_nn(lport); + break; + case LPORT_ST_RSNN_NN: + fc_lport_enter_rspn_id(lport); + break; + case LPORT_ST_RSPN_ID: + fc_lport_enter_rft_id(lport); + break; + case LPORT_ST_RFT_ID: + fc_lport_enter_scr(lport); + break; + default: + /* should have already been caught by state checks */ + break; + } else fc_lport_error(lport, fp); - out: fc_frame_free(fp); err: @@ -1311,7 +1166,7 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) } if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID, - fc_lport_rft_id_resp, + fc_lport_ns_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); } @@ -1342,7 +1197,7 @@ static void fc_lport_enter_rspn_id(struct fc_lport *lport) } if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, - fc_lport_rspn_id_resp, + fc_lport_ns_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); } @@ -1373,7 +1228,7 @@ static void fc_lport_enter_rsnn_nn(struct fc_lport *lport) } if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSNN_NN, - fc_lport_rsnn_nn_resp, + fc_lport_ns_resp, lport, 
lport->e_d_tov)) fc_lport_error(lport, fp); } @@ -1402,7 +1257,7 @@ static void fc_lport_enter_rnn_id(struct fc_lport *lport) } if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RNN_ID, - fc_lport_rnn_id_resp, + fc_lport_ns_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); } -- cgit v1.2.3-59-g8ed1b From c914f7d16df6420cfd4c09399957425ba9c21f47 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:47:12 -0800 Subject: [SCSI] libfc: combine name server registration request functions Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 164 +++++++++++------------------------------- 1 file changed, 42 insertions(+), 122 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index f67ca680eb63..dfea6c572dfb 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -108,10 +108,7 @@ static void fc_lport_error(struct fc_lport *, struct fc_frame *); static void fc_lport_enter_reset(struct fc_lport *); static void fc_lport_enter_flogi(struct fc_lport *); static void fc_lport_enter_dns(struct fc_lport *); -static void fc_lport_enter_rnn_id(struct fc_lport *); -static void fc_lport_enter_rsnn_nn(struct fc_lport *); -static void fc_lport_enter_rspn_id(struct fc_lport *); -static void fc_lport_enter_rft_id(struct fc_lport *); +static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state); static void fc_lport_enter_scr(struct fc_lport *); static void fc_lport_enter_ready(struct fc_lport *); static void fc_lport_enter_logo(struct fc_lport *); @@ -157,7 +154,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, case RPORT_EV_READY: if (lport->state == LPORT_ST_DNS) { lport->dns_rp = rdata; - fc_lport_enter_rnn_id(lport); + fc_lport_enter_ns(lport, LPORT_ST_RNN_ID); } else { FC_LPORT_DBG(lport, "Received an READY event " "on port (%6x) for the directory " @@ -1031,13 +1028,13 @@ static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, ntohs(ct->ct_cmd) == FC_FS_ACC) switch (lport->state) { case LPORT_ST_RNN_ID: - fc_lport_enter_rsnn_nn(lport); + fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN); break; case LPORT_ST_RSNN_NN: - fc_lport_enter_rspn_id(lport); + fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID); break; case LPORT_ST_RSPN_ID: - fc_lport_enter_rft_id(lport); + fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); break; case LPORT_ST_RFT_ID: fc_lport_enter_scr(lport); @@ -1130,133 +1127,62 @@ static void fc_lport_enter_scr(struct fc_lport *lport) } /** - * fc_lport_enter_rft_id() - Register FC4-types with the name server + * fc_lport_enter_ns() - register some object with the name server * @lport: Fibre Channel local port to register * * Locking Note: The lport lock is expected to be held before calling * this routine. 
*/ -static void fc_lport_enter_rft_id(struct fc_lport *lport) -{ - struct fc_frame *fp; - struct fc_ns_fts *lps; - int i; - - FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n", - fc_lport_state(lport)); - - fc_lport_state_enter(lport, LPORT_ST_RFT_ID); - - lps = &lport->fcts; - i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]); - while (--i >= 0) - if (ntohl(lps->ff_type_map[i]) != 0) - break; - if (i < 0) { - /* nothing to register, move on to SCR */ - fc_lport_enter_scr(lport); - return; - } - - fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + - sizeof(struct fc_ns_rft)); - if (!fp) { - fc_lport_error(lport, fp); - return; - } - - if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID, - fc_lport_ns_resp, - lport, lport->e_d_tov)) - fc_lport_error(lport, fp); -} - -/** - * fc_rport_enter_rspn_id() - Register symbolic port name with the name server - * @lport: Fibre Channel local port to register - * - * Locking Note: The lport lock is expected to be held before calling - * this routine. - */ -static void fc_lport_enter_rspn_id(struct fc_lport *lport) +static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state) { struct fc_frame *fp; + enum fc_ns_req cmd; + int size = sizeof(struct fc_ct_hdr); size_t len; - FC_LPORT_DBG(lport, "Entered RSPN_ID state from %s state\n", + FC_LPORT_DBG(lport, "Entered %s state from %s state\n", + fc_lport_state_names[state], fc_lport_state(lport)); - fc_lport_state_enter(lport, LPORT_ST_RSPN_ID); - - len = strnlen(fc_host_symbolic_name(lport->host), 255); - fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + - sizeof(struct fc_ns_rspn) + len); - if (!fp) { - fc_lport_error(lport, fp); - return; - } - - if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, - fc_lport_ns_resp, - lport, lport->e_d_tov)) - fc_lport_error(lport, fp); -} + fc_lport_state_enter(lport, state); -/** - * fc_rport_enter_rsnn_nn() - Register symbolic node name with the name server - * @lport: Fibre Channel local port to register - * - * Locking Note: The lport lock is expected to be held before calling - * this routine. 
- */ -static void fc_lport_enter_rsnn_nn(struct fc_lport *lport) -{ - struct fc_frame *fp; - size_t len; - - FC_LPORT_DBG(lport, "Entered RSNN_NN state from %s state\n", - fc_lport_state(lport)); - - fc_lport_state_enter(lport, LPORT_ST_RSNN_NN); - - len = strnlen(fc_host_symbolic_name(lport->host), 255); - fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + - sizeof(struct fc_ns_rsnn) + len); - if (!fp) { - fc_lport_error(lport, fp); + switch (state) { + case LPORT_ST_RNN_ID: + cmd = FC_NS_RNN_ID; + size += sizeof(struct fc_ns_rn_id); + break; + case LPORT_ST_RSNN_NN: + len = strnlen(fc_host_symbolic_name(lport->host), 255); + /* if there is no symbolic name, skip to RFT_ID */ + if (!len) + return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); + cmd = FC_NS_RSNN_NN; + size += sizeof(struct fc_ns_rsnn) + len; + break; + case LPORT_ST_RSPN_ID: + len = strnlen(fc_host_symbolic_name(lport->host), 255); + /* if there is no symbolic name, skip to RFT_ID */ + if (!len) + return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); + cmd = FC_NS_RSPN_ID; + size += sizeof(struct fc_ns_rspn) + len; + break; + case LPORT_ST_RFT_ID: + cmd = FC_NS_RFT_ID; + size += sizeof(struct fc_ns_rft); + break; + default: + fc_lport_error(lport, NULL); return; } - if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSNN_NN, - fc_lport_ns_resp, - lport, lport->e_d_tov)) - fc_lport_error(lport, fp); -} - -/** - * fc_rport_enter_rnn_id() - Register node name with the name server - * @lport: Fibre Channel local port to register - * - * Locking Note: The lport lock is expected to be held before calling - * this routine. - */ -static void fc_lport_enter_rnn_id(struct fc_lport *lport) -{ - struct fc_frame *fp; - - FC_LPORT_DBG(lport, "Entered RNN_ID state from %s state\n", - fc_lport_state(lport)); - - fc_lport_state_enter(lport, LPORT_ST_RNN_ID); - - fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + - sizeof(struct fc_ns_rn_id)); + fp = fc_frame_alloc(lport, size); if (!fp) { fc_lport_error(lport, fp); return; } - if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RNN_ID, + if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd, fc_lport_ns_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); @@ -1324,16 +1250,10 @@ static void fc_lport_timeout(struct work_struct *work) fc_lport_enter_dns(lport); break; case LPORT_ST_RNN_ID: - fc_lport_enter_rnn_id(lport); - break; case LPORT_ST_RSNN_NN: - fc_lport_enter_rsnn_nn(lport); - break; case LPORT_ST_RSPN_ID: - fc_lport_enter_rspn_id(lport); - break; case LPORT_ST_RFT_ID: - fc_lport_enter_rft_id(lport); + fc_lport_enter_ns(lport, lport->state); break; case LPORT_ST_SCR: fc_lport_enter_scr(lport); -- cgit v1.2.3-59-g8ed1b From dc8596d303bb306da9ab5326fa6209710de86b8b Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:47:18 -0800 Subject: [SCSI] fcoe: vport symbolic name support Allow a vport specific string to be appended to the port symbolic name. The new symbolic name is sent to the name server after it is set. This currently messes with libhbalinux, which is looking for the fcoe "fcoe over " string and expects whatever comes after the "over" to be a network interface name only. Adds an EXPORT_SYMBOL to libfc for fc_frame_alloc_fill, which is needed to allow fcoe to allocate a frame of variable length for the RSPN request. 
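For illustration only (editorial sketch, not part of this patch): how another libfc-based LLD could hook the same callback. A made-up "examplefc" driver is used; everything else (fc_function_template.set_vport_symbolic_name, fc_host_symbolic_name(), fc_frame_alloc(), FC_NS_RSPN_ID, the lport elsct_send template op) is taken from interfaces visible in this series, and the symbolic-name format string is a placeholder.

#include <scsi/scsi_transport_fc.h>
#include <scsi/libfc.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ns.h>

/* Hypothetical handler: rebuild the fc_host symbolic name and, once the
 * lport is ready, register it with the name server via RSPN_ID. */
static void examplefc_set_vport_symbolic_name(struct fc_vport *vport)
{
	struct fc_lport *lport = vport->dd_data;
	struct fc_frame *fp;
	size_t len;

	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
		 "examplefc : %s", vport->symbolic_name);

	if (lport->state != LPORT_ST_READY)
		return;

	len = strnlen(fc_host_symbolic_name(lport->host), 255);
	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_rspn) + len);
	if (!fp)
		return;
	lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
			     NULL, NULL, lport->e_d_tov);
}

static struct fc_function_template examplefc_transport_function = {
	/* ... other template ops ... */
	.set_vport_symbolic_name = examplefc_set_vport_symbolic_name,
};

The transport class calls .set_vport_symbolic_name after the vport's symbolic name is updated, so the template entry is the only plumbing an LLD needs beyond a handler like the one fcoe adds below.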
Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 33 +++++++++++++++++++++++++++++++++ drivers/scsi/libfc/fc_frame.c | 1 + 2 files changed, 34 insertions(+) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 437eacf2732d..f1c126b798af 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -96,6 +96,7 @@ static struct scsi_transport_template *fcoe_vport_transport_template; static int fcoe_vport_destroy(struct fc_vport *vport); static int fcoe_vport_create(struct fc_vport *vport, bool disabled); static int fcoe_vport_disable(struct fc_vport *vport, bool disable); +static void fcoe_set_vport_symbolic_name(struct fc_vport *vport); struct fc_function_template fcoe_transport_function = { .show_host_node_name = 1, @@ -132,6 +133,7 @@ struct fc_function_template fcoe_transport_function = { .vport_create = fcoe_vport_create, .vport_delete = fcoe_vport_destroy, .vport_disable = fcoe_vport_disable, + .set_vport_symbolic_name = fcoe_set_vport_symbolic_name, }; struct fc_function_template fcoe_vport_transport_function = { @@ -2326,3 +2328,34 @@ static int fcoe_vport_disable(struct fc_vport *vport, bool disable) return 0; } +/** + * fcoe_vport_set_symbolic_name() - append vport string to symbolic name + * @vport: fc_vport with a new symbolic name string + * + * After generating a new symbolic name string, a new RSPN_ID request is + * sent to the name server. There is no response handler, so if it fails + * for some reason it will not be retried. + */ +static void fcoe_set_vport_symbolic_name(struct fc_vport *vport) +{ + struct fc_lport *lport = vport->dd_data; + struct fc_frame *fp; + size_t len; + + snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, + "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION, + fcoe_netdev(lport)->name, vport->symbolic_name); + + if (lport->state != LPORT_ST_READY) + return; + + len = strnlen(fc_host_symbolic_name(lport->host), 255); + fp = fc_frame_alloc(lport, + sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_rspn) + len); + if (!fp) + return; + lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, + NULL, NULL, lport->e_d_tov); +} + diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c index ac3681ae68d9..4fea369b58ee 100644 --- a/drivers/scsi/libfc/fc_frame.c +++ b/drivers/scsi/libfc/fc_frame.c @@ -86,3 +86,4 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) } return fp; } +EXPORT_SYMBOL(fc_frame_alloc_fill); -- cgit v1.2.3-59-g8ed1b From 07aac328342d6ca1725d901e1c5da8a1aa88f557 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 3 Nov 2009 11:47:23 -0800 Subject: [SCSI] libfc: Export FC headers Export fc_els.h, fc_fs.h, fc_gs.h and fc_ns.h so that they may be used by applications. This will be needed for FC Passthrough applications like fcping, but could be used by other applications. Fix to include to exported files provided by Chris Leech . 
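To make the intent concrete (editorial example, not part of the patch): after a 'make headers_install' these headers can be consumed directly by userspace tools such as the fcping mentioned above. A minimal sketch, using only names that appear elsewhere in this series (struct fc_ct_hdr, FC_FST_DIR, FC_NS_SUBTYPE, FC_NS_GPN_ID); the <scsi/fc/...> install location is assumed.

/* Hypothetical userspace consumer of the exported FC headers. */
#include <stdio.h>
#include <arpa/inet.h>
#include <scsi/fc/fc_gs.h>	/* struct fc_ct_hdr, FC_FST_DIR */
#include <scsi/fc/fc_ns.h>	/* FC_NS_SUBTYPE, FC_NS_GPN_ID */

int main(void)
{
	struct fc_ct_hdr ct = {
		.ct_fs_type	= FC_FST_DIR,
		.ct_fs_subtype	= FC_NS_SUBTYPE,
		.ct_cmd		= htons(FC_NS_GPN_ID),
	};

	printf("CT preamble: %zu bytes, cmd 0x%04x\n",
	       sizeof(ct), ntohs(ct.ct_cmd));
	return 0;
}

The includes added to each header here pull in the kernel integer types (__u8, __be16, ...) that these structure definitions use, which is what makes standalone userspace builds like the above possible.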
Signed-off-by: Robert Love Signed-off-by: James Bottomley --- include/scsi/Kbuild | 1 + include/scsi/fc/Kbuild | 4 ++++ include/scsi/fc/fc_els.h | 2 ++ include/scsi/fc/fc_fs.h | 2 ++ include/scsi/fc/fc_gs.h | 2 ++ include/scsi/fc/fc_ns.h | 2 ++ 6 files changed, 13 insertions(+) create mode 100644 include/scsi/fc/Kbuild diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild index 33b2750e9283..b3a0ee6b2f1c 100644 --- a/include/scsi/Kbuild +++ b/include/scsi/Kbuild @@ -2,3 +2,4 @@ header-y += scsi.h header-y += scsi_netlink.h header-y += scsi_netlink_fc.h header-y += scsi_bsg_fc.h +header-y += fc/ diff --git a/include/scsi/fc/Kbuild b/include/scsi/fc/Kbuild new file mode 100644 index 000000000000..56603813c6cd --- /dev/null +++ b/include/scsi/fc/Kbuild @@ -0,0 +1,4 @@ +header-y += fc_els.h +header-y += fc_fs.h +header-y += fc_gs.h +header-y += fc_ns.h diff --git a/include/scsi/fc/fc_els.h b/include/scsi/fc/fc_els.h index b0872afe2d30..f94328132a26 100644 --- a/include/scsi/fc/fc_els.h +++ b/include/scsi/fc/fc_els.h @@ -20,6 +20,8 @@ #ifndef _FC_ELS_H_ #define _FC_ELS_H_ +#include + /* * Fibre Channel Switch - Enhanced Link Services definitions. * From T11 FC-LS Rev 1.2 June 7, 2005. diff --git a/include/scsi/fc/fc_fs.h b/include/scsi/fc/fc_fs.h index ac4cd38c860e..50f28b143451 100644 --- a/include/scsi/fc/fc_fs.h +++ b/include/scsi/fc/fc_fs.h @@ -20,6 +20,8 @@ #ifndef _FC_FS_H_ #define _FC_FS_H_ +#include + /* * Fibre Channel Framing and Signalling definitions. * From T11 FC-FS-2 Rev 0.90 - 9 August 2005. diff --git a/include/scsi/fc/fc_gs.h b/include/scsi/fc/fc_gs.h index 324dd0e3c622..a37346d47eb1 100644 --- a/include/scsi/fc/fc_gs.h +++ b/include/scsi/fc/fc_gs.h @@ -20,6 +20,8 @@ #ifndef _FC_GS_H_ #define _FC_GS_H_ +#include + /* * Fibre Channel Services - Common Transport. * From T11.org FC-GS-2 Rev 5.3 November 1998. diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h index 3fd59a2cb81f..f4d354eb26b9 100644 --- a/include/scsi/fc/fc_ns.h +++ b/include/scsi/fc/fc_ns.h @@ -20,6 +20,8 @@ #ifndef _FC_NS_H_ #define _FC_NS_H_ +#include + /* * Fibre Channel Services - Name Service (dNS) * From T11.org FC-GS-2 Rev 5.3 November 1998. -- cgit v1.2.3-59-g8ed1b From 5868287460b0fc243e828a0b856cd53d8bf45739 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 3 Nov 2009 11:47:28 -0800 Subject: [SCSI] libfc: Add routine to copy data from a buffer to a SG list When handling the multi-frame responses of fc pass-thru requests, a code segment similar to fc_fcp_recv_data (routine to receive inbound SCSI data) is used in the response handler. This patch is to add a routine, called fc_copy_buffer_to_sglist(), to handle the common function of copying data from a buffer to a scatter- gather list in order to avoid code duplication. 
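For context (editorial sketch, not part of the patch): a condensed view of how a receive path calls the new helper, modelled on the fc_fcp_recv_data() conversion below. fc_copy_buffer_to_sglist() and the scsi_sglist()/scsi_sg_count() accessors are real; the surrounding function name and the CRC seeding are simplified placeholders.

#include <linux/scatterlist.h>
#include <linux/crc32.h>
#include <scsi/scsi_cmnd.h>
#include "fc_libfc.h"		/* fc_copy_buffer_to_sglist() */

/* Hypothetical caller: copy 'len' bytes at 'offset' of a received frame
 * payload into the command's scatter-gather list, optionally CRCing it. */
static u32 example_copy_to_cmd(struct scsi_cmnd *sc, void *buf, size_t len,
			       size_t offset, u32 *crc)
{
	struct scatterlist *sg = scsi_sglist(sc);
	u32 nents = scsi_sg_count(sc);

	/* When crc is NULL the helper skips the running CRC entirely;
	 * the FCP path below seeds it from the FC header first. */
	return fc_copy_buffer_to_sglist(buf, len, sg, &nents, &offset,
					KM_SOFTIRQ0, crc);
}

Because the helper updates *nents and *offset as it walks the list, multi-frame callers (such as the bsg response handler added in a later patch in this series) can keep passing the same cursor across frames.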
Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 56 ++++++++-------------------------------- drivers/scsi/libfc/fc_libfc.c | 60 +++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/libfc/fc_libfc.h | 8 ++++++ 3 files changed, 78 insertions(+), 46 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 866f78ac4ec2..98279fe0d0c7 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -323,7 +323,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) size_t len; void *buf; struct scatterlist *sg; - size_t remaining; + u32 nents; fh = fc_frame_header_get(fp); offset = ntohl(fh->fh_parm_offset); @@ -347,55 +347,19 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) if (offset != fsp->xfer_len) fsp->state |= FC_SRB_DISCONTIG; - crc = 0; - if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) - crc = crc32(~0, (u8 *) fh, sizeof(*fh)); - sg = scsi_sglist(sc); - remaining = len; - - while (remaining > 0 && sg) { - size_t off; - void *page_addr; - size_t sg_bytes; - - if (offset >= sg->length) { - offset -= sg->length; - sg = sg_next(sg); - continue; - } - sg_bytes = min(remaining, sg->length - offset); - - /* - * The scatterlist item may be bigger than PAGE_SIZE, - * but we are limited to mapping PAGE_SIZE at a time. - */ - off = offset + sg->offset; - sg_bytes = min(sg_bytes, (size_t) - (PAGE_SIZE - (off & ~PAGE_MASK))); - page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), - KM_SOFTIRQ0); - if (!page_addr) - break; /* XXX panic? */ - - if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) - crc = crc32(crc, buf, sg_bytes); - memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, - sg_bytes); - - kunmap_atomic(page_addr, KM_SOFTIRQ0); - buf += sg_bytes; - offset += sg_bytes; - remaining -= sg_bytes; - copy_len += sg_bytes; - } + nents = scsi_sg_count(sc); - if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { + if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) { + copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, + &offset, KM_SOFTIRQ0, NULL); + } else { + crc = crc32(~0, (u8 *) fh, sizeof(*fh)); + copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, + &offset, KM_SOFTIRQ0, &crc); buf = fc_frame_payload_get(fp, 0); - if (len % 4) { + if (len % 4) crc = crc32(crc, buf + len, 4 - (len % 4)); - len += 4 - (len % 4); - } if (~crc != le32_to_cpu(fr_crc(fp))) { crc_err: diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c index 01418ae8cb84..295eafb0316f 100644 --- a/drivers/scsi/libfc/fc_libfc.c +++ b/drivers/scsi/libfc/fc_libfc.c @@ -72,3 +72,63 @@ static void __exit libfc_exit(void) fc_destroy_rport(); } module_exit(libfc_exit); + +/** + * fc_copy_buffer_to_sglist() - This routine copies the data of a buffer + * into a scatter-gather list (SG list). + * + * @buf: pointer to the data buffer. + * @len: the byte-length of the data buffer. + * @sg: pointer to the pointer of the SG list. + * @nents: pointer to the remaining number of entries in the SG list. + * @offset: pointer to the current offset in the SG list. + * @km_type: dedicated page table slot type for kmap_atomic. + * @crc: pointer to the 32-bit crc value. + * If crc is NULL, CRC is not calculated. 
+ */ +u32 fc_copy_buffer_to_sglist(void *buf, size_t len, + struct scatterlist *sg, + u32 *nents, size_t *offset, + enum km_type km_type, u32 *crc) +{ + size_t remaining = len; + u32 copy_len = 0; + + while (remaining > 0 && sg) { + size_t off, sg_bytes; + void *page_addr; + + if (*offset >= sg->length) { + /* + * Check for end and drop resources + * from the last iteration. + */ + if (!(*nents)) + break; + --(*nents); + *offset -= sg->length; + sg = sg_next(sg); + continue; + } + sg_bytes = min(remaining, sg->length - *offset); + + /* + * The scatterlist item may be bigger than PAGE_SIZE, + * but we are limited to mapping PAGE_SIZE at a time. + */ + off = *offset + sg->offset; + sg_bytes = min(sg_bytes, + (size_t)(PAGE_SIZE - (off & ~PAGE_MASK))); + page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), + km_type); + if (crc) + *crc = crc32(*crc, buf, sg_bytes); + memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes); + kunmap_atomic(page_addr, km_type); + buf += sg_bytes; + *offset += sg_bytes; + remaining -= sg_bytes; + copy_len += sg_bytes; + } + return copy_len; +} diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h index 0530149ac174..e4b5e9280cb0 100644 --- a/drivers/scsi/libfc/fc_libfc.h +++ b/drivers/scsi/libfc/fc_libfc.h @@ -101,4 +101,12 @@ void fc_destroy_fcp(void); */ const char *fc_els_resp_type(struct fc_frame *); +/* + * Copies a buffer into an sg list + */ +u32 fc_copy_buffer_to_sglist(void *buf, size_t len, + struct scatterlist *sg, + u32 *nents, size_t *offset, + enum km_type km_type, u32 *crc); + #endif /* _FC_LIBFC_H_ */ -- cgit v1.2.3-59-g8ed1b From a51ab39606042e76a483547620699530caa12c40 Mon Sep 17 00:00:00 2001 From: Steve Ma Date: Tue, 3 Nov 2009 11:47:34 -0800 Subject: [SCSI] libfc, fcoe: Add FC passthrough support This is the Open-FCoE implementation of the FC passthrough support via bsg interface. Passthrough support is added to both N_Ports and VN_Ports. 
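To show how this path is expected to be driven (editorial sketch, not part of the patch): the requests arrive through the fc transport's bsg queue, so userspace issues them with the SG_IO v4 ioctl against the host's bsg node. The msgcode and rqst_data.h_ct.port_id fields come from the already-exported <scsi/scsi_bsg_fc.h>; the bsg device path, buffer preparation and error handling below are placeholders.

/* Hypothetical userspace sketch: send a CT request to the directory
 * server (FC_FID_DIR_SERV, 0xfffffc) through the new pass-thru path. */
#include <fcntl.h>
#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_fc.h>

int example_ct_passthru(const char *bsg_dev, void *ct_iu, size_t ct_len,
			void *rsp, size_t rsp_len)
{
	struct fc_bsg_request req;
	struct fc_bsg_reply reply;
	struct sg_io_v4 io;
	int fd = open(bsg_dev, O_RDWR);	/* e.g. the fc_host's bsg node */

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.msgcode = FC_BSG_HST_CT;
	req.rqst_data.h_ct.port_id[0] = 0xff;	/* directory server */
	req.rqst_data.h_ct.port_id[1] = 0xff;
	req.rqst_data.h_ct.port_id[2] = 0xfc;

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)&req;
	io.request_len = sizeof(req);
	io.response = (uintptr_t)&reply;
	io.max_response_len = sizeof(reply);
	io.dout_xferp = (uintptr_t)ct_iu;	/* CT request payload */
	io.dout_xfer_len = ct_len;
	io.din_xferp = (uintptr_t)rsp;		/* CT accept/reject payload */
	io.din_xfer_len = rsp_len;

	return ioctl(fd, SG_IO, &io);
}

The kernel side of this exchange is what fc_lport_bsg_request() below implements: FC_BSG_HST_CT requests addressed to 0xfffffc are routed to the lport's dNS rport, and the multi-frame CT response is copied back into the din buffer via fc_copy_buffer_to_sglist().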
Signed-off-by: Steve Ma Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 4 + drivers/scsi/libfc/fc_lport.c | 267 ++++++++++++++++++++++++++++++++++++++++++ include/scsi/libfc.h | 7 ++ 3 files changed, 278 insertions(+) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index f1c126b798af..8f078d306a0a 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -134,6 +134,8 @@ struct fc_function_template fcoe_transport_function = { .vport_delete = fcoe_vport_destroy, .vport_disable = fcoe_vport_disable, .set_vport_symbolic_name = fcoe_set_vport_symbolic_name, + + .bsg_request = fc_lport_bsg_request, }; struct fc_function_template fcoe_vport_transport_function = { @@ -167,6 +169,8 @@ struct fc_function_template fcoe_vport_transport_function = { .issue_fc_host_lip = fcoe_reset, .terminate_rport_io = fc_rport_terminate_io, + + .bsg_request = fc_lport_bsg_request, }; static struct scsi_host_template fcoe_shost_template = { diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index dfea6c572dfb..2162e6b0f43e 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -94,6 +94,7 @@ #include #include +#include #include "fc_libfc.h" @@ -127,6 +128,24 @@ static const char *fc_lport_state_names[] = { [LPORT_ST_RESET] = "reset", }; +/** + * struct fc_bsg_info - FC Passthrough managemet structure + * @job: The passthrough job + * @lport: The local port to pass through a command + * @rsp_code: The expected response code + * @sg: job->reply_payload.sg_list + * @nents: job->reply_payload.sg_cnt + * @offset: The offset into the response data + */ +struct fc_bsg_info { + struct fc_bsg_job *job; + struct fc_lport *lport; + u16 rsp_code; + struct scatterlist *sg; + u32 nents; + size_t offset; +}; + static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) { fc_frame_free(fp); @@ -1512,3 +1531,251 @@ int fc_lport_init(struct fc_lport *lport) return 0; } EXPORT_SYMBOL(fc_lport_init); + +/** + * fc_lport_bsg_resp() - The common response handler for fc pass-thru requests + * @sp: current sequence in the fc pass-thru request exchange + * @fp: received response frame + * @info_arg: pointer to struct fc_bsg_info + */ +static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, + void *info_arg) +{ + struct fc_bsg_info *info = info_arg; + struct fc_bsg_job *job = info->job; + struct fc_lport *lport = info->lport; + struct fc_frame_header *fh; + size_t len; + void *buf; + + if (IS_ERR(fp)) { + job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ? + -ECONNABORTED : -ETIMEDOUT; + job->reply_len = sizeof(uint32_t); + job->state_flags |= FC_RQST_STATE_DONE; + job->job_done(job); + kfree(info); + return; + } + + mutex_lock(&lport->lp_mutex); + fh = fc_frame_header_get(fp); + len = fr_len(fp) - sizeof(*fh); + buf = fc_frame_payload_get(fp, 0); + + if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) { + /* Get the response code from the first frame payload */ + unsigned short cmd = (info->rsp_code == FC_FS_ACC) ? + ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) : + (unsigned short)fc_frame_payload_op(fp); + + /* Save the reply status of the job */ + job->reply->reply_data.ctels_reply.status = + (cmd == info->rsp_code) ? 
+ FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT; + } + + job->reply->reply_payload_rcv_len += + fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents, + &info->offset, KM_BIO_SRC_IRQ, NULL); + + if (fr_eof(fp) == FC_EOF_T && + (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == + (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { + if (job->reply->reply_payload_rcv_len > + job->reply_payload.payload_len) + job->reply->reply_payload_rcv_len = + job->reply_payload.payload_len; + job->reply->result = 0; + job->state_flags |= FC_RQST_STATE_DONE; + job->job_done(job); + kfree(info); + } + fc_frame_free(fp); + mutex_unlock(&lport->lp_mutex); +} + +/** + * fc_lport_els_request() - Send ELS pass-thru request + * @job: The bsg fc pass-thru job structure + * @lport: The local port sending the request + * @did: The destination port id. + * + * Locking Note: The lport lock is expected to be held before calling + * this routine. + */ +static int fc_lport_els_request(struct fc_bsg_job *job, + struct fc_lport *lport, + u32 did, u32 tov) +{ + struct fc_bsg_info *info; + struct fc_frame *fp; + struct fc_frame_header *fh; + char *pp; + int len; + + fp = fc_frame_alloc(lport, sizeof(struct fc_frame_header) + + job->request_payload.payload_len); + if (!fp) + return -ENOMEM; + + len = job->request_payload.payload_len; + pp = fc_frame_payload_get(fp, len); + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + pp, len); + + fh = fc_frame_header_get(fp); + fh->fh_r_ctl = FC_RCTL_ELS_REQ; + hton24(fh->fh_d_id, did); + hton24(fh->fh_s_id, fc_host_port_id(lport->host)); + fh->fh_type = FC_TYPE_ELS; + hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | + FC_FC_END_SEQ | FC_FC_SEQ_INIT); + fh->fh_cs_ctl = 0; + fh->fh_df_ctl = 0; + fh->fh_parm_offset = 0; + + info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); + if (!info) { + fc_frame_free(fp); + return -ENOMEM; + } + + info->job = job; + info->lport = lport; + info->rsp_code = ELS_LS_ACC; + info->nents = job->reply_payload.sg_cnt; + info->sg = job->reply_payload.sg_list; + + if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, + NULL, info, tov)) + return -ECOMM; + return 0; +} + +/** + * fc_lport_ct_request() - Send CT pass-thru request + * @job: The bsg fc pass-thru job structure + * @lport: The local port sending the request + * @did: The destination FC-ID + * @tov: The time to wait for a response + * + * Locking Note: The lport lock is expected to be held before calling + * this routine. 
+ */ +static int fc_lport_ct_request(struct fc_bsg_job *job, + struct fc_lport *lport, u32 did, u32 tov) +{ + struct fc_bsg_info *info; + struct fc_frame *fp; + struct fc_frame_header *fh; + struct fc_ct_req *ct; + size_t len; + + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + + job->request_payload.payload_len); + if (!fp) + return -ENOMEM; + + len = job->request_payload.payload_len; + ct = fc_frame_payload_get(fp, len); + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + ct, len); + + fh = fc_frame_header_get(fp); + fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL; + hton24(fh->fh_d_id, did); + hton24(fh->fh_s_id, fc_host_port_id(lport->host)); + fh->fh_type = FC_TYPE_CT; + hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | + FC_FC_END_SEQ | FC_FC_SEQ_INIT); + fh->fh_cs_ctl = 0; + fh->fh_df_ctl = 0; + fh->fh_parm_offset = 0; + + info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); + if (!info) { + fc_frame_free(fp); + return -ENOMEM; + } + + info->job = job; + info->lport = lport; + info->rsp_code = FC_FS_ACC; + info->nents = job->reply_payload.sg_cnt; + info->sg = job->reply_payload.sg_list; + + if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, + NULL, info, tov)) + return -ECOMM; + return 0; +} + +/** + * fc_lport_bsg_request() - The common entry point for sending + * fc pass-thru requests + * @job: The fc pass-thru job structure + */ +int fc_lport_bsg_request(struct fc_bsg_job *job) +{ + struct request *rsp = job->req->next_rq; + struct Scsi_Host *shost = job->shost; + struct fc_lport *lport = shost_priv(shost); + struct fc_rport *rport; + struct fc_rport_priv *rdata; + int rc = -EINVAL; + u32 did; + + job->reply->reply_payload_rcv_len = 0; + rsp->resid_len = job->reply_payload.payload_len; + + mutex_lock(&lport->lp_mutex); + + switch (job->request->msgcode) { + case FC_BSG_RPT_ELS: + rport = job->rport; + if (!rport) + break; + + rdata = rport->dd_data; + rc = fc_lport_els_request(job, lport, rport->port_id, + rdata->e_d_tov); + break; + + case FC_BSG_RPT_CT: + rport = job->rport; + if (!rport) + break; + + rdata = rport->dd_data; + rc = fc_lport_ct_request(job, lport, rport->port_id, + rdata->e_d_tov); + break; + + case FC_BSG_HST_CT: + did = ntoh24(job->request->rqst_data.h_ct.port_id); + if (did == FC_FID_DIR_SERV) + rdata = lport->dns_rp; + else + rdata = lport->tt.rport_lookup(lport, did); + + if (!rdata) + break; + + rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov); + break; + + case FC_BSG_HST_ELS_NOLOGIN: + did = ntoh24(job->request->rqst_data.h_els.port_id); + rc = fc_lport_els_request(job, lport, did, lport->e_d_tov); + break; + } + + mutex_unlock(&lport->lp_mutex); + return rc; +} +EXPORT_SYMBOL(fc_lport_bsg_request); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 8258edfa328c..54df9fe00c14 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -26,6 +26,7 @@ #include #include +#include #include #include @@ -830,6 +831,12 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id); void fc_vport_setlink(struct fc_lport *vn_port); void fc_vports_linkchange(struct fc_lport *n_port); +/* + * Issue fc pass-thru request via bsg interface + */ +int fc_lport_bsg_request(struct fc_bsg_job *job); + + /* * REMOTE PORT LAYER *****************************/ -- cgit v1.2.3-59-g8ed1b From 3a3b42bf89a9b90ae9ed2c57fdc378e5473a0ef9 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 3 Nov 2009 11:47:39 -0800 Subject: [SCSI] libfc: Formatting cleanups across libfc This patch makes a variety of cleanup changes to 
all libfc files. This patch adds kernel-doc headers to all functions lacking them and attempts to better format existing headers. It also add kernel-doc headers to structures. This patch ensures that the current naming conventions for local ports, remote ports and remote port private data is upheld in the following manner. struct instance (i.e. variable name) -------------------------------------------------- fc_lport lport fc_rport rport fc_rport_libfc_priv rpriv fc_rport_priv rdata I also renamed dns_rp and ptp_rp to dns_rdata and ptp_rdata respectively. I used emacs 'indent-region' and 'tabify' on all libfc files to correct spacing alignments. I feel sorry for anyone attempting to review this patch. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 78 ++-- drivers/scsi/libfc/fc_elsct.c | 33 +- drivers/scsi/libfc/fc_exch.c | 587 +++++++++++++++++-------- drivers/scsi/libfc/fc_fcp.c | 667 ++++++++++++++++------------ drivers/scsi/libfc/fc_libfc.c | 4 +- drivers/scsi/libfc/fc_libfc.h | 34 +- drivers/scsi/libfc/fc_lport.c | 247 ++++++----- drivers/scsi/libfc/fc_rport.c | 235 +++++----- include/scsi/libfc.h | 994 +++++++++++++++++++++--------------------- 9 files changed, 1624 insertions(+), 1255 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index a4bdec28fef5..7b790ad15a93 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -53,8 +53,8 @@ static int fc_disc_single(struct fc_lport *, struct fc_disc_port *); static void fc_disc_restart(struct fc_disc *); /** - * fc_disc_stop_rports() - delete all the remote ports associated with the lport - * @disc: The discovery job to stop rports on + * fc_disc_stop_rports() - Delete all the remote ports associated with the lport + * @disc: The discovery job to stop remote ports on * * Locking Note: This function expects that the lport mutex is locked before * calling it. @@ -74,9 +74,9 @@ void fc_disc_stop_rports(struct fc_disc *disc) /** * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) - * @sp: Current sequence of the RSCN exchange - * @fp: RSCN Frame - * @lport: Fibre Channel host port instance + * @sp: The sequence of the RSCN exchange + * @fp: The RSCN frame + * @lport: The local port that the request will be sent on * * Locking Note: This function expects that the disc_mutex is locked * before it is called. @@ -185,9 +185,9 @@ reject: /** * fc_disc_recv_req() - Handle incoming requests - * @sp: Current sequence of the request exchange - * @fp: The frame - * @lport: The FC local port + * @sp: The sequence of the request exchange + * @fp: The request frame + * @lport: The local port receiving the request * * Locking Note: This function is called from the EM and will lock * the disc_mutex before calling the handler for the @@ -215,7 +215,7 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_disc_restart() - Restart discovery - * @lport: FC discovery context + * @disc: The discovery object to be restarted * * Locking Note: This function expects that the disc mutex * is already locked. 
@@ -242,9 +242,9 @@ static void fc_disc_restart(struct fc_disc *disc) } /** - * fc_disc_start() - Fibre Channel Target discovery - * @lport: FC local port - * @disc_callback: function to be called when discovery is complete + * fc_disc_start() - Start discovery on a local port + * @lport: The local port to have discovery started on + * @disc_callback: Callback function to be called when discovery is complete */ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, enum fc_disc_event), @@ -265,8 +265,8 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, /** * fc_disc_done() - Discovery has been completed - * @disc: FC discovery context - * @event: discovery completion status + * @disc: The discovery context + * @event: The discovery completion status * * Locking Note: This function expects that the disc mutex is locked before * it is called. The discovery callback is then made with the lock released, @@ -286,8 +286,8 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) } /* - * Go through all remote ports. If they were found in the latest - * discovery, reverify or log them in. Otherwise, log them out. + * Go through all remote ports. If they were found in the latest + * discovery, reverify or log them in. Otherwise, log them out. * Skip ports which were never discovered. These are the dNS port * and ports which were created by PLOGI. */ @@ -307,8 +307,8 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) /** * fc_disc_error() - Handle error on dNS request - * @disc: FC discovery context - * @fp: The frame pointer + * @disc: The discovery context + * @fp: The error code encoded as a frame pointer */ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) { @@ -344,7 +344,7 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) /** * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request - * @lport: FC discovery context + * @lport: The discovery context * * Locking Note: This function expects that the disc_mutex is locked * before it is called. @@ -378,9 +378,9 @@ err: /** * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response. - * @lport: Fibre Channel host port instance - * @buf: GPN_FT response buffer - * @len: size of response buffer + * @lport: The local port the GPN_FT was received on + * @buf: The GPN_FT response buffer + * @len: The size of response buffer * * Goes through the list of IDs and names resulting from a request. */ @@ -479,10 +479,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) } /** - * fc_disc_timeout() - Retry handler for the disc component - * @work: Structure holding disc obj that needs retry discovery - * - * Handle retry of memory allocation for remote ports. 
+ * fc_disc_timeout() - Handler for discovery timeouts + * @work: Structure holding discovery context that needs to retry discovery */ static void fc_disc_timeout(struct work_struct *work) { @@ -496,9 +494,9 @@ static void fc_disc_timeout(struct work_struct *work) /** * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT) - * @sp: Current sequence of GPN_FT exchange - * @fp: response frame - * @lp_arg: Fibre Channel host port instance + * @sp: The sequence that the GPN_FT response was received on + * @fp: The GPN_FT response frame + * @lp_arg: The discovery context * * Locking Note: This function is called without disc mutex held, and * should do all its processing with the mutex held @@ -569,9 +567,9 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, /** * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID) - * @sp: exchange sequence - * @fp: response frame - * @rdata_arg: remote port private data + * @sp: The sequence the GPN_ID is on + * @fp: The response frame + * @rdata_arg: The remote port that sent the GPN_ID response * * Locking Note: This function is called without disc mutex held. */ @@ -639,7 +637,7 @@ out: /** * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request - * @lport: local port + * @lport: The local port to initiate discovery on * @rdata: remote port private data * * Locking Note: This function expects that the disc_mutex is locked @@ -656,7 +654,7 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport, if (!fp) return -ENOMEM; if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID, - fc_disc_gpn_id_resp, rdata, lport->e_d_tov)) + fc_disc_gpn_id_resp, rdata, lport->e_d_tov)) return -ENOMEM; kref_get(&rdata->kref); return 0; @@ -664,8 +662,8 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport, /** * fc_disc_single() - Discover the directory information for a single target - * @lport: local port - * @dp: The port to rediscover + * @lport: The local port the remote port is associated with + * @dp: The port to rediscover * * Locking Note: This function expects that the disc_mutex is locked * before it is called. @@ -683,7 +681,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) /** * fc_disc_stop() - Stop discovery for a given lport - * @lport: The lport that discovery should stop for + * @lport: The local port that discovery should stop on */ void fc_disc_stop(struct fc_lport *lport) { @@ -697,7 +695,7 @@ void fc_disc_stop(struct fc_lport *lport) /** * fc_disc_stop_final() - Stop discovery for a given lport - * @lport: The lport that discovery should stop for + * @lport: The lport that discovery should stop on * * This function will block until discovery has been * completely stopped and all rports have been deleted. 
@@ -709,8 +707,8 @@ void fc_disc_stop_final(struct fc_lport *lport) } /** - * fc_disc_init() - Initialize the discovery block - * @lport: FC local port + * fc_disc_init() - Initialize the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be initialized */ int fc_disc_init(struct fc_lport *lport) { diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index aae54fe3b299..01be43f80f34 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -28,17 +28,22 @@ #include #include -/* - * fc_elsct_send - sends ELS/CT frame +/** + * fc_elsct_send() - Send an ELS or CT frame + * @lport: The local port to send the frame on + * @did: The destination ID for the frame + * @fp: The frame to be sent + * @op: The operational code + * @resp: The callback routine when the response is received + * @arg: The argument to pass to the response callback routine + * @timer_msec: The timeout period for the frame (in msecs) */ -struct fc_seq *fc_elsct_send(struct fc_lport *lport, - u32 did, - struct fc_frame *fp, - unsigned int op, - void (*resp)(struct fc_seq *, - struct fc_frame *fp, - void *arg), - void *arg, u32 timer_msec) +struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timer_msec) { enum fc_rctl r_ctl; enum fc_fh_type fh_type; @@ -65,6 +70,10 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, } EXPORT_SYMBOL(fc_elsct_send); +/** + * fc_elsct_init() - Initialize the ELS/CT layer + * @lport: The local port to initialize the ELS/CT layer for + */ int fc_elsct_init(struct fc_lport *lport) { if (!lport->tt.elsct_send) @@ -75,8 +84,8 @@ int fc_elsct_init(struct fc_lport *lport) EXPORT_SYMBOL(fc_elsct_init); /** - * fc_els_resp_type() - return string describing ELS response for debug. - * @fp: frame pointer with possible error code. + * fc_els_resp_type() - Return a string describing the ELS response + * @fp: The frame pointer or possible error code */ const char *fc_els_resp_type(struct fc_frame *fp) { diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 751a485685d9..0f45bb8521f1 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -37,7 +37,7 @@ u16 fc_cpu_mask; /* cpu mask for possible cpus */ EXPORT_SYMBOL(fc_cpu_mask); static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ -static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ +static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ /* * Structure and function definitions for managing Fibre Channel Exchanges @@ -52,34 +52,46 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ * fc_seq holds the state for an individual sequence. */ -/* - * Per cpu exchange pool +/** + * struct fc_exch_pool - Per cpu exchange pool + * @next_index: Next possible free exchange index + * @total_exches: Total allocated exchanges + * @lock: Exch pool lock + * @ex_list: List of exchanges * * This structure manages per cpu exchanges in array of exchange pointers. * This array is allocated followed by struct fc_exch_pool memory for * assigned range of exchanges to per cpu pool. 
*/ struct fc_exch_pool { - u16 next_index; /* next possible free exchange index */ - u16 total_exches; /* total allocated exchanges */ - spinlock_t lock; /* exch pool lock */ - struct list_head ex_list; /* allocated exchanges list */ + u16 next_index; + u16 total_exches; + spinlock_t lock; + struct list_head ex_list; }; -/* - * Exchange manager. +/** + * struct fc_exch_mgr - The Exchange Manager (EM). + * @class: Default class for new sequences + * @kref: Reference counter + * @min_xid: Minimum exchange ID + * @max_xid: Maximum exchange ID + * @ep_pool: Reserved exchange pointers + * @pool_max_index: Max exch array index in exch pool + * @pool: Per cpu exch pool + * @stats: Statistics structure * * This structure is the center for creating exchanges and sequences. * It manages the allocation of exchange IDs. */ struct fc_exch_mgr { - enum fc_class class; /* default class for sequences */ - struct kref kref; /* exchange mgr reference count */ - u16 min_xid; /* min exchange ID */ - u16 max_xid; /* max exchange ID */ - mempool_t *ep_pool; /* reserve ep's */ - u16 pool_max_index; /* max exch array index in exch pool */ - struct fc_exch_pool *pool; /* per cpu exch pool */ + enum fc_class class; + struct kref kref; + u16 min_xid; + u16 max_xid; + mempool_t *ep_pool; + u16 pool_max_index; + struct fc_exch_pool *pool; /* * currently exchange mgr stats are updated but not used. @@ -97,6 +109,18 @@ struct fc_exch_mgr { }; #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) +/** + * struct fc_exch_mgr_anchor - primary structure for list of EMs + * @ema_list: Exchange Manager Anchor list + * @mp: Exchange Manager associated with this anchor + * @match: Routine to determine if this anchor's EM should be used + * + * When walking the list of anchors the match routine will be called + * for each anchor to determine if that EM should be used. The last + * anchor in the list will always match to handle any exchanges not + * handled by other EMs. The non-default EMs would be added to the + * anchor list by HW that provides FCoE offloads. + */ struct fc_exch_mgr_anchor { struct list_head ema_list; struct fc_exch_mgr *mp; @@ -196,6 +220,15 @@ static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT; #define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0])) +/** + * fc_exch_name_lookup() - Lookup name by opcode + * @op: Opcode to be looked up + * @table: Opcode/name table + * @max_index: Index not to be exceeded + * + * This routine is used to determine a human-readable string identifying + * a R_CTL opcode. + */ static inline const char *fc_exch_name_lookup(unsigned int op, char **table, unsigned int max_index) { @@ -208,25 +241,34 @@ static inline const char *fc_exch_name_lookup(unsigned int op, char **table, return name; } +/** + * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup() + * @op: The opcode to be looked up + */ static const char *fc_exch_rctl_name(unsigned int op) { return fc_exch_name_lookup(op, fc_exch_rctl_names, FC_TABLE_SIZE(fc_exch_rctl_names)); } -/* - * Hold an exchange - keep it from being freed. +/** + * fc_exch_hold() - Increment an exchange's reference count + * @ep: Echange to be held */ -static void fc_exch_hold(struct fc_exch *ep) +static inline void fc_exch_hold(struct fc_exch *ep) { atomic_inc(&ep->ex_refcnt); } -/* - * setup fc hdr by initializing few more FC header fields and sof/eof. 
- * Initialized fields by this func: - * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt - * - sof and eof +/** + * fc_exch_setup_hdr() - Initialize a FC header by initializing some fields + * and determine SOF and EOF. + * @ep: The exchange to that will use the header + * @fp: The frame whose header is to be modified + * @f_ctl: F_CTL bits that will be used for the frame header + * + * The fields initialized by this routine are: fh_ox_id, fh_rx_id, + * fh_seq_id, fh_seq_cnt and the SOF and EOF. */ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, u32 f_ctl) @@ -243,7 +285,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, if (fc_sof_needs_ack(ep->class)) fr_eof(fp) = FC_EOF_N; /* - * Form f_ctl. + * From F_CTL. * The number of fill bytes to make the length a 4-byte * multiple is the low order 2-bits of the f_ctl. * The fill itself will have been cleared by the frame @@ -273,9 +315,12 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, fh->fh_seq_cnt = htons(ep->seq.cnt); } -/* - * Release a reference to an exchange. - * If the refcnt goes to zero and the exchange is complete, it is freed. +/** + * fc_exch_release() - Decrement an exchange's reference count + * @ep: Exchange to be released + * + * If the reference count reaches zero and the exchange is complete, + * it is freed. */ static void fc_exch_release(struct fc_exch *ep) { @@ -290,6 +335,10 @@ static void fc_exch_release(struct fc_exch *ep) } } +/** + * fc_exch_done_locked() - Complete an exchange with the exchange lock held + * @ep: The exchange that is complete + */ static int fc_exch_done_locked(struct fc_exch *ep) { int rc = 1; @@ -314,6 +363,15 @@ static int fc_exch_done_locked(struct fc_exch *ep) return rc; } +/** + * fc_exch_ptr_get() - Return an exchange from an exchange pool + * @pool: Exchange Pool to get an exchange from + * @index: Index of the exchange within the pool + * + * Use the index to get an exchange from within an exchange pool. exches + * will point to an array of exchange pointers. The index will select + * the exchange within the array. + */ static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, u16 index) { @@ -321,12 +379,22 @@ static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, return exches[index]; } +/** + * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool + * @pool: The pool to assign the exchange to + * @index: The index in the pool where the exchange will be assigned + * @ep: The exchange to assign to the pool + */ static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, struct fc_exch *ep) { ((struct fc_exch **)(pool + 1))[index] = ep; } +/** + * fc_exch_delete() - Delete an exchange + * @ep: The exchange to be deleted + */ static void fc_exch_delete(struct fc_exch *ep) { struct fc_exch_pool *pool; @@ -342,8 +410,14 @@ static void fc_exch_delete(struct fc_exch *ep) fc_exch_release(ep); /* drop hold for exch in mp */ } -/* - * Internal version of fc_exch_timer_set - used with lock held. +/** + * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the + * the exchange lock held + * @ep: The exchange whose timer will start + * @timer_msec: The timeout period + * + * Used for upper level protocols to time out the exchange. + * The timer is cancelled when it fires or when the exchange completes. 
*/ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, unsigned int timer_msec) @@ -358,12 +432,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, fc_exch_hold(ep); /* hold for timer */ } -/* - * Set timer for an exchange. - * The time is a minimum delay in milliseconds until the timer fires. - * Used for upper level protocols to time out the exchange. - * The timer is cancelled when it fires or when the exchange completes. - * Returns non-zero if a timer couldn't be allocated. +/** + * fc_exch_timer_set() - Lock the exchange and set the timer + * @ep: The exchange whose timer will start + * @timer_msec: The timeout period */ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) { @@ -373,15 +445,18 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) } /** - * send a frame using existing sequence and exchange. + * fc_seq_send() - Send a frame using existing sequence/exchange pair + * @lport: The local port that the exchange will be sent on + * @sp: The sequence to be sent + * @fp: The frame to be sent on the exchange */ -static int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, +static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp) { struct fc_exch *ep; struct fc_frame_header *fh = fc_frame_header_get(fp); int error; - u32 f_ctl; + u32 f_ctl; ep = fc_seq_exch(sp); WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); @@ -403,7 +478,7 @@ static int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, /* * Send the frame. */ - error = lp->tt.frame_send(lp, fp); + error = lport->tt.frame_send(lport, fp); /* * Update the exchange and sequence flags, @@ -419,9 +494,9 @@ static int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, } /** - * fc_seq_alloc() - Allocate a sequence. - * @ep: Exchange pointer - * @seq_id: Sequence ID to allocate a sequence for + * fc_seq_alloc() - Allocate a sequence for a given exchange + * @ep: The exchange to allocate a new sequence for + * @seq_id: The sequence ID to be used * * We don't support multiple originated sequences on the same exchange. * By implication, any previously originated sequence on this exchange @@ -438,6 +513,11 @@ static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) return sp; } +/** + * fc_seq_start_next_locked() - Allocate a new sequence on the same + * exchange as the supplied sequence + * @sp: The sequence/exchange to get a new sequence for + */ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) { struct fc_exch *ep = fc_seq_exch(sp); @@ -449,8 +529,9 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) } /** - * Allocate a new sequence on the same exchange as the supplied sequence. - * This will never return NULL. + * fc_seq_start_next() - Lock the exchange and get a new sequence + * for a given sequence/exchange pair + * @sp: The sequence/exchange to get a new exchange for */ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) { @@ -464,9 +545,11 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) } /** - * This function is for seq_exch_abort function pointer in - * struct libfc_function_template, see comment block on - * seq_exch_abort for description of this function. + * fc_seq_exch_abort() - Abort an exchange and sequence + * @req_sp: The sequence to be aborted + * @timer_msec: The period of time to wait before aborting + * + * Generally called because of a timeout or an abort from the upper layer. 
*/ static int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) @@ -519,9 +602,9 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp, return error; } -/* - * Exchange timeout - handle exchange timer expiration. - * The timer will have been cancelled before this is called. +/** + * fc_exch_timeout() - Handle exchange timer expiration + * @work: The work_struct identifying the exchange that timed out */ static void fc_exch_timeout(struct work_struct *work) { @@ -570,9 +653,9 @@ done: } /** - * fc_exch_em_alloc() - allocate an exchange from a specified EM. - * @lport: ptr to the local port - * @mp: ptr to the exchange manager + * fc_exch_em_alloc() - Allocate an exchange from a specified EM. + * @lport: The local port that the exchange is for + * @mp: The exchange manager that will allocate the exchange * * Returns pointer to allocated fc_exch with exch lock held. */ @@ -640,14 +723,15 @@ err: } /** - * fc_exch_alloc() - allocate an exchange. - * @lport: ptr to the local port - * @fp: ptr to the FC frame + * fc_exch_alloc() - Allocate an exchange from an EM on a + * local port's list of EMs. + * @lport: The local port that will own the exchange + * @fp: The FC frame that the exchange will be for * - * This function walks the list of the exchange manager(EM) - * anchors to select a EM for new exchange allocation. The - * EM is selected having either a NULL match function pointer - * or call to match function returning true. + * This function walks the list of exchange manager(EM) + * anchors to select an EM for a new exchange allocation. The + * EM is selected when a NULL match function pointer is encountered + * or when a call to a match function returns true. */ static struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) @@ -665,8 +749,10 @@ static struct fc_exch *fc_exch_alloc(struct fc_lport *lport, return NULL; } -/* - * Lookup and hold an exchange. +/** + * fc_exch_find() - Lookup and hold an exchange + * @mp: The exchange manager to lookup the exchange from + * @xid: The XID of the exchange to look up */ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) { @@ -689,8 +775,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) /** * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and - * the memory allocated for the related objects may be freed. - * @sp: Sequence pointer + * the memory allocated for the related objects may be freed. + * @sp: The sequence that has completed */ static void fc_exch_done(struct fc_seq *sp) { @@ -704,8 +790,12 @@ static void fc_exch_done(struct fc_seq *sp) fc_exch_delete(ep); } -/* - * Allocate a new exchange as responder. +/** + * fc_exch_resp() - Allocate a new exchange for a response frame + * @lport: The local port that the exchange was for + * @mp: The exchange manager to allocate the exchange from + * @fp: The response frame + * * Sets the responder ID in the frame header. */ static struct fc_exch *fc_exch_resp(struct fc_lport *lport, @@ -746,8 +836,13 @@ static struct fc_exch *fc_exch_resp(struct fc_lport *lport, return ep; } -/* - * Find a sequence for receive where the other end is originating the sequence. 
+/** + * fc_seq_lookup_recip() - Find a sequence where the other end + * originated the sequence + * @lport: The local port that the frame was sent to + * @mp: The Exchange Manager to lookup the exchange from + * @fp: The frame associated with the sequence we're looking for + * * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold * on the ep that should be released by the caller. */ @@ -853,10 +948,12 @@ rel: return reject; } -/* - * Find the sequence for a frame being received. - * We originated the sequence, so it should be found. - * We may or may not have originated the exchange. +/** + * fc_seq_lookup_orig() - Find a sequence where this end + * originated the sequence + * @mp: The Exchange Manager to lookup the exchange from + * @fp: The frame associated with the sequence we're looking for + * * Does not hold the sequence for the caller. */ static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp, @@ -888,8 +985,12 @@ static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp, return sp; } -/* - * Set addresses for an exchange. +/** + * fc_exch_set_addr() - Set the source and destination IDs for an exchange + * @ep: The exchange to set the addresses for + * @orig_id: The originator's ID + * @resp_id: The responder's ID + * * Note this must be done before the first sequence of the exchange is sent. */ static void fc_exch_set_addr(struct fc_exch *ep, @@ -906,11 +1007,11 @@ static void fc_exch_set_addr(struct fc_exch *ep, } /** - * fc_seq_els_rsp_send() - Send ELS response using mainly infomation - * in exchange and sequence in EM layer. - * @sp: Sequence pointer - * @els_cmd: ELS command - * @els_data: ELS data + * fc_seq_els_rsp_send() - Send an ELS response using information from + * the existing sequence/exchange. + * @sp: The sequence/exchange to get information from + * @els_cmd: The ELS command to be sent + * @els_data: The ELS data to be sent */ static void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, struct fc_seq_els_data *els_data) @@ -933,8 +1034,12 @@ static void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, } } -/* - * Send a sequence, which is also the last sequence in the exchange. +/** + * fc_seq_send_last() - Send a sequence that is the last in the exchange + * @sp: The sequence that is to be sent + * @fp: The frame that will be sent on the sequence + * @rctl: The R_CTL information to be sent + * @fh_type: The frame header type */ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, enum fc_rctl rctl, enum fc_fh_type fh_type) @@ -948,9 +1053,12 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, fc_seq_send(ep->lp, sp, fp); } -/* +/** + * fc_seq_send_ack() - Send an acknowledgement that we've received a frame + * @sp: The sequence to send the ACK on + * @rx_fp: The received frame that is being acknowledged + * * Send ACK_1 (or equiv.) indicating we received something. - * The frame we're acking is supplied. */ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) { @@ -958,14 +1066,14 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) struct fc_frame_header *rx_fh; struct fc_frame_header *fh; struct fc_exch *ep = fc_seq_exch(sp); - struct fc_lport *lp = ep->lp; + struct fc_lport *lport = ep->lp; unsigned int f_ctl; /* * Don't send ACKs for class 3.
*/ if (fc_sof_needs_ack(fr_sof(rx_fp))) { - fp = fc_frame_alloc(lp, 0); + fp = fc_frame_alloc(lport, 0); if (!fp) return; @@ -1000,12 +1108,16 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) else fr_eof(fp) = FC_EOF_N; - (void) lp->tt.frame_send(lp, fp); + lport->tt.frame_send(lport, fp); } } -/* - * Send BLS Reject. +/** + * fc_exch_send_ba_rjt() - Send BLS Reject + * @rx_fp: The frame being rejected + * @reason: The reason the frame is being rejected + * @explan: The explaination for the rejection + * * This is for rejecting BA_ABTS only. */ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, @@ -1016,11 +1128,11 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, struct fc_frame_header *rx_fh; struct fc_frame_header *fh; struct fc_ba_rjt *rp; - struct fc_lport *lp; + struct fc_lport *lport; unsigned int f_ctl; - lp = fr_dev(rx_fp); - fp = fc_frame_alloc(lp, sizeof(*rp)); + lport = fr_dev(rx_fp); + fp = fc_frame_alloc(lport, sizeof(*rp)); if (!fp) return; fh = fc_frame_header_get(fp); @@ -1065,13 +1177,17 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, if (fc_sof_needs_ack(fr_sof(fp))) fr_eof(fp) = FC_EOF_N; - (void) lp->tt.frame_send(lp, fp); + lport->tt.frame_send(lport, fp); } -/* - * Handle an incoming ABTS. This would be for target mode usually, - * but could be due to lost FCP transfer ready, confirm or RRQ. - * We always handle this as an exchange abort, ignoring the parameter. +/** + * fc_exch_recv_abts() - Handle an incoming ABTS + * @ep: The exchange the abort was on + * @rx_fp: The ABTS frame + * + * This would be for target mode usually, but could be due to lost + * FCP transfer ready, confirm or RRQ. We always handle this as an + * exchange abort, ignoring the parameter. */ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) { @@ -1120,10 +1236,14 @@ free: fc_frame_free(rx_fp); } -/* - * Handle receive where the other end is originating the sequence. 
+/** + * fc_exch_recv_req() - Handler for an incoming request where the other + * end is originating the sequence + * @lport: The local port that received the request + * @mp: The EM that the exchange is on + * @fp: The request frame */ -static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, +static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); @@ -1137,14 +1257,14 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, /* We can have the wrong fc_lport at this point with NPIV, which is a * problem now that we know a new exchange needs to be allocated */ - lp = fc_vport_id_lookup(lp, ntoh24(fh->fh_d_id)); - if (!lp) { + lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); + if (!lport) { fc_frame_free(fp); return; } fr_seq(fp) = NULL; - reject = fc_seq_lookup_recip(lp, mp, fp); + reject = fc_seq_lookup_recip(lport, mp, fp); if (reject == FC_RJT_NONE) { sp = fr_seq(fp); /* sequence will be held */ ep = fc_seq_exch(sp); @@ -1167,17 +1287,21 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, if (ep->resp) ep->resp(sp, fp, ep->arg); else - lp->tt.lport_recv(lp, sp, fp); + lport->tt.lport_recv(lport, sp, fp); fc_exch_release(ep); /* release from lookup */ } else { - FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject); + FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n", + reject); fc_frame_free(fp); } } -/* - * Handle receive where the other end is originating the sequence in - * response to our exchange. +/** + * fc_exch_recv_seq_resp() - Handler for an incoming response where the other + * end is the originator of the sequence that is a + * response to our initial exchange + * @mp: The EM that the exchange is on + * @fp: The response frame */ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) { @@ -1268,8 +1392,11 @@ out: fc_frame_free(fp); } -/* - * Handle receive for a sequence where other end is responding to our sequence. +/** + * fc_exch_recv_resp() - Handler for a sequence where other end is + * responding to our sequence + * @mp: The EM that the exchange is on + * @fp: The response frame */ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) { @@ -1285,9 +1412,13 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) fc_frame_free(fp); } -/* - * Handle the response to an ABTS for exchange or sequence. - * This can be BA_ACC or BA_RJT. +/** + * fc_exch_abts_resp() - Handler for a response to an ABTS + * @ep: The exchange that the frame is on + * @fp: The response frame + * + * This response would be to an ABTS cancelling an exchange or sequence. + * The response can be either BA_ACC or BA_RJT */ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) { @@ -1362,9 +1493,12 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) } -/* - * Receive BLS sequence. - * This is always a sequence initiated by the remote side. +/** + * fc_exch_recv_bls() - Handler for a BLS sequence + * @mp: The EM that the exchange is on + * @fp: The request frame + * + * The BLS frame is always a sequence initiated by the remote side. * We may be either the originator or recipient of the exchange.
*/ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) @@ -1421,8 +1555,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) fc_exch_release(ep); /* release hold taken by fc_exch_find */ } -/* - * Accept sequence with LS_ACC. +/** + * fc_seq_ls_acc() - Accept sequence with LS_ACC + * @req_sp: The request sequence + * * If this fails due to allocation or transmit congestion, assume the * originator will repeat the sequence. */ @@ -1442,8 +1578,12 @@ static void fc_seq_ls_acc(struct fc_seq *req_sp) } } -/* - * Reject sequence with ELS LS_RJT. +/** + * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT + * @req_sp: The request sequence + * @reason: The reason the sequence is being rejected + * @explan: The explaination for the rejection + * * If this fails due to allocation or transmit congestion, assume the * originator will repeat the sequence. */ @@ -1466,6 +1606,10 @@ static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason, } } +/** + * fc_exch_reset() - Reset an exchange + * @ep: The exchange to be reset + */ static void fc_exch_reset(struct fc_exch *ep) { struct fc_seq *sp; @@ -1500,16 +1644,16 @@ static void fc_exch_reset(struct fc_exch *ep) } /** - * fc_exch_pool_reset() - Resets an per cpu exches pool. - * @lport: ptr to the local port - * @pool: ptr to the per cpu exches pool - * @sid: source FC ID - * @did: destination FC ID + * fc_exch_pool_reset() - Reset a per cpu exchange pool + * @lport: The local port that the exchange pool is on + * @pool: The exchange pool to be reset + * @sid: The source ID + * @did: The destination ID * - * Resets an per cpu exches pool, releasing its all sequences - * and exchanges. If sid is non-zero, then reset only exchanges - * we sourced from that FID. If did is non-zero, reset only - * exchanges destined to that FID. + * Resets a per cpu exches pool, releasing all of its sequences + * and exchanges. If sid is non-zero then reset only exchanges + * we sourced from the local port's FID. If did is non-zero then + * only reset exchanges destined for the local port's FID. */ static void fc_exch_pool_reset(struct fc_lport *lport, struct fc_exch_pool *pool, @@ -1543,15 +1687,15 @@ restart: } /** - * fc_exch_mgr_reset() - Resets all EMs of a lport - * @lport: ptr to the local port - * @sid: source FC ID - * @did: destination FC ID + * fc_exch_mgr_reset() - Reset all EMs of a local port + * @lport: The local port whose EMs are to be reset + * @sid: The source ID + * @did: The destination ID * - * Reset all EMs of a lport, releasing its all sequences and - * exchanges. If sid is non-zero, then reset only exchanges - * we sourced from that FID. If did is non-zero, reset only - * exchanges destined to that FID. + * Reset all EMs associated with a given local port. Release all + * sequences and exchanges. If sid is non-zero then reset only the + * exchanges sent from the local port's FID. If did is non-zero then + * reset only exchanges destined for the local port's FID. */ void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) { @@ -1567,8 +1711,11 @@ void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) } EXPORT_SYMBOL(fc_exch_mgr_reset); -/* - * Handle incoming ELS REC - Read Exchange Concise. +/** + * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests + * @sp: The sequence the REC is on + * @rfp: The REC frame + * * Note that the requesting port may be different than the S_ID in the request. 
*/ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) @@ -1650,10 +1797,11 @@ reject: fc_frame_free(rfp); } -/* - * Handle response from RRQ. - * Not much to do here, really. - * Should report errors. +/** + * fc_exch_rrq_resp() - Handler for RRQ responses + * @sp: The sequence that the RRQ is on + * @fp: The RRQ frame + * @arg: The exchange that the RRQ is on * * TODO: fix error handler. */ @@ -1695,11 +1843,25 @@ cleanup: /** - * This function is for exch_seq_send function pointer in - * struct libfc_function_template, see comment block on - * exch_seq_send for description of this function. + * fc_exch_seq_send() - Send a frame using a new exchange and sequence + * @lport: The local port to send the frame on + * @fp: The frame to be sent + * @resp: The response handler for this request + * @destructor: The destructor for the exchange + * @arg: The argument to be passed to the response handler + * @timer_msec: The timeout period for the exchange + * + * The frame pointer with some of the header's fields must be + * filled before calling this routine, those fields are: + * + * - routing control + * - FC port did + * - FC port sid + * - FC header type + * - frame control + * - parameter or relative offset */ -static struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, +static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, struct fc_frame *fp, void (*resp)(struct fc_seq *, struct fc_frame *fp, @@ -1713,7 +1875,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, struct fc_frame_header *fh; int rc = 1; - ep = fc_exch_alloc(lp, fp); + ep = fc_exch_alloc(lport, fp); if (!ep) { fc_frame_free(fp); return NULL; @@ -1725,7 +1887,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, ep->destructor = destructor; ep->arg = arg; ep->r_a_tov = FC_DEF_R_A_TOV; - ep->lp = lp; + ep->lp = lport; sp = &ep->seq; ep->fh_type = fh->fh_type; /* save for possbile timeout handling */ @@ -1733,10 +1895,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, fc_exch_setup_hdr(ep, fp, ep->f_ctl); sp->cnt++; - if (ep->xid <= lp->lro_xid) + if (ep->xid <= lport->lro_xid) fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); - if (unlikely(lp->tt.frame_send(lp, fp))) + if (unlikely(lport->tt.frame_send(lport, fp))) goto err; if (timer_msec) @@ -1755,21 +1917,23 @@ err: return NULL; } -/* - * Send ELS RRQ - Reinstate Recovery Qualifier. +/** + * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command + * @ep: The exchange to send the RRQ on + * * This tells the remote port to stop blocking the use of * the exchange and the seq_cnt range. */ static void fc_exch_rrq(struct fc_exch *ep) { - struct fc_lport *lp; + struct fc_lport *lport; struct fc_els_rrq *rrq; struct fc_frame *fp; u32 did; - lp = ep->lp; + lport = ep->lp; - fp = fc_frame_alloc(lp, sizeof(*rrq)); + fp = fc_frame_alloc(lport, sizeof(*rrq)); if (!fp) goto retry; @@ -1785,10 +1949,11 @@ static void fc_exch_rrq(struct fc_exch *ep) did = ep->sid; fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, - fc_host_port_id(lp->host), FC_TYPE_ELS, + fc_host_port_id(lport->host), FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov)) + if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep, + lport->e_d_tov)) return; retry: @@ -1805,8 +1970,10 @@ retry: } -/* - * Handle incoming ELS RRQ - Reset Recovery Qualifier. 
+/** + * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests + * @sp: The sequence that the RRQ is on + * @fp: The RRQ frame */ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) { @@ -1872,6 +2039,12 @@ out: fc_exch_release(ep); /* drop hold from fc_exch_find */ } +/** + * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs + * @lport: The local port to add the exchange manager to + * @mp: The exchange manager to be added to the local port + * @match: The match routine that indicates when this EM should be used + */ struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, struct fc_exch_mgr *mp, bool (*match)(struct fc_frame *)) @@ -1891,6 +2064,10 @@ struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, } EXPORT_SYMBOL(fc_exch_mgr_add); +/** + * fc_exch_mgr_destroy() - Destroy an exchange manager + * @kref: The reference to the EM to be destroyed + */ static void fc_exch_mgr_destroy(struct kref *kref) { struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref); @@ -1900,6 +2077,10 @@ static void fc_exch_mgr_destroy(struct kref *kref) kfree(mp); } +/** + * fc_exch_mgr_del() - Delete an EM from a local port's list + * @ema: The exchange manager anchor identifying the EM to be deleted + */ void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) { /* remove EM anchor from EM anchors list */ @@ -1910,9 +2091,9 @@ void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) EXPORT_SYMBOL(fc_exch_mgr_del); /** - * fc_exch_mgr_list_clone() - share all exchange manager objects - * @src: source lport to clone exchange managers from - * @dst: new lport that takes references to all the exchange managers + * fc_exch_mgr_list_clone() - Share all exchange manager objects + * @src: Source lport to clone exchange managers from + * @dst: New lport that takes references to all the exchange managers */ int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst) { @@ -1929,7 +2110,15 @@ err: return -ENOMEM; } -struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, +/** + * fc_exch_mgr_alloc() - Allocate an exchange manager + * @lport: The local port that the new EM will be associated with + * @class: The default FC class for new exchanges + * @min_xid: The minimum XID for exchanges from the new EM + * @max_xid: The maximum XID for exchanges from the new EM + * @match: The match routine for the new EM + */ +struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, enum fc_class class, u16 min_xid, u16 max_xid, bool (*match)(struct fc_frame *)) @@ -1942,7 +2131,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN || (min_xid & fc_cpu_mask) != 0) { - FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", + FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", min_xid, max_xid); return NULL; } @@ -1985,7 +2174,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, } kref_init(&mp->kref); - if (!fc_exch_mgr_add(lp, mp, match)) { + if (!fc_exch_mgr_add(lport, mp, match)) { free_percpu(mp->pool); goto free_mempool; } @@ -2006,6 +2195,10 @@ free_mp: } EXPORT_SYMBOL(fc_exch_mgr_alloc); +/** + * fc_exch_mgr_free() - Free all exchange managers on a local port + * @lport: The local port whose EMs are to be freed + */ void fc_exch_mgr_free(struct fc_lport *lport) { struct fc_exch_mgr_anchor *ema, *next; @@ -2015,10 +2208,12 @@ void fc_exch_mgr_free(struct fc_lport *lport) } EXPORT_SYMBOL(fc_exch_mgr_free); -/* - * Receive a 
frame +/** + * fc_exch_recv() - Handler for received frames + * @lport: The local port the frame was received on + * @fp: The received frame */ -void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) +void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); struct fc_exch_mgr_anchor *ema; @@ -2026,8 +2221,8 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) u16 oxid; /* lport lock ? */ - if (!lp || lp->state == LPORT_ST_DISABLED) { - FC_LPORT_DBG(lp, "Receiving frames for an lport that " + if (!lport || lport->state == LPORT_ST_DISABLED) { + FC_LPORT_DBG(lport, "Receiving frames for an lport that " "has not been initialized correctly\n"); fc_frame_free(fp); return; @@ -2036,7 +2231,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) f_ctl = ntoh24(fh->fh_f_ctl); oxid = ntohs(fh->fh_ox_id); if (f_ctl & FC_FC_EX_CTX) { - list_for_each_entry(ema, &lp->ema_list, ema_list) { + list_for_each_entry(ema, &lport->ema_list, ema_list) { if ((oxid >= ema->mp->min_xid) && (oxid <= ema->mp->max_xid)) { found = 1; @@ -2045,13 +2240,13 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) } if (!found) { - FC_LPORT_DBG(lp, "Received response for out " + FC_LPORT_DBG(lport, "Received response for out " "of range oxid:%hx\n", oxid); fc_frame_free(fp); return; } } else - ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list); + ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list); /* * If frame is marked invalid, just drop it. @@ -2070,37 +2265,42 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) else if (f_ctl & FC_FC_SEQ_CTX) fc_exch_recv_resp(ema->mp, fp); else - fc_exch_recv_req(lp, ema->mp, fp); + fc_exch_recv_req(lport, ema->mp, fp); break; default: - FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp)); + FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)", + fr_eof(fp)); fc_frame_free(fp); } } EXPORT_SYMBOL(fc_exch_recv); -int fc_exch_init(struct fc_lport *lp) +/** + * fc_exch_init() - Initialize the exchange layer for a local port + * @lport: The local port to initialize the exchange layer for + */ +int fc_exch_init(struct fc_lport *lport) { - if (!lp->tt.seq_start_next) - lp->tt.seq_start_next = fc_seq_start_next; + if (!lport->tt.seq_start_next) + lport->tt.seq_start_next = fc_seq_start_next; - if (!lp->tt.exch_seq_send) - lp->tt.exch_seq_send = fc_exch_seq_send; + if (!lport->tt.exch_seq_send) + lport->tt.exch_seq_send = fc_exch_seq_send; - if (!lp->tt.seq_send) - lp->tt.seq_send = fc_seq_send; + if (!lport->tt.seq_send) + lport->tt.seq_send = fc_seq_send; - if (!lp->tt.seq_els_rsp_send) - lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send; + if (!lport->tt.seq_els_rsp_send) + lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send; - if (!lp->tt.exch_done) - lp->tt.exch_done = fc_exch_done; + if (!lport->tt.exch_done) + lport->tt.exch_done = fc_exch_done; - if (!lp->tt.exch_mgr_reset) - lp->tt.exch_mgr_reset = fc_exch_mgr_reset; + if (!lport->tt.exch_mgr_reset) + lport->tt.exch_mgr_reset = fc_exch_mgr_reset; - if (!lp->tt.seq_exch_abort) - lp->tt.seq_exch_abort = fc_seq_exch_abort; + if (!lport->tt.seq_exch_abort) + lport->tt.seq_exch_abort = fc_seq_exch_abort; return 0; } @@ -2141,7 +2341,10 @@ int fc_setup_exch_mgr() return 0; } -void fc_destroy_exch_mgr(void) +/** + * fc_destroy_exch_mgr() - Destroy an exchange manager + */ +void fc_destroy_exch_mgr() { kmem_cache_destroy(fc_em_cachep); } diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 
98279fe0d0c7..970b54f653b7 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -67,10 +67,16 @@ struct kmem_cache *scsi_pkt_cachep; #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) #define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) +/** + * struct fc_fcp_internal - FCP layer internal data + * @scsi_pkt_pool: Memory pool to draw FCP packets from + * @scsi_pkt_queue: Current FCP packets + * @throttled: The FCP packet queue is throttled + */ struct fc_fcp_internal { - mempool_t *scsi_pkt_pool; + mempool_t *scsi_pkt_pool; struct list_head scsi_pkt_queue; - u8 throttled; + u8 throttled; }; #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) @@ -84,9 +90,9 @@ static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *); static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); static void fc_fcp_complete_locked(struct fc_fcp_pkt *); static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); -static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp); +static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); static void fc_timeout_error(struct fc_fcp_pkt *); -static void fc_fcp_timeout(unsigned long data); +static void fc_fcp_timeout(unsigned long); static void fc_fcp_rec(struct fc_fcp_pkt *); static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); @@ -125,23 +131,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); #define FC_FCP_DFLT_QUEUE_DEPTH 32 /** - * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet - * @lp: fc lport struct - * @gfp: gfp flags for allocation + * fc_fcp_pkt_alloc() - Allocate a fcp_pkt + * @lport: The local port that the FCP packet is for + * @gfp: GFP flags for allocation * - * This is used by upper layer scsi driver. - * Return Value : scsi_pkt structure or null on allocation failure. - * Context : call from process context. no locking required. + * Return value: fcp_pkt structure or null on allocation failure. + * Context: Can be called from process context, no lock is required. */ -static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) +static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) { - struct fc_fcp_internal *si = fc_get_scsi_internal(lp); + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); struct fc_fcp_pkt *fsp; fsp = mempool_alloc(si->scsi_pkt_pool, gfp); if (fsp) { memset(fsp, 0, sizeof(*fsp)); - fsp->lp = lp; + fsp->lp = lport; atomic_set(&fsp->ref_cnt, 1); init_timer(&fsp->timer); INIT_LIST_HEAD(&fsp->list); @@ -151,12 +156,11 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) } /** - * fc_fcp_pkt_release() - release hold on scsi_pkt packet - * @fsp: fcp packet struct + * fc_fcp_pkt_release() - Release hold on a fcp_pkt + * @fsp: The FCP packet to be released * - * This is used by upper layer scsi driver. - * Context : call from process and interrupt context. - * no locking required + * Context: Can be called from process or interrupt context, + * no lock is required. 
*/ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) { @@ -167,20 +171,25 @@ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) } } +/** + * fc_fcp_pkt_hold() - Hold a fcp_pkt + * @fsp: The FCP packet to be held + */ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) { atomic_inc(&fsp->ref_cnt); } /** - * fc_fcp_pkt_destory() - release hold on scsi_pkt packet - * @seq: exchange sequence - * @fsp: fcp packet struct + * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt + * @seq: The sequence that the FCP packet is on (required by destructor API) + * @fsp: The FCP packet to be released + * + * This routine is called by a destructor callback in the exch_seq_send() + * routine of the libfc Transport Template. The 'struct fc_seq' is a required + * argument even though it is not used by this routine. * - * Release hold on scsi_pkt packet set to keep scsi_pkt - * till EM layer exch resource is not freed. - * Context : called from from EM layer. - * no locking required + * Context: No locking required. */ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) { @@ -188,10 +197,10 @@ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) } /** - * fc_fcp_lock_pkt() - lock a packet and get a ref to it. - * @fsp: fcp packet + * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count + * @fsp: The FCP packet to be locked and incremented * - * We should only return error if we return a command to scsi-ml before + * We should only return error if we return a command to SCSI-ml before * getting a response. This can happen in cases where we send a abort, but * do not wait for the response and the abort and command can be passing * each other on the wire/network-layer. @@ -216,18 +225,33 @@ static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp) return 0; } +/** + * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its + * reference count + * @fsp: The FCP packet to be unlocked and decremented + */ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) { spin_unlock_bh(&fsp->scsi_pkt_lock); fc_fcp_pkt_release(fsp); } +/** + * fc_fcp_timer_set() - Start a timer for a fcp_pkt + * @fsp: The FCP packet to start a timer for + * @delay: The timeout period for the timer + */ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) { if (!(fsp->state & FC_SRB_COMPL)) mod_timer(&fsp->timer, jiffies + delay); } +/** + * fc_fcp_send_abort() - Send an abort for exchanges associated with a + * fcp_pkt + * @fsp: The FCP packet to abort exchanges on + */ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) { if (!fsp->seq_ptr) @@ -237,9 +261,14 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); } -/* - * Retry command. - * An abort isn't needed. +/** + * fc_fcp_retry_cmd() - Retry a fcp_pkt + * @fsp: The FCP packet to be retried + * + * Sets the status code to be FC_ERROR and then calls + * fc_fcp_complete_locked() which in turn calls fc_io_compl(). + * fc_io_compl() will notify the SCSI-ml that the I/O is done. + * The SCSI-ml will retry the command. */ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) { @@ -254,43 +283,35 @@ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) fc_fcp_complete_locked(fsp); } -/* - * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP - * transfer for a read I/O indicated by the fc_fcp_pkt. - * @fsp: ptr to the fc_fcp_pkt - * - * This is called in exch_seq_send() when we have a newly allocated - * exchange with a valid exchange id to setup ddp.
- * - * returns: none +/** + * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context + * @fsp: The FCP packet that will manage the DDP frames + * @xid: The XID that will be used for the DDP exchange */ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) { - struct fc_lport *lp; + struct fc_lport *lport; if (!fsp) return; - lp = fsp->lp; + lport = fsp->lp; if ((fsp->req_flags & FC_SRB_READ) && - (lp->lro_enabled) && (lp->tt.ddp_setup)) { - if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd), - scsi_sg_count(fsp->cmd))) + (lport->lro_enabled) && (lport->tt.ddp_setup)) { + if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd), + scsi_sg_count(fsp->cmd))) fsp->xfer_ddp = xid; } } -/* - * fc_fcp_ddp_done - calls to LLD's ddp_done to release any - * DDP related resources for this I/O if it is initialized - * as a ddp transfer - * @fsp: ptr to the fc_fcp_pkt - * - * returns: none +/** + * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any + * DDP related resources for a fcp_pkt + * @fsp: The FCP packet that DDP had been used on */ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) { - struct fc_lport *lp; + struct fc_lport *lport; if (!fsp) return; @@ -298,22 +319,22 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) if (fsp->xfer_ddp == FC_XID_UNKNOWN) return; - lp = fsp->lp; - if (lp->tt.ddp_done) { - fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); + lport = fsp->lp; + if (lport->tt.ddp_done) { + fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp); fsp->xfer_ddp = FC_XID_UNKNOWN; } } - -/* - * Receive SCSI data from target. - * Called after receiving solicited data. +/** + * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target + * @fsp: The FCP packet the data is on + * @fp: The data frame */ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { struct scsi_cmnd *sc = fsp->cmd; - struct fc_lport *lp = fsp->lp; + struct fc_lport *lport = fsp->lp; struct fcoe_dev_stats *stats; struct fc_frame_header *fh; size_t start_offset; @@ -363,13 +384,13 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) if (~crc != le32_to_cpu(fr_crc(fp))) { crc_err: - stats = fc_lport_get_stats(lp); + stats = fc_lport_get_stats(lport); stats->ErrorFrames++; /* FIXME - per cpu count, not total count! */ if (stats->InvalidCRCCount++ < 5) printk(KERN_WARNING "libfc: CRC error on data " "frame for port (%6x)\n", - fc_host_port_id(lp->host)); + fc_host_port_id(lport->host)); /* * Assume the frame is total garbage. * We may have copied it over the good part @@ -397,18 +418,17 @@ crc_err: } /** - * fc_fcp_send_data() - Send SCSI data to target. - * @fsp: ptr to fc_fcp_pkt - * @sp: ptr to this sequence - * @offset: starting offset for this data request - * @seq_blen: the burst length for this data request + * fc_fcp_send_data() - Send SCSI data to a target + * @fsp: The FCP packet the data is on + * @sp: The sequence the data is to be sent on + * @offset: The starting offset for this data request + * @seq_blen: The burst length for this data request * * Called after receiving a Transfer Ready data descriptor. - * if LLD is capable of seq offload then send down seq_blen - * size of data in single frame, otherwise send multiple FC - * frames of max FC frame payload supported by target port. - * - * Returns : 0 for success. 
+ * If the LLD is capable of sequence offload then send down the + * seq_blen amount of data in a single frame, otherwise send + * multiple frames of the maximum frame payload supported by + * the target port. */ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, size_t offset, size_t seq_blen) @@ -417,7 +437,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, struct scsi_cmnd *sc; struct scatterlist *sg; struct fc_frame *fp = NULL; - struct fc_lport *lp = fsp->lp; + struct fc_lport *lport = fsp->lp; size_t remaining; size_t t_blen; size_t tlen; @@ -426,7 +446,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, int error; void *data = NULL; void *page_addr; - int using_sg = lp->sg_supp; + int using_sg = lport->sg_supp; u32 f_ctl; WARN_ON(seq_blen <= 0); @@ -448,10 +468,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, * to max FC frame payload previously set in fsp->max_payload. */ t_blen = fsp->max_payload; - if (lp->seq_offload) { - t_blen = min(seq_blen, (size_t)lp->lso_max); + if (lport->seq_offload) { + t_blen = min(seq_blen, (size_t)lport->lso_max); FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", - fsp, seq_blen, lp->lso_max, t_blen); + fsp, seq_blen, lport->lso_max, t_blen); } WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); @@ -463,7 +483,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, remaining = seq_blen; fh_parm_offset = frame_offset = offset; tlen = 0; - seq = lp->tt.seq_start_next(seq); + seq = lport->tt.seq_start_next(seq); f_ctl = FC_FC_REL_OFF; WARN_ON(!seq); @@ -486,11 +506,11 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, if (tlen % 4) using_sg = 0; if (using_sg) { - fp = _fc_frame_alloc(lp, 0); + fp = _fc_frame_alloc(lport, 0); if (!fp) return -ENOMEM; } else { - fp = fc_frame_alloc(lp, tlen); + fp = fc_frame_alloc(lport, tlen); if (!fp) return -ENOMEM; @@ -550,7 +570,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, /* * send fragment using for a sequence. */ - error = lp->tt.seq_send(lp, seq, fp); + error = lport->tt.seq_send(lport, seq, fp); if (error) { WARN_ON(1); /* send error should be rare */ fc_fcp_retry_cmd(fsp); @@ -562,6 +582,11 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, return 0; } +/** + * fc_fcp_abts_resp() - Send an ABTS response + * @fsp: The FCP packet that is being aborted + * @fp: The response frame + */ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { int ba_done = 1; @@ -598,8 +623,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) } /** - * fc_fcp_reduce_can_queue() - drop can_queue - * @lp: lport to drop queueing for + * fc_fcp_reduce_can_queue() - Reduce the can_queue value for a local port + * @lport: The local port to reduce can_queue on * * If we are getting memory allocation failures, then we may * be trying to execute too many commands. We let the running * commands complete or timeout, then we try again with a new * can_queue. Eventually we will hit the point where we run * on all reserved structs.
*/ -static void fc_fcp_reduce_can_queue(struct fc_lport *lp) +static void fc_fcp_reduce_can_queue(struct fc_lport *lport) { - struct fc_fcp_internal *si = fc_get_scsi_internal(lp); + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); unsigned long flags; int can_queue; - spin_lock_irqsave(lp->host->host_lock, flags); + spin_lock_irqsave(lport->host->host_lock, flags); if (si->throttled) goto done; si->throttled = 1; - can_queue = lp->host->can_queue; + can_queue = lport->host->can_queue; can_queue >>= 1; if (!can_queue) can_queue = 1; - lp->host->can_queue = can_queue; - shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n" + lport->host->can_queue = can_queue; + shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" "Reducing can_queue to %d.\n", can_queue); done: - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); } /** - * fc_fcp_recv() - Reveive FCP frames + * fc_fcp_recv() - Receive an FCP frame * @seq: The sequence the frame is on - * @fp: The FC frame + * @fp: The received frame * @arg: The related FCP packet * - * Return : None - * Context : called from Soft IRQ context - * can not called holding list lock + * Context: Called from Soft IRQ context. Can not be called + * holding the FCP packet list lock. */ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) { @@ -710,6 +734,11 @@ errout: fc_fcp_reduce_can_queue(lport); } +/** + * fc_fcp_resp() - Handler for FCP responses + * @fsp: The FCP packet the response is for + * @fp: The response frame + */ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { struct fc_frame_header *fh; @@ -823,15 +852,16 @@ err: } /** - * fc_fcp_complete_locked() - complete processing of a fcp packet - * @fsp: fcp packet + * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the + * fcp_pkt lock held + * @fsp: The FCP packet to be completed * * This function may sleep if a timer is pending. The packet lock must be * held, and the host lock must not be held.
*/ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) { - struct fc_lport *lp = fsp->lp; + struct fc_lport *lport = fsp->lp; struct fc_seq *seq; struct fc_exch *ep; u32 f_ctl; @@ -862,7 +892,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) struct fc_frame *conf_frame; struct fc_seq *csp; - csp = lp->tt.seq_start_next(seq); + csp = lport->tt.seq_start_next(seq); conf_frame = fc_frame_alloc(fsp->lp, 0); if (conf_frame) { f_ctl = FC_FC_SEQ_INIT; @@ -871,43 +901,48 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, ep->did, ep->sid, FC_TYPE_FCP, f_ctl, 0); - lp->tt.seq_send(lp, csp, conf_frame); + lport->tt.seq_send(lport, csp, conf_frame); } } - lp->tt.exch_done(seq); + lport->tt.exch_done(seq); } fc_io_compl(fsp); } +/** + * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt + * @fsp: The FCP packet whose exchanges should be canceled + * @error: The reason for the cancellation + */ static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) { - struct fc_lport *lp = fsp->lp; + struct fc_lport *lport = fsp->lp; if (fsp->seq_ptr) { - lp->tt.exch_done(fsp->seq_ptr); + lport->tt.exch_done(fsp->seq_ptr); fsp->seq_ptr = NULL; } fsp->status_code = error; } /** - * fc_fcp_cleanup_each_cmd() - Cleanup active commads - * @lp: logical port - * @id: target id - * @lun: lun - * @error: fsp status code + * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port + * @lport: The local port whose exchanges should be canceled + * @id: The target's ID + * @lun: The LUN + * @error: The reason for cancellation * * If lun or id is -1, they are ignored. */ -static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id, +static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id, unsigned int lun, int error) { - struct fc_fcp_internal *si = fc_get_scsi_internal(lp); + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); struct fc_fcp_pkt *fsp; struct scsi_cmnd *sc_cmd; unsigned long flags; - spin_lock_irqsave(lp->host->host_lock, flags); + spin_lock_irqsave(lport->host->host_lock, flags); restart: list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { sc_cmd = fsp->cmd; @@ -918,7 +953,7 @@ restart: continue; fc_fcp_pkt_hold(fsp); - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); if (!fc_fcp_lock_pkt(fsp)) { fc_fcp_cleanup_cmd(fsp, error); @@ -927,35 +962,36 @@ restart: } fc_fcp_pkt_release(fsp); - spin_lock_irqsave(lp->host->host_lock, flags); + spin_lock_irqsave(lport->host->host_lock, flags); /* * while we dropped the lock multiple pkts could * have been released, so we have to start over. */ goto restart; } - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); } -static void fc_fcp_abort_io(struct fc_lport *lp) +/** + * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port + * @lport: The local port whose exchanges are to be aborted + */ +static void fc_fcp_abort_io(struct fc_lport *lport) { - fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR); + fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR); } /** - * fc_fcp_pkt_send() - send a fcp packet to the lower level. - * @lp: fc lport - * @fsp: fc packet. + * fc_fcp_pkt_send() - Send a fcp_pkt + * @lport: The local port to send the FCP packet on + * @fsp: The FCP packet to send * - * This is called by upper layer protocol. 
- * Return : zero for success and -1 for failure - * Context : called from queuecommand which can be called from process - * or scsi soft irq. - * Locks : called with the host lock and irqs disabled. + * Return: Zero for success and -1 for failure + * Locks: Called with the host lock and irqs disabled. */ -static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) +static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) { - struct fc_fcp_internal *si = fc_get_scsi_internal(lp); + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); int rc; fsp->cmd->SCp.ptr = (char *)fsp; @@ -967,16 +1003,22 @@ static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); list_add_tail(&fsp->list, &si->scsi_pkt_queue); - spin_unlock_irq(lp->host->host_lock); - rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv); - spin_lock_irq(lp->host->host_lock); + spin_unlock_irq(lport->host->host_lock); + rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); + spin_lock_irq(lport->host->host_lock); if (rc) list_del(&fsp->list); return rc; } -static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, +/** + * fc_fcp_cmd_send() - Send a FCP command + * @lport: The local port to send the command on + * @fsp: The FCP packet the command is on + * @resp: The handler for the response + */ +static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg)) @@ -984,14 +1026,14 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, struct fc_frame *fp; struct fc_seq *seq; struct fc_rport *rport; - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; const size_t len = sizeof(fsp->cdb_cmd); int rc = 0; if (fc_fcp_lock_pkt(fsp)) return 0; - fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd)); + fp = fc_frame_alloc(lport, sizeof(fsp->cdb_cmd)); if (!fp) { rc = -1; goto unlock; @@ -1001,13 +1043,14 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, fr_fsp(fp) = fsp; rport = fsp->rport; fsp->max_payload = rport->maxframe_size; - rp = rport->dd_data; + rpriv = rport->dd_data; fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, - fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, + fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); + seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, + fsp, 0); if (!seq) { rc = -1; goto unlock; @@ -1025,8 +1068,10 @@ unlock: return rc; } -/* - * transport error handler +/** + * fc_fcp_error() - Handler for FCP layer errors + * @fsp: The FCP packet the error is on + * @fp: The frame that has errored */ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { @@ -1051,9 +1096,11 @@ unlock: fc_fcp_unlock_pkt(fsp); } -/* - * Scsi abort handler- calls to send an abort - * and then wait for abort completion +/** + * fc_fcp_pkt_abort() - Abort a fcp_pkt + * @fsp: The FCP packet to abort on + * + * Called to send an abort and then wait for abort completion */ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) { @@ -1082,14 +1129,15 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) return rc; } -/* - * Retry LUN reset after resource allocation failed. 
+/** + * fc_lun_reset_send() - Send LUN reset command + * @data: The FCP packet that identifies the LUN to be reset */ static void fc_lun_reset_send(unsigned long data) { struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; - struct fc_lport *lp = fsp->lp; - if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) { + struct fc_lport *lport = fsp->lp; + if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) return; if (fc_fcp_lock_pkt(fsp)) @@ -1100,11 +1148,15 @@ static void fc_lun_reset_send(unsigned long data) } } -/* - * Scsi device reset handler- send a LUN RESET to the device - * and wait for reset reply +/** + * fc_lun_reset() - Send a LUN RESET command to a device + * and wait for the reply + * @lport: The local port to send the command on + * @fsp: The FCP packet that identifies the LUN to be reset + * @id: The SCSI command ID + * @lun: The LUN ID to be reset */ -static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, +static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp, unsigned int id, unsigned int lun) { int rc; @@ -1132,14 +1184,14 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, spin_lock_bh(&fsp->scsi_pkt_lock); if (fsp->seq_ptr) { - lp->tt.exch_done(fsp->seq_ptr); + lport->tt.exch_done(fsp->seq_ptr); fsp->seq_ptr = NULL; } fsp->wait_for_comp = 0; spin_unlock_bh(&fsp->scsi_pkt_lock); if (!rc) { - FC_SCSI_DBG(lp, "lun reset failed\n"); + FC_SCSI_DBG(lport, "lun reset failed\n"); return FAILED; } @@ -1147,13 +1199,16 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, if (fsp->cdb_status != FCP_TMF_CMPL) return FAILED; - FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); - fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); + FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun); + fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED); return SUCCESS; } -/* - * Task Managment response handler +/** + * fc_tm_done() - Task Management response handler + * @seq: The sequence that the response is on + * @fp: The response frame + * @arg: The FCP packet the response is for */ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) { @@ -1190,34 +1245,31 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) fc_fcp_unlock_pkt(fsp); } -static void fc_fcp_cleanup(struct fc_lport *lp) +/** + * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port + * @lport: The local port to be cleaned up + */ +static void fc_fcp_cleanup(struct fc_lport *lport) { - fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR); + fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR); } -/* - * fc_fcp_timeout: called by OS timer function. - * - * The timer has been inactivated and must be reactivated if desired - * using fc_fcp_timer_set(). - * - * Algorithm: - * - * If REC is supported, just issue it, and return. The REC exchange will - * complete or time out, and recovery can continue at that point. - * - * Otherwise, if the response has been received without all the data, - * it has been ER_TIMEOUT since the response was received. +/** + * fc_fcp_timeout() - Handler for fcp_pkt timeouts + * @data: The FCP packet that has timed out * - * If the response has not been received, - * we see if data was received recently. If it has been, we continue waiting, - * otherwise, we abort the command. + * If REC is supported then just issue it and return. The REC exchange will + * complete or time out and recovery can continue at that point.
Otherwise, + * if the response has been received without all the data it has been + * ER_TIMEOUT since the response was received. If the response has not been + * received we see if data was received recently. If it has been then we + * continue waiting, otherwise, we abort the command. */ static void fc_fcp_timeout(unsigned long data) { struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; struct fc_rport *rport = fsp->rport; - struct fc_rport_libfc_priv *rp = rport->dd_data; + struct fc_rport_libfc_priv *rpriv = rport->dd_data; if (fc_fcp_lock_pkt(fsp)) return; @@ -1227,7 +1279,7 @@ static void fc_fcp_timeout(unsigned long data) fsp->state |= FC_SRB_FCP_PROCESSING_TMO; - if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED) + if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) fc_fcp_rec(fsp); else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), jiffies)) @@ -1241,35 +1293,37 @@ unlock: fc_fcp_unlock_pkt(fsp); } -/* - * Send a REC ELS request +/** + * fc_fcp_rec() - Send a REC ELS request + * @fsp: The FCP packet to send the REC request on */ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) { - struct fc_lport *lp; + struct fc_lport *lport; struct fc_frame *fp; struct fc_rport *rport; - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; - lp = fsp->lp; + lport = fsp->lp; rport = fsp->rport; - rp = rport->dd_data; - if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { + rpriv = rport->dd_data; + if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) { fsp->status_code = FC_HRD_ERROR; fsp->io_status = 0; fc_fcp_complete_locked(fsp); return; } - fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec)); + fp = fc_frame_alloc(lport, sizeof(struct fc_els_rec)); if (!fp) goto retry; fr_seq(fp) = fsp->seq_ptr; fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, - fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, + fc_host_port_id(rpriv->local_port->host), FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp, - fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { + if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, + fc_fcp_rec_resp, fsp, + jiffies_to_msecs(FC_SCSI_REC_TOV))) { fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ return; } @@ -1280,12 +1334,16 @@ retry: fc_timeout_error(fsp); } -/* - * Receive handler for REC ELS frame - * if it is a reject then let the scsi layer to handle - * the timeout. if it is a LS_ACC then if the io was not completed - * then set the timeout and return otherwise complete the exchange - * and tell the scsi layer to restart the I/O. +/** + * fc_fcp_rec_resp() - Handler for REC ELS responses + * @seq: The sequence the response is on + * @fp: The response frame + * @arg: The FCP packet the response is on + * + * If the response is a reject then the scsi layer will handle + * the timeout. If the response is a LS_ACC then if the I/O was not completed + * set the timeout and return. If the I/O was completed then complete the + * exchange and tell the SCSI layer. 
*/ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { @@ -1297,7 +1355,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) u32 offset; enum dma_data_direction data_dir; enum fc_rctl r_ctl; - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; if (IS_ERR(fp)) { fc_fcp_rec_error(fsp, fp); @@ -1320,13 +1378,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) /* fall through */ case ELS_RJT_UNSUP: FC_FCP_DBG(fsp, "device does not support REC\n"); - rp = fsp->rport->dd_data; + rpriv = fsp->rport->dd_data; /* * if we do not spport RECs or got some bogus * reason then resetup timer so we check for * making progress. */ - rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; + rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); break; case ELS_RJT_LOGIC: @@ -1423,8 +1481,10 @@ out: fc_frame_free(fp); } -/* - * Handle error response or timeout for REC exchange. +/** + * fc_fcp_rec_error() - Handler for REC errors + * @fsp: The FCP packet the error is on + * @fp: The REC frame */ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { @@ -1463,10 +1523,9 @@ out: fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ } -/* - * Time out error routine: - * abort's the I/O close the exchange and - * send completion notification to scsi layer +/** + * fc_timeout_error() - Handler for fcp_pkt timeouts + * @fsp: The FCP packet that has timed out */ static void fc_timeout_error(struct fc_fcp_pkt *fsp) { @@ -1480,16 +1539,18 @@ static void fc_timeout_error(struct fc_fcp_pkt *fsp) fc_fcp_send_abort(fsp); } -/* - * Sequence retransmission request. +/** + * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request) + * @fsp: The FCP packet the SRR is to be sent on + * @r_ctl: The R_CTL field for the SRR request * This is called after receiving status but insufficient data, or * when expecting status but the request has timed out. */ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) { - struct fc_lport *lp = fsp->lp; + struct fc_lport *lport = fsp->lp; struct fc_rport *rport; - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); struct fc_seq *seq; struct fcp_srr *srr; @@ -1497,12 +1558,13 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) u8 cdb_op; rport = fsp->rport; - rp = rport->dd_data; + rpriv = rport->dd_data; cdb_op = fsp->cdb_cmd.fc_cdb[0]; - if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY) + if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || + rpriv->rp_state != RPORT_ST_READY) goto retry; /* shouldn't happen */ - fp = fc_frame_alloc(lp, sizeof(*srr)); + fp = fc_frame_alloc(lport, sizeof(*srr)); if (!fp) goto retry; @@ -1515,11 +1577,11 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) srr->srr_rel_off = htonl(offset); fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, - fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, + fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, - fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); + seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, + fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); if (!seq) goto retry; @@ -1533,8 +1595,11 @@ retry: fc_fcp_retry_cmd(fsp); } -/* - * Handle response from SRR.
+/** + * fc_fcp_srr_resp() - Handler for SRR response + * @seq: The sequence the SRR is on + * @fp: The SRR frame + * @arg: The FCP packet the SRR is on */ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { @@ -1580,6 +1645,11 @@ out: fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ } +/** + * fc_fcp_srr_error() - Handler for SRR errors + * @fsp: The FCP packet that the SRR error is on + * @fp: The SRR frame + */ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { if (fc_fcp_lock_pkt(fsp)) @@ -1604,31 +1674,36 @@ out: fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ } -static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) +/** + * fc_fcp_lport_queue_ready() - Determine if the lport and it's queue is ready + * @lport: The local port to be checked + */ +static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) { /* lock ? */ - return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull; + return (lport->state == LPORT_ST_READY) && + lport->link_up && !lport->qfull; } /** - * fc_queuecommand - The queuecommand function of the scsi template - * @cmd: struct scsi_cmnd to be executed - * @done: Callback function to be called when cmd is completed + * fc_queuecommand() - The queuecommand function of the SCSI template + * @cmd: The scsi_cmnd to be executed + * @done: The callback function to be called when the scsi_cmnd is complete * - * this is the i/o strategy routine, called by the scsi layer - * this routine is called with holding the host_lock. + * This is the i/o strategy routine, called by the SCSI layer. This routine + * is called with the host_lock held. */ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) { - struct fc_lport *lp; + struct fc_lport *lport; struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); struct fc_fcp_pkt *fsp; - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; int rval; int rc = 0; struct fcoe_dev_stats *stats; - lp = shost_priv(sc_cmd->device->host); + lport = shost_priv(sc_cmd->device->host); rval = fc_remote_port_chkready(rport); if (rval) { @@ -1647,14 +1722,14 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) goto out; } - rp = rport->dd_data; + rpriv = rport->dd_data; - if (!fc_fcp_lport_queue_ready(lp)) { + if (!fc_fcp_lport_queue_ready(lport)) { rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } - fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC); + fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC); if (fsp == NULL) { rc = SCSI_MLQUEUE_HOST_BUSY; goto out; @@ -1664,7 +1739,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) * build the libfc request pkt */ fsp->cmd = sc_cmd; /* save the cmd */ - fsp->lp = lp; /* save the softc ptr */ + fsp->lp = lport; /* save the softc ptr */ fsp->rport = rport; /* set the remote port ptr */ fsp->xfer_ddp = FC_XID_UNKNOWN; sc_cmd->scsi_done = done; @@ -1678,7 +1753,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) /* * setup the data direction */ - stats = fc_lport_get_stats(lp); + stats = fc_lport_get_stats(lport); if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { fsp->req_flags = FC_SRB_READ; stats->InputRequests++; @@ -1692,7 +1767,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) stats->ControlRequests++; } - fsp->tgt_flags = rp->flags; + fsp->tgt_flags = rpriv->flags; init_timer(&fsp->timer); fsp->timer.data = (unsigned long)fsp; @@ -1702,7 
+1777,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) * if we get -1 return then put the request in the pending * queue. */ - rval = fc_fcp_pkt_send(lp, fsp); + rval = fc_fcp_pkt_send(lport, fsp); if (rval != 0) { fsp->state = FC_SRB_FREE; fc_fcp_pkt_release(fsp); @@ -1714,18 +1789,17 @@ out: EXPORT_SYMBOL(fc_queuecommand); /** - * fc_io_compl() - Handle responses for completed commands - * @fsp: scsi packet - * - * Translates a error to a Linux SCSI error. + * fc_io_compl() - Handle responses for completed commands + * @fsp: The FCP packet that is complete + * + * Translates fcp_pkt errors to Linux SCSI errors. * The fcp packet lock must be held when calling. */ static void fc_io_compl(struct fc_fcp_pkt *fsp) { struct fc_fcp_internal *si; struct scsi_cmnd *sc_cmd; - struct fc_lport *lp; + struct fc_lport *lport; unsigned long flags; /* release outstanding ddp context */ @@ -1738,11 +1812,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) spin_lock_bh(&fsp->scsi_pkt_lock); } - lp = fsp->lp; - si = fc_get_scsi_internal(lp); - spin_lock_irqsave(lp->host->host_lock, flags); + lport = fsp->lp; + si = fc_get_scsi_internal(lport); + spin_lock_irqsave(lport->host->host_lock, flags); if (!fsp->cmd) { - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); return; } @@ -1759,7 +1833,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) fsp->cmd = NULL; if (!sc_cmd->SCp.ptr) { - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); return; } @@ -1826,7 +1900,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) list_del(&fsp->list); sc_cmd->SCp.ptr = NULL; sc_cmd->scsi_done(sc_cmd); - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); /* release ref from initial allocation in queue command */ fc_fcp_pkt_release(fsp); @@ -1834,35 +1908,34 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) /** * fc_eh_abort() - Abort a command - * @sc_cmd: scsi command to abort + * @sc_cmd: The SCSI command to abort * - * From scsi host template. - * send ABTS to the target device and wait for the response - * sc_cmd is the pointer to the command to be aborted. + * From SCSI host template. + * Send an ABTS to the target device and wait for the response.
*/ int fc_eh_abort(struct scsi_cmnd *sc_cmd) { struct fc_fcp_pkt *fsp; - struct fc_lport *lp; + struct fc_lport *lport; int rc = FAILED; unsigned long flags; - lp = shost_priv(sc_cmd->device->host); - if (lp->state != LPORT_ST_READY) + lport = shost_priv(sc_cmd->device->host); + if (lport->state != LPORT_ST_READY) return rc; - else if (!lp->link_up) + else if (!lport->link_up) return rc; - spin_lock_irqsave(lp->host->host_lock, flags); + spin_lock_irqsave(lport->host->host_lock, flags); fsp = CMD_SP(sc_cmd); if (!fsp) { /* command completed while scsi eh was setting up */ - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); return SUCCESS; } /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ fc_fcp_pkt_hold(fsp); - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); if (fc_fcp_lock_pkt(fsp)) { /* completed while we were waiting for timer to be deleted */ @@ -1880,34 +1953,32 @@ release_pkt: EXPORT_SYMBOL(fc_eh_abort); /** - * fc_eh_device_reset() Reset a single LUN - * @sc_cmd: scsi command + * fc_eh_device_reset() - Reset a single LUN + * @sc_cmd: The SCSI command which identifies the device whose + * LUN is to be reset * - * Set from scsi host template to send tm cmd to the target and wait for the - * response. + * Set from SCSI host template. */ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) { - struct fc_lport *lp; + struct fc_lport *lport; struct fc_fcp_pkt *fsp; struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); int rc = FAILED; - struct fc_rport_libfc_priv *rp; int rval; rval = fc_remote_port_chkready(rport); if (rval) goto out; - rp = rport->dd_data; - lp = shost_priv(sc_cmd->device->host); + lport = shost_priv(sc_cmd->device->host); - if (lp->state != LPORT_ST_READY) + if (lport->state != LPORT_ST_READY) return rc; - FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); + FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id); - fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); + fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO); if (fsp == NULL) { printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); sc_cmd->result = DID_NO_CONNECT << 16; @@ -1919,13 +1990,13 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) * the sc passed in is not setup for execution like when sent * through the queuecommand callout. */ - fsp->lp = lp; /* save the softc ptr */ + fsp->lp = lport; /* save the softc ptr */ fsp->rport = rport; /* set the remote port ptr */ /* * flush outstanding commands */ - rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); + rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); fsp->state = FC_SRB_FREE; fc_fcp_pkt_release(fsp); @@ -1935,38 +2006,39 @@ out: EXPORT_SYMBOL(fc_eh_device_reset); /** - * fc_eh_host_reset() - The reset function will reset the ports on the host. - * @sc_cmd: scsi command + * fc_eh_host_reset() - Reset a Scsi_Host. 
+ * @sc_cmd: The SCSI command that identifies the SCSI host to be reset */ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) { struct Scsi_Host *shost = sc_cmd->device->host; - struct fc_lport *lp = shost_priv(shost); + struct fc_lport *lport = shost_priv(shost); unsigned long wait_tmo; - FC_SCSI_DBG(lp, "Resetting host\n"); + FC_SCSI_DBG(lport, "Resetting host\n"); - lp->tt.lport_reset(lp); + lport->tt.lport_reset(lport); wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; - while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) + while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, + wait_tmo)) msleep(1000); - if (fc_fcp_lport_queue_ready(lp)) { + if (fc_fcp_lport_queue_ready(lport)) { shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " - "on port (%6x)\n", fc_host_port_id(lp->host)); + "on port (%6x)\n", fc_host_port_id(lport->host)); return SUCCESS; } else { shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " "port (%6x) is not ready.\n", - fc_host_port_id(lp->host)); + fc_host_port_id(lport->host)); return FAILED; } } EXPORT_SYMBOL(fc_eh_host_reset); /** - * fc_slave_alloc() - configure queue depth - * @sdev: scsi device + * fc_slave_alloc() - Configure the queue depth of a Scsi_Host + * @sdev: The SCSI device that identifies the SCSI host * * Configures queue depth based on host's cmd_per_len. If not set * then we use the libfc default. @@ -1988,6 +2060,12 @@ int fc_slave_alloc(struct scsi_device *sdev) } EXPORT_SYMBOL(fc_slave_alloc); +/** + * fc_change_queue_depth() - Change a device's queue depth + * @sdev: The SCSI device whose queue depth is to change + * @qdepth: The new queue depth + * @reason: The reason for the change + */ int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) { switch (reason) { @@ -2007,6 +2085,11 @@ int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) } EXPORT_SYMBOL(fc_change_queue_depth); +/** + * fc_change_queue_type() - Change a device's queue type + * @sdev: The SCSI device whose queue type is to change + * @tag_type: Identifier for queue type + */ int fc_change_queue_type(struct scsi_device *sdev, int tag_type) { if (sdev->tagged_supported) { @@ -2022,17 +2105,21 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type) } EXPORT_SYMBOL(fc_change_queue_type); -void fc_fcp_destroy(struct fc_lport *lp) +/** + * fc_fcp_destroy() - Tear down the FCP layer for a given local port + * @lport: The local port that no longer needs the FCP layer + */ +void fc_fcp_destroy(struct fc_lport *lport) { - struct fc_fcp_internal *si = fc_get_scsi_internal(lp); + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); if (!list_empty(&si->scsi_pkt_queue)) printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " - "port (%6x)\n", fc_host_port_id(lp->host)); + "port (%6x)\n", fc_host_port_id(lport->host)); mempool_destroy(si->scsi_pkt_pool); kfree(si); - lp->scsi_priv = NULL; + lport->scsi_priv = NULL; } EXPORT_SYMBOL(fc_fcp_destroy); @@ -2058,24 +2145,28 @@ void fc_destroy_fcp() kmem_cache_destroy(scsi_pkt_cachep); } -int fc_fcp_init(struct fc_lport *lp) +/** + * fc_fcp_init() - Initialize the FCP layer for a local port + * @lport: The local port to initialize the FCP layer for + */ +int fc_fcp_init(struct fc_lport *lport) { int rc; struct fc_fcp_internal *si; - if (!lp->tt.fcp_cmd_send) - lp->tt.fcp_cmd_send = fc_fcp_cmd_send; + if (!lport->tt.fcp_cmd_send) + lport->tt.fcp_cmd_send = fc_fcp_cmd_send; - if (!lp->tt.fcp_cleanup) - lp->tt.fcp_cleanup = fc_fcp_cleanup; + 
if (!lport->tt.fcp_cleanup) + lport->tt.fcp_cleanup = fc_fcp_cleanup; - if (!lp->tt.fcp_abort_io) - lp->tt.fcp_abort_io = fc_fcp_abort_io; + if (!lport->tt.fcp_abort_io) + lport->tt.fcp_abort_io = fc_fcp_abort_io; si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); if (!si) return -ENOMEM; - lp->scsi_priv = si; + lport->scsi_priv = si; INIT_LIST_HEAD(&si->scsi_pkt_queue); si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c index 295eafb0316f..39f4b6ab04b4 100644 --- a/drivers/scsi/libfc/fc_libfc.c +++ b/drivers/scsi/libfc/fc_libfc.c @@ -75,7 +75,7 @@ module_exit(libfc_exit); /** * fc_copy_buffer_to_sglist() - This routine copies the data of a buffer - * into a scatter-gather list (SG list). + * into a scatter-gather list (SG list). * * @buf: pointer to the data buffer. * @len: the byte-length of the data buffer. @@ -84,7 +84,7 @@ module_exit(libfc_exit); * @offset: pointer to the current offset in the SG list. * @km_type: dedicated page table slot type for kmap_atomic. * @crc: pointer to the 32-bit crc value. - * If crc is NULL, CRC is not calculated. + * If crc is NULL, CRC is not calculated. */ u32 fc_copy_buffer_to_sglist(void *buf, size_t len, struct scatterlist *sg, diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h index e4b5e9280cb0..741fd5c72e13 100644 --- a/drivers/scsi/libfc/fc_libfc.h +++ b/drivers/scsi/libfc/fc_libfc.h @@ -22,22 +22,22 @@ #define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */ #define FC_LPORT_LOGGING 0x02 /* lport layer logging */ -#define FC_DISC_LOGGING 0x04 /* discovery layer logging */ +#define FC_DISC_LOGGING 0x04 /* discovery layer logging */ #define FC_RPORT_LOGGING 0x08 /* rport layer logging */ -#define FC_FCP_LOGGING 0x10 /* I/O path logging */ -#define FC_EM_LOGGING 0x20 /* Exchange Manager logging */ -#define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */ -#define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */ +#define FC_FCP_LOGGING 0x10 /* I/O path logging */ +#define FC_EM_LOGGING 0x20 /* Exchange Manager logging */ +#define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */ +#define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */ extern unsigned int fc_debug_logging; -#define FC_CHECK_LOGGING(LEVEL, CMD) \ -do { \ - if (unlikely(fc_debug_logging & LEVEL)) \ - do { \ - CMD; \ - } while (0); \ -} while (0) +#define FC_CHECK_LOGGING(LEVEL, CMD) \ + do { \ + if (unlikely(fc_debug_logging & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ + } while (0) #define FC_LIBFC_DBG(fmt, args...) \ FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \ @@ -49,10 +49,10 @@ do { \ (lport)->host->host_no, \ fc_host_port_id((lport)->host), ##args)) -#define FC_DISC_DBG(disc, fmt, args...) \ - FC_CHECK_LOGGING(FC_DISC_LOGGING, \ - printk(KERN_INFO "host%u: disc: " fmt, \ - (disc)->lport->host->host_no, \ +#define FC_DISC_DBG(disc, fmt, args...) \ + FC_CHECK_LOGGING(FC_DISC_LOGGING, \ + printk(KERN_INFO "host%u: disc: " fmt, \ + (disc)->lport->host->host_no, \ ##args)) #define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ @@ -77,7 +77,7 @@ do { \ exch->xid, ##args)) #define FC_SCSI_DBG(lport, fmt, args...) 
\ - FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ + FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ printk(KERN_INFO "host%u: scsi: " fmt, \ (lport)->host->host_no, ##args)) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 2162e6b0f43e..90930c435455 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -133,7 +133,7 @@ static const char *fc_lport_state_names[] = { * @job: The passthrough job * @lport: The local port to pass through a command * @rsp_code: The expected response code - * @sg: job->reply_payload.sg_list + * @sg: job->reply_payload.sg_list * @nents: job->reply_payload.sg_cnt * @offset: The offset into the response data */ @@ -146,6 +146,11 @@ struct fc_bsg_info { size_t offset; }; +/** + * fc_frame_drop() - Dummy frame handler + * @lport: The local port the frame was received on + * @fp: The received frame + */ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) { fc_frame_free(fp); @@ -172,7 +177,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, switch (event) { case RPORT_EV_READY: if (lport->state == LPORT_ST_DNS) { - lport->dns_rp = rdata; + lport->dns_rdata = rdata; fc_lport_enter_ns(lport, LPORT_ST_RNN_ID); } else { FC_LPORT_DBG(lport, "Received an READY event " @@ -187,7 +192,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP: - lport->dns_rp = NULL; + lport->dns_rdata = NULL; break; case RPORT_EV_NONE: break; @@ -211,8 +216,8 @@ static const char *fc_lport_state(struct fc_lport *lport) /** * fc_lport_ptp_setup() - Create an rport for point-to-point mode - * @lport: The lport to attach the ptp rport to - * @fid: The FID of the ptp rport + * @lport: The lport to attach the ptp rport to + * @remote_fid: The FID of the ptp rport * @remote_wwpn: The WWPN of the ptp rport * @remote_wwnn: The WWNN of the ptp rport */ @@ -221,18 +226,22 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, u64 remote_wwnn) { mutex_lock(&lport->disc.disc_mutex); - if (lport->ptp_rp) - lport->tt.rport_logoff(lport->ptp_rp); - lport->ptp_rp = lport->tt.rport_create(lport, remote_fid); - lport->ptp_rp->ids.port_name = remote_wwpn; - lport->ptp_rp->ids.node_name = remote_wwnn; + if (lport->ptp_rdata) + lport->tt.rport_logoff(lport->ptp_rdata); + lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid); + lport->ptp_rdata->ids.port_name = remote_wwpn; + lport->ptp_rdata->ids.node_name = remote_wwnn; mutex_unlock(&lport->disc.disc_mutex); - lport->tt.rport_login(lport->ptp_rp); + lport->tt.rport_login(lport->ptp_rdata); fc_lport_enter_ready(lport); } +/** + * fc_get_host_port_type() - Return the port type of the given Scsi_Host + * @shost: The SCSI host whose port type is to be determined + */ void fc_get_host_port_type(struct Scsi_Host *shost) { /* TODO - currently just NPORT */ @@ -240,25 +249,33 @@ void fc_get_host_port_type(struct Scsi_Host *shost) } EXPORT_SYMBOL(fc_get_host_port_type); +/** + * fc_get_host_port_state() - Return the port state of the given Scsi_Host + * @shost: The SCSI host whose port state is to be determined + */ void fc_get_host_port_state(struct Scsi_Host *shost) { - struct fc_lport *lp = shost_priv(shost); + struct fc_lport *lport = shost_priv(shost); - mutex_lock(&lp->lp_mutex); - if (!lp->link_up) + mutex_lock(&lport->lp_mutex); + if (!lport->link_up) fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; else - switch (lp->state) { + switch (lport->state) { case LPORT_ST_READY: fc_host_port_state(shost) = 
FC_PORTSTATE_ONLINE; break; default: fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; } - mutex_unlock(&lp->lp_mutex); + mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_get_host_port_state); +/** + * fc_get_host_speed() - Return the speed of the given Scsi_Host + * @shost: The SCSI host whose port speed is to be determined + */ void fc_get_host_speed(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); @@ -267,24 +284,28 @@ void fc_get_host_speed(struct Scsi_Host *shost) } EXPORT_SYMBOL(fc_get_host_speed); +/** + * fc_get_host_stats() - Return the Scsi_Host's statistics + * @shost: The SCSI host whose statistics are to be returned + */ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) { struct fc_host_statistics *fcoe_stats; - struct fc_lport *lp = shost_priv(shost); + struct fc_lport *lport = shost_priv(shost); struct timespec v0, v1; unsigned int cpu; - fcoe_stats = &lp->host_stats; + fcoe_stats = &lport->host_stats; memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); jiffies_to_timespec(jiffies, &v0); - jiffies_to_timespec(lp->boot_time, &v1); + jiffies_to_timespec(lport->boot_time, &v1); fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); for_each_possible_cpu(cpu) { struct fcoe_dev_stats *stats; - stats = per_cpu_ptr(lp->dev_stats, cpu); + stats = per_cpu_ptr(lport->dev_stats, cpu); fcoe_stats->tx_frames += stats->TxFrames; fcoe_stats->tx_words += stats->TxWords; @@ -309,12 +330,15 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) } EXPORT_SYMBOL(fc_get_host_stats); -/* - * Fill in FLOGI command for request. +/** + * fc_lport_flogi_fill() - Fill in FLOGI command for request + * @lport: The local port the FLOGI is for + * @flogi: The FLOGI command + * @op: The opcode */ -static void -fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi, - unsigned int op) +static void fc_lport_flogi_fill(struct fc_lport *lport, + struct fc_els_flogi *flogi, + unsigned int op) { struct fc_els_csp *sp; struct fc_els_cssp *cp; @@ -342,8 +366,10 @@ fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi, } } -/* - * Add a supported FC-4 type. +/** + * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port + * @lport: The local port to add a new FC-4 type to + * @type: The new FC-4 type */ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) { @@ -355,9 +381,9 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) /** * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. + * @sp: The sequence in the RLIR exchange + * @fp: The RLIR request frame * @lport: Fibre Channel local port recieving the RLIR - * @sp: current sequence in the RLIR exchange - * @fp: RLIR request frame * * Locking Note: The lport lock is expected to be held before calling * this function. @@ -374,9 +400,9 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_lport_recv_echo_req() - Handle received ECHO request - * @lport: Fibre Channel local port recieving the ECHO - * @sp: current sequence in the ECHO exchange - * @fp: ECHO request frame + * @sp: The sequence in the ECHO exchange + * @fp: ECHO request frame + * @lport: The local port recieving the ECHO * * Locking Note: The lport lock is expected to be held before calling * this function. 
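For orientation, the fc_get_host_port_type(), fc_get_host_port_state(), fc_get_host_speed() and fc_get_host_stats() helpers documented just above exist to be plugged into the FC transport class template, as does fc_lport_bsg_request() further down in this patch. Below is a trimmed, purely illustrative sketch of such a template; the name example_fc_transport_fn and the particular selection of fields are assumptions for the example, not part of this patch.

#include <scsi/scsi_transport_fc.h>
#include <scsi/libfc.h>

/* Hypothetical FC transport template fragment for a libfc-based LLD;
 * only fields backed by the libfc helpers shown in this patch appear.
 */
static struct fc_function_template example_fc_transport_fn = {
	.get_host_port_type	= fc_get_host_port_type,
	.show_host_port_type	= 1,
	.get_host_port_state	= fc_get_host_port_state,
	.show_host_port_state	= 1,
	.get_host_speed		= fc_get_host_speed,
	.show_host_speed	= 1,
	.get_fc_host_stats	= fc_get_host_stats,
	.bsg_request		= fc_lport_bsg_request,
};

A real template carries many more fields (dev_loss_tmo handling, rport attributes, and so on); the point here is only that the kernel-doc'ed helpers above are the glue between libfc and scsi_transport_fc.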
@@ -483,9 +509,9 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, /** * fc_lport_recv_logo_req() - Handle received fabric LOGO request - * @lport: Fibre Channel local port recieving the LOGO - * @sp: current sequence in the LOGO exchange - * @fp: LOGO request frame + * @sp: The sequence in the LOGO exchange + * @fp: The LOGO request frame + * @lport: The local port recieving the LOGO * * Locking Note: The lport lock is exected to be held before calling * this function. @@ -500,7 +526,7 @@ static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_fabric_login() - Start the lport state machine - * @lport: The lport that should log into the fabric + * @lport: The local port that should log into the fabric * * Locking Note: This function should not be called * with the lport lock held. @@ -538,7 +564,7 @@ void __fc_linkup(struct fc_lport *lport) /** * fc_linkup() - Handler for transport linkup events - * @lport: The lport whose link is up + * @lport: The local port whose link is up */ void fc_linkup(struct fc_lport *lport) { @@ -568,7 +594,7 @@ void __fc_linkdown(struct fc_lport *lport) /** * fc_linkdown() - Handler for transport linkdown events - * @lport: The lport whose link is down + * @lport: The local port whose link is down */ void fc_linkdown(struct fc_lport *lport) { @@ -583,7 +609,7 @@ EXPORT_SYMBOL(fc_linkdown); /** * fc_fabric_logoff() - Logout of the fabric - * @lport: fc_lport pointer to logoff the fabric + * @lport: The local port to logoff the fabric * * Return value: * 0 for success, -1 for failure @@ -592,8 +618,8 @@ int fc_fabric_logoff(struct fc_lport *lport) { lport->tt.disc_stop_final(lport); mutex_lock(&lport->lp_mutex); - if (lport->dns_rp) - lport->tt.rport_logoff(lport->dns_rp); + if (lport->dns_rdata) + lport->tt.rport_logoff(lport->dns_rdata); mutex_unlock(&lport->lp_mutex); lport->tt.rport_flush_queue(); mutex_lock(&lport->lp_mutex); @@ -605,11 +631,9 @@ int fc_fabric_logoff(struct fc_lport *lport) EXPORT_SYMBOL(fc_fabric_logoff); /** - * fc_lport_destroy() - unregister a fc_lport - * @lport: fc_lport pointer to unregister + * fc_lport_destroy() - Unregister a fc_lport + * @lport: The local port to unregister * - * Return value: - * None * Note: * exit routine for fc_lport instance * clean-up all the allocated memory @@ -632,13 +656,9 @@ int fc_lport_destroy(struct fc_lport *lport) EXPORT_SYMBOL(fc_lport_destroy); /** - * fc_set_mfs() - sets up the mfs for the corresponding fc_lport - * @lport: fc_lport pointer to unregister - * @mfs: the new mfs for fc_lport - * - * Set mfs for the given fc_lport to the new mfs. - * - * Return: 0 for success + * fc_set_mfs() - Set the maximum frame size for a local port + * @lport: The local port to set the MFS for + * @mfs: The new MFS */ int fc_set_mfs(struct fc_lport *lport, u32 mfs) { @@ -669,7 +689,7 @@ EXPORT_SYMBOL(fc_set_mfs); /** * fc_lport_disc_callback() - Callback for discovery events - * @lport: FC local port + * @lport: The local port receiving the event * @event: The discovery event */ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) @@ -693,7 +713,7 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) /** * fc_rport_enter_ready() - Enter the ready state and start discovery - * @lport: Fibre Channel local port that is ready + * @lport: The local port that is ready * * Locking Note: The lport lock is expected to be held before calling * this routine. 
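To place fc_fabric_login(), fc_linkup()/fc_linkdown(), fc_fabric_logoff(), fc_lport_destroy() and fc_set_mfs() in context, here is a minimal bring-up/tear-down sketch for a hypothetical LLD. The example_ function names and the MFS value are made up, return values are ignored, and exchange-manager allocation is omitted; this sketches the usual call order rather than reproducing code from this patch.

#include <scsi/libfc.h>

/* Hypothetical bring-up path for a libfc-based LLD. */
static void example_lport_up(struct fc_lport *lport)
{
	fc_lport_config(lport);		/* reset state, install defaults */
	fc_set_mfs(lport, 2048);	/* example maximum frame size */

	fc_lport_init(lport);		/* lport template operations */
	fc_disc_init(lport);		/* discovery layer */
	fc_elsct_init(lport);		/* ELS/CT send helper */
	fc_rport_init(lport);		/* remote port layer */
	fc_fcp_init(lport);		/* FCP (SCSI I/O) layer */

	fc_fabric_login(lport);		/* start the lport state machine */
	fc_linkup(lport);		/* report the physical link as up */
}

/* Hypothetical tear-down path. */
static void example_lport_down(struct fc_lport *lport)
{
	fc_linkdown(lport);
	fc_fabric_logoff(lport);	/* LOGO from the fabric, stop discovery */
	fc_lport_destroy(lport);
	fc_fcp_destroy(lport);		/* frees the scsi_pkt pool */
}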
@@ -708,15 +728,15 @@ static void fc_lport_enter_ready(struct fc_lport *lport) fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE); fc_vports_linkchange(lport); - if (!lport->ptp_rp) + if (!lport->ptp_rdata) lport->tt.disc_start(fc_lport_disc_callback, lport); } /** * fc_lport_recv_flogi_req() - Receive a FLOGI request * @sp_in: The sequence the FLOGI is on - * @rx_fp: The frame the FLOGI is in - * @lport: The lport that recieved the request + * @rx_fp: The FLOGI frame + * @lport: The local port that recieved the request * * A received FLOGI request indicates a point-to-point connection. * Accept it with the common service parameters indicating our N port. @@ -802,9 +822,9 @@ out: /** * fc_lport_recv_req() - The generic lport request handler - * @lport: The lport that received the request - * @sp: The sequence the request is on - * @fp: The frame the request is in + * @lport: The local port that received the request + * @sp: The sequence the request is on + * @fp: The request frame * * This function will see if the lport handles the request or * if an rport should handle the request. @@ -872,8 +892,8 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, } /** - * fc_lport_reset() - Reset an lport - * @lport: The lport which should be reset + * fc_lport_reset() - Reset a local port + * @lport: The local port which should be reset * * Locking Note: This functions should not be called with the * lport lock held. @@ -889,18 +909,18 @@ int fc_lport_reset(struct fc_lport *lport) EXPORT_SYMBOL(fc_lport_reset); /** - * fc_lport_reset_locked() - Reset the local port - * @lport: Fibre Channel local port to be reset + * fc_lport_reset_locked() - Reset the local port w/ the lport lock held + * @lport: The local port to be reset * * Locking Note: The lport lock is expected to be held before calling * this routine. */ static void fc_lport_reset_locked(struct fc_lport *lport) { - if (lport->dns_rp) - lport->tt.rport_logoff(lport->dns_rp); + if (lport->dns_rdata) + lport->tt.rport_logoff(lport->dns_rdata); - lport->ptp_rp = NULL; + lport->ptp_rdata = NULL; lport->tt.disc_stop(lport); @@ -911,7 +931,7 @@ static void fc_lport_reset_locked(struct fc_lport *lport) /** * fc_lport_enter_reset() - Reset the local port - * @lport: Fibre Channel local port to be reset + * @lport: The local port to be reset * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -935,8 +955,8 @@ static void fc_lport_enter_reset(struct fc_lport *lport) } /** - * fc_lport_enter_disabled() - disable the local port - * @lport: Fibre Channel local port to be reset + * fc_lport_enter_disabled() - Disable the local port + * @lport: The local port to be reset * * Locking Note: The lport lock is expected to be held before calling * this routine. 
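Complementing the transport template sketch above, the fc_fcp.c entry points documented earlier in this patch (fc_queuecommand(), fc_eh_abort(), fc_eh_device_reset(), fc_eh_host_reset(), fc_slave_alloc(), fc_change_queue_depth(), fc_change_queue_type()) are meant to be wired into the LLD's SCSI host template. The sketch below is illustrative only: the template name and the sizing values are made up, not taken from this patch or mandated by libfc.

#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <scsi/libfc.h>

/* Hypothetical host template for a libfc-based LLD; libfc provides
 * all of the handlers, the LLD only chooses the sizing.
 */
static struct scsi_host_template example_fc_sht = {
	.module			= THIS_MODULE,
	.name			= "example-libfc-lld",
	.queuecommand		= fc_queuecommand,
	.eh_abort_handler	= fc_eh_abort,
	.eh_device_reset_handler = fc_eh_device_reset,
	.eh_host_reset_handler	= fc_eh_host_reset,
	.slave_alloc		= fc_slave_alloc,
	.change_queue_depth	= fc_change_queue_depth,
	.change_queue_type	= fc_change_queue_type,
	.this_id		= -1,
	.can_queue		= 1024,		/* example values only */
	.cmd_per_lun		= 3,
	.sg_tablesize		= 64,
	.max_sectors		= 0xffff,
};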
@@ -953,8 +973,8 @@ static void fc_lport_enter_disabled(struct fc_lport *lport) /** * fc_lport_error() - Handler for any errors - * @lport: The fc_lport object - * @fp: The frame pointer + * @lport: The local port that the error was on + * @fp: The error code encoded in a frame pointer * * If the error was caused by a resource allocation failure * then wait for half a second and retry, otherwise retry @@ -1002,13 +1022,13 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) /** * fc_lport_ns_resp() - Handle response to a name server - * registration exchange - * @sp: current sequence in exchange - * @fp: response frame + * registration exchange + * @sp: current sequence in exchange + * @fp: response frame * @lp_arg: Fibre Channel host port instance * * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error + * held, but it will lock, call an _enter_* function or fc_lport_error() * and then unlock the lport. */ static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, @@ -1027,7 +1047,7 @@ static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFT_ID) { FC_LPORT_DBG(lport, "Received a name server response, " - "but in state %s\n", fc_lport_state(lport)); + "but in state %s\n", fc_lport_state(lport)); if (IS_ERR(fp)) goto err; goto out; @@ -1072,8 +1092,8 @@ err: /** * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request - * @sp: current sequence in SCR exchange - * @fp: response frame + * @sp: current sequence in SCR exchange + * @fp: response frame * @lp_arg: Fibre Channel lport port instance that sent the registration request * * Locking Note: This function will be called without the lport lock @@ -1119,8 +1139,8 @@ err: } /** - * fc_lport_enter_scr() - Send a State Change Register (SCR) request - * @lport: Fibre Channel local port to register for state changes + * fc_lport_enter_scr() - Send a SCR (State Change Register) request + * @lport: The local port to register for state changes * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -1212,8 +1232,8 @@ static struct fc_rport_operations fc_lport_rport_ops = { }; /** - * fc_rport_enter_dns() - Create a rport to the name server - * @lport: Fibre Channel local port requesting a rport for the name server + * fc_rport_enter_dns() - Create a fc_rport for the name server + * @lport: The local port requesting a remote port for the name server * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -1242,8 +1262,8 @@ err: } /** - * fc_lport_timeout() - Handler for the retry_work timer. 
- * @work: The work struct of the fc_lport + * fc_lport_timeout() - Handler for the retry_work timer + * @work: The work struct of the local port */ static void fc_lport_timeout(struct work_struct *work) { @@ -1287,16 +1307,16 @@ static void fc_lport_timeout(struct work_struct *work) /** * fc_lport_logo_resp() - Handle response to LOGO request - * @sp: current sequence in LOGO exchange - * @fp: response frame - * @lp_arg: Fibre Channel lport port instance that sent the LOGO request + * @sp: The sequence that the LOGO was on + * @fp: The LOGO frame + * @lp_arg: The local port that received the LOGO response * * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error + * held, but it will lock, call an _enter_* function or fc_lport_error() * and then unlock the lport. */ void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) + void *lp_arg) { struct fc_lport *lport = lp_arg; u8 op; @@ -1336,7 +1356,7 @@ EXPORT_SYMBOL(fc_lport_logo_resp); /** * fc_rport_enter_logo() - Logout of the fabric - * @lport: Fibre Channel local port to be logged out + * @lport: The local port to be logged out * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -1365,16 +1385,16 @@ static void fc_lport_enter_logo(struct fc_lport *lport) /** * fc_lport_flogi_resp() - Handle response to FLOGI request - * @sp: current sequence in FLOGI exchange - * @fp: response frame - * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request + * @sp: The sequence that the FLOGI was on + * @fp: The FLOGI response frame + * @lp_arg: The local port that received the FLOGI response * * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error + * held, but it will lock, call an _enter_* function or fc_lport_error() * and then unlock the lport.
*/ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) + void *lp_arg) { struct fc_lport *lport = lp_arg; struct fc_frame_header *fh; @@ -1484,7 +1504,10 @@ void fc_lport_enter_flogi(struct fc_lport *lport) fc_lport_error(lport, NULL); } -/* Configure a fc_lport */ +/** + * fc_lport_config() - Configure a fc_lport + * @lport: The local port to be configured + */ int fc_lport_config(struct fc_lport *lport) { INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); @@ -1499,6 +1522,10 @@ int fc_lport_config(struct fc_lport *lport) } EXPORT_SYMBOL(fc_lport_config); +/** + * fc_lport_init() - Initialize the lport layer for a local port + * @lport: The local port to initialize the exchange layer for + */ int fc_lport_init(struct fc_lport *lport) { if (!lport->tt.lport_recv) @@ -1533,10 +1560,10 @@ int fc_lport_init(struct fc_lport *lport) EXPORT_SYMBOL(fc_lport_init); /** - * fc_lport_bsg_resp() - The common response handler for fc pass-thru requests - * @sp: current sequence in the fc pass-thru request exchange - * @fp: received response frame - * @info_arg: pointer to struct fc_bsg_info + * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests + * @sp: The sequence for the FC Passthrough response + * @fp: The response frame + * @info_arg: The BSG info that the response is for */ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, void *info_arg) @@ -1596,10 +1623,10 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, } /** - * fc_lport_els_request() - Send ELS pass-thru request - * @job: The bsg fc pass-thru job structure + * fc_lport_els_request() - Send ELS passthrough request + * @job: The BSG Passthrough job * @lport: The local port sending the request - * @did: The destination port id. + * @did: The destination port id * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -1656,11 +1683,11 @@ static int fc_lport_els_request(struct fc_bsg_job *job, } /** - * fc_lport_ct_request() - Send CT pass-thru request - * @job: The bsg fc pass-thru job structure + * fc_lport_ct_request() - Send CT Passthrough request + * @job: The BSG Passthrough job * @lport: The local port sending the request * @did: The destination FC-ID - * @tov: The time to wait for a response + * @tov: The timeout period to wait for the response * * Locking Note: The lport lock is expected to be held before calling * this routine. 
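A side note on the FC_CHECK_LOGGING/FC_LPORT_DBG/FC_SCSI_DBG family reindented in fc_libfc.h earlier in this patch: the messages are gated by bits in fc_debug_logging (FC_LPORT_LOGGING, FC_FCP_LOGGING, and so on), which is normally writable as a libfc module parameter. The snippet below is a made-up usage sketch; fc_libfc.h is a libfc-private header, so this pattern only applies to code inside drivers/scsi/libfc/.

#include "fc_libfc.h"	/* libfc-private; provides FC_LPORT_DBG() */

/* Hypothetical helper inside libfc: emit a debug line that is only
 * printed when the FC_LPORT_LOGGING bit is set in fc_debug_logging
 * (typically adjusted via /sys/module/libfc/parameters/debug_logging).
 */
static void example_log_link_state(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "link is %s, queue %s\n",
		     lport->link_up ? "up" : "down",
		     lport->qfull ? "full" : "not full");
}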
@@ -1717,8 +1744,8 @@ static int fc_lport_ct_request(struct fc_bsg_job *job, /** * fc_lport_bsg_request() - The common entry point for sending - * fc pass-thru requests - * @job: The fc pass-thru job structure + * FC Passthrough requests + * @job: The BSG passthrough job */ int fc_lport_bsg_request(struct fc_bsg_job *job) { @@ -1759,7 +1786,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job) case FC_BSG_HST_CT: did = ntoh24(job->request->rqst_data.h_ct.port_id); if (did == FC_FID_DIR_SERV) - rdata = lport->dns_rp; + rdata = lport->dns_rdata; else rdata = lport->tt.rport_lookup(lport, did); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 622285c81fef..6578968a753d 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -92,9 +92,9 @@ static const char *fc_rport_state_names[] = { }; /** - * fc_rport_lookup() - lookup a remote port by port_id - * @lport: Fibre Channel host port instance - * @port_id: remote port port_id to match + * fc_rport_lookup() - Lookup a remote port by port_id + * @lport: The local port to lookup the remote port on + * @port_id: The remote port ID to look up */ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, u32 port_id) @@ -109,8 +109,10 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, /** * fc_rport_create() - Create a new remote port - * @lport: The local port that the new remote port is for - * @port_id: The port ID for the new remote port + * @lport: The local port this remote port will be associated with + * @ids: The identifiers for the new remote port + * + * The remote port will start in the INIT state. * * Locking note: must be called with the disc_mutex held. */ @@ -149,8 +151,8 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, } /** - * fc_rport_destroy() - free a remote port after last reference is released. - * @kref: pointer to kref inside struct fc_rport_priv + * fc_rport_destroy() - Free a remote port after last reference is released + * @kref: The remote port's kref */ static void fc_rport_destroy(struct kref *kref) { @@ -161,8 +163,8 @@ static void fc_rport_destroy(struct kref *kref) } /** - * fc_rport_state() - return a string for the state the rport is in - * @rdata: remote port private data + * fc_rport_state() - Return a string identifying the remote port's state + * @rdata: The remote port */ static const char *fc_rport_state(struct fc_rport_priv *rdata) { @@ -175,9 +177,9 @@ static const char *fc_rport_state(struct fc_rport_priv *rdata) } /** - * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds. 
- * @rport: Pointer to Fibre Channel remote port structure - * @timeout: timeout in seconds + * fc_set_rport_loss_tmo() - Set the remote port loss timeout + * @rport: The remote port that gets a new timeout value + * @timeout: The new timeout value (in seconds) */ void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) { @@ -189,9 +191,11 @@ void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) EXPORT_SYMBOL(fc_set_rport_loss_tmo); /** - * fc_plogi_get_maxframe() - Get max payload from the common service parameters - * @flp: FLOGI payload structure - * @maxval: upper limit, may be less than what is in the service parameters + * fc_plogi_get_maxframe() - Get the maximum payload from the common service + * parameters in a FLOGI frame + * @flp: The FLOGI payload + * @maxval: The maximum frame size upper limit; this may be less than what + * is in the service parameters */ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval) @@ -212,9 +216,9 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, } /** - * fc_rport_state_enter() - Change the rport's state - * @rdata: The rport whose state should change - * @new: The new state of the rport + * fc_rport_state_enter() - Change the state of a remote port + * @rdata: The remote port whose state should change + * @new: The new state * * Locking Note: Called with the rport lock held */ @@ -226,12 +230,16 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata, rdata->rp_state = new; } +/** + * fc_rport_work() - Handler for remote port events in the rport_event_queue + * @work: Handle to the remote port being dequeued + */ static void fc_rport_work(struct work_struct *work) { u32 port_id; struct fc_rport_priv *rdata = container_of(work, struct fc_rport_priv, event_work); - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; enum fc_rport_event event; struct fc_lport *lport = rdata->local_port; struct fc_rport_operations *rport_ops; @@ -268,12 +276,12 @@ static void fc_rport_work(struct work_struct *work) rport->maxframe_size = rdata->maxframe_size; rport->supported_classes = rdata->supported_classes; - rp = rport->dd_data; - rp->local_port = lport; - rp->rp_state = rdata->rp_state; - rp->flags = rdata->flags; - rp->e_d_tov = rdata->e_d_tov; - rp->r_a_tov = rdata->r_a_tov; + rpriv = rport->dd_data; + rpriv->local_port = lport; + rpriv->rp_state = rdata->rp_state; + rpriv->flags = rdata->flags; + rpriv->e_d_tov = rdata->e_d_tov; + rpriv->r_a_tov = rdata->r_a_tov; mutex_unlock(&rdata->rp_mutex); if (rport_ops && rport_ops->event_callback) { @@ -319,8 +327,8 @@ static void fc_rport_work(struct work_struct *work) lport->tt.exch_mgr_reset(lport, port_id, 0); if (rport) { - rp = rport->dd_data; - rp->rp_state = RPORT_ST_DELETE; + rpriv = rport->dd_data; + rpriv->rp_state = RPORT_ST_DELETE; mutex_lock(&rdata->rp_mutex); rdata->rport = NULL; mutex_unlock(&rdata->rp_mutex); @@ -343,7 +351,7 @@ static void fc_rport_work(struct work_struct *work) /** * fc_rport_login() - Start the remote port login state machine - * @rdata: private remote port + * @rdata: The remote port to be logged in to * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* @@ -379,9 +387,9 @@ int fc_rport_login(struct fc_rport_priv *rdata) } /** - * fc_rport_enter_delete() - schedule a remote port to be deleted. 
- * @rdata: private remote port - * @event: event to report as the reason for deletion + * fc_rport_enter_delete() - Schedule a remote port to be deleted + * @rdata: The remote port to be deleted + * @event: The event to report as the reason for deletion * * Locking Note: Called with the rport lock held. * @@ -408,8 +416,8 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, } /** - * fc_rport_logoff() - Logoff and remove an rport - * @rdata: private remote port + * fc_rport_logoff() - Logoff and remove a remote port + * @rdata: The remote port to be logged off of * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* @@ -442,8 +450,8 @@ out: } /** - * fc_rport_enter_ready() - The rport is ready - * @rdata: private remote port + * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state + * @rdata: The remote port that is ready * * Locking Note: The rport lock is expected to be held before calling * this routine. @@ -460,8 +468,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata) } /** - * fc_rport_timeout() - Handler for the retry_work timer. - * @work: The work struct of the fc_rport_priv + * fc_rport_timeout() - Handler for the retry_work timer + * @work: Handle to the remote port that has timed out * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* @@ -502,8 +510,8 @@ static void fc_rport_timeout(struct work_struct *work) /** * fc_rport_error() - Error handler, called once retries have been exhausted - * @rdata: private remote port - * @fp: The frame pointer + * @rdata: The remote port the error happened on + * @fp: The error code encapsulated in a frame pointer * * Locking Note: The rport lock is expected to be held before * calling this routine @@ -535,9 +543,9 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) } /** - * fc_rport_error_retry() - Error handler when retries are desired - * @rdata: private remote port data - * @fp: The frame pointer + * fc_rport_error_retry() - Handler for remote port state retries + * @rdata: The remote port whose state is to be retried + * @fp: The error code encapsulated in a frame pointer * * If the error was an exchange timeout retry immediately, * otherwise wait for E_D_TOV. @@ -569,10 +577,10 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata, } /** - * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response - * @sp: current sequence in the PLOGI exchange - * @fp: response frame - * @rdata_arg: private remote port data + * fc_rport_plogi_recv_resp() - Handler for ELS PLOGI responses + * @sp: The sequence the PLOGI is on + * @fp: The PLOGI response frame + * @rdata_arg: The remote port that sent the PLOGI response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error @@ -635,8 +643,8 @@ err: } /** - * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer - * @rdata: private remote port data + * fc_rport_enter_plogi() - Send Port Login (PLOGI) request + * @rdata: The remote port to send a PLOGI to * * Locking Note: The rport lock is expected to be held before calling * this routine.
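The events queued by fc_rport_work() above are delivered through the event_callback member of struct fc_rport_operations, the same hook fc_lport_rport_callback() implements for the name-server rport earlier in this patch. Below is a minimal, hypothetical consumer of those events; the names and printk text are illustrative only and not taken from this patch.

#include <linux/kernel.h>
#include <scsi/libfc.h>

/* Hypothetical remote port event handler for a libfc consumer. */
static void example_rport_event(struct fc_lport *lport,
				struct fc_rport_priv *rdata,
				enum fc_rport_event event)
{
	switch (event) {
	case RPORT_EV_READY:
		printk(KERN_INFO "example: rport %6x ready for I/O\n",
		       rdata->ids.port_id);
		break;
	case RPORT_EV_FAILED:
	case RPORT_EV_LOGO:
	case RPORT_EV_STOP:
		printk(KERN_INFO "example: rport %6x gone\n",
		       rdata->ids.port_id);
		break;
	case RPORT_EV_NONE:
		break;
	}
}

static struct fc_rport_operations example_rport_ops = {
	.event_callback = example_rport_event,
};

A consumer would typically point rdata->ops at such an ops structure after creating the rport, much as the lport code does with fc_lport_rport_ops for the name server.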
@@ -668,9 +676,9 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) /** * fc_rport_prli_resp() - Process Login (PRLI) response handler - * @sp: current sequence in the PRLI exchange - * @fp: response frame - * @rdata_arg: private remote port data + * @sp: The sequence the PRLI response was on + * @fp: The PRLI response frame + * @rdata_arg: The remote port that sent the PRLI response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error @@ -739,10 +747,10 @@ err: } /** - * fc_rport_logo_resp() - Logout (LOGO) response handler - * @sp: current sequence in the LOGO exchange - * @fp: response frame - * @rdata_arg: private remote port data + * fc_rport_logo_resp() - Handler for logout (LOGO) responses + * @sp: The sequence the LOGO was on + * @fp: The LOGO response frame + * @rdata_arg: The remote port that sent the LOGO response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error @@ -785,8 +793,8 @@ err: } /** - * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer - * @rdata: private remote port data + * fc_rport_enter_prli() - Send Process Login (PRLI) request + * @rdata: The remote port to send the PRLI request to * * Locking Note: The rport lock is expected to be held before calling * this routine. @@ -828,10 +836,10 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) } /** - * fc_rport_els_rtv_resp() - Request Timeout Value response handler - * @sp: current sequence in the RTV exchange - * @fp: response frame - * @rdata_arg: private remote port data + * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses + * @sp: The sequence the RTV was on + * @fp: The RTV response frame + * @rdata_arg: The remote port that sent the RTV response * * Many targets don't seem to support this. * @@ -894,8 +902,8 @@ err: } /** - * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer - * @rdata: private remote port data + * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request + * @rdata: The remote port to send the RTV request to * * Locking Note: The rport lock is expected to be held before calling * this routine. @@ -917,15 +925,15 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, - fc_rport_rtv_resp, rdata, lport->e_d_tov)) + fc_rport_rtv_resp, rdata, lport->e_d_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** - * fc_rport_enter_logo() - Send Logout (LOGO) request to peer - * @rdata: private remote port data + * fc_rport_enter_logo() - Send a logout (LOGO) request + * @rdata: The remote port to send the LOGO request to * * Locking Note: The rport lock is expected to be held before calling * this routine. @@ -954,17 +962,17 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) } /** - * fc_rport_els_adisc_resp() - Address Discovery response handler - * @sp: current sequence in the ADISC exchange - * @fp: response frame - * @rdata_arg: remote port private. 
+ * fc_rport_els_adisc_resp() - Handler for Address Discovery (ADISC) responses + * @sp: The sequence the ADISC response was on + * @fp: The ADISC response frame + * @rdata_arg: The remote port that sent the ADISC response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error * and then unlock the rport. */ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, - void *rdata_arg) + void *rdata_arg) { struct fc_rport_priv *rdata = rdata_arg; struct fc_els_adisc *adisc; @@ -1012,8 +1020,8 @@ err: } /** - * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer - * @rdata: remote port private data + * fc_rport_enter_adisc() - Send Address Discover (ADISC) request + * @rdata: The remote port to send the ADISC request to * * Locking Note: The rport lock is expected to be held before calling * this routine. @@ -1041,10 +1049,10 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) } /** - * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request - * @rdata: remote port private - * @sp: current sequence in the ADISC exchange - * @in_fp: ADISC request frame + * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests + * @rdata: The remote port that sent the ADISC request + * @sp: The sequence the ADISC request was on + * @in_fp: The ADISC request frame * * Locking Note: Called with the lport and rport locks held. */ @@ -1085,10 +1093,10 @@ drop: } /** - * fc_rport_recv_els_req() - handle a validated ELS request. - * @lport: Fibre Channel local port - * @sp: current sequence in the PLOGI exchange - * @fp: response frame + * fc_rport_recv_els_req() - Handler for validated ELS requests + * @lport: The local port that received the ELS request + * @sp: The sequence that the ELS request was on + * @fp: The ELS request frame * * Handle incoming ELS requests that require port login. * The ELS opcode has already been validated by the caller. @@ -1160,10 +1168,10 @@ reject: } /** - * fc_rport_recv_req() - Handle a received ELS request from a rport - * @sp: current sequence in the PLOGI exchange - * @fp: response frame - * @lport: Fibre Channel local port + * fc_rport_recv_req() - Handler for requests + * @sp: The sequence the request was on + * @fp: The request frame + * @lport: The local port that received the request * * Locking Note: Called with the lport lock held. */ @@ -1203,10 +1211,10 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, } /** - * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request - * @lport: local port - * @sp: current sequence in the PLOGI exchange - * @fp: PLOGI request frame + * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests + * @lport: The local port that received the PLOGI request + * @sp: The sequence that the PLOGI request was on + * @rx_fp: The PLOGI request frame * * Locking Note: The rport lock is held before calling this function. */ @@ -1328,10 +1336,10 @@ reject: } /** - * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request - * @rdata: private remote port data - * @sp: current sequence in the PRLI exchange - * @fp: PRLI request frame + * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests + * @rdata: The remote port that sent the PRLI request + * @sp: The sequence that the PRLI was on + * @rx_fp: The PRLI request frame * * Locking Note: The rport lock is exected to be held before calling * this function. 
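All of the rp to rpriv renames in this patch follow one access pattern: the libfc-private per-rport data hangs off the FC transport rport's dd_data, and its flags record what the target negotiated (for example FC_RP_FLAGS_REC_SUPPORTED, which fc_fcp.c clears above when a REC is rejected). Here is a small, hypothetical helper showing that pattern; the function name is made up.

#include <linux/types.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/libfc.h>

/* Hypothetical check: is this remote port ready and REC-capable? */
static bool example_rport_supports_rec(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rpriv = rport->dd_data;

	if (rpriv->rp_state != RPORT_ST_READY)
		return false;

	return (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) != 0;
}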
@@ -1485,10 +1493,10 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, } /** - * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request - * @rdata: private remote port data - * @sp: current sequence in the PRLO exchange - * @fp: PRLO request frame + * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests + * @rdata: The remote port that sent the PRLO request + * @sp: The sequence that the PRLO was on + * @fp: The PRLO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. @@ -1515,10 +1523,10 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, } /** - * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request - * @lport: local port. - * @sp: current sequence in the LOGO exchange - * @fp: LOGO request frame + * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests + * @lport: The local port that received the LOGO request + * @sp: The sequence that the LOGO request was on + * @fp: The LOGO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. @@ -1559,11 +1567,18 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, fc_frame_free(fp); } +/** + * fc_rport_flush_queue() - Flush the rport_event_queue + */ static void fc_rport_flush_queue(void) { flush_workqueue(rport_event_queue); } +/** + * fc_rport_init() - Initialize the remote port layer for a local port + * @lport: The local port to initialize the remote port layer for + */ int fc_rport_init(struct fc_lport *lport) { if (!lport->tt.rport_lookup) @@ -1591,7 +1606,10 @@ int fc_rport_init(struct fc_lport *lport) } EXPORT_SYMBOL(fc_rport_init); -int fc_setup_rport(void) +/** + * fc_setup_rport() - Initialize the rport_event_queue + */ +int fc_setup_rport() { rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); if (!rport_event_queue) @@ -1599,15 +1617,22 @@ int fc_setup_rport(void) return 0; } -void fc_destroy_rport(void) +/** + * fc_destroy_rport() - Destroy the rport_event_queue + */ +void fc_destroy_rport() { destroy_workqueue(rport_event_queue); } +/** + * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port + * @rport: The remote port whose I/O should be terminated + */ void fc_rport_terminate_io(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rp = rport->dd_data; - struct fc_lport *lport = rp->local_port; + struct fc_rport_libfc_priv *rpriv = rport->dd_data; + struct fc_lport *lport = rpriv->local_port; lport->tt.exch_mgr_reset(lport, 0, rport->port_id); lport->tt.exch_mgr_reset(lport, rport->port_id, 0); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 54df9fe00c14..310d8a22b726 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -55,8 +55,17 @@ p[2] = ((v) & 0xFF); \ } while (0) -/* - * FC HBA status +/** + * enum fc_lport_state - Local port states + * @LPORT_ST_DISABLED: Disabled + * @LPORT_ST_FLOGI: Fabric login (FLOGI) sent + * @LPORT_ST_DNS: Waiting for name server remote port to become ready + * @LPORT_ST_RPN_ID: Register port name by ID (RPN_ID) sent + * @LPORT_ST_RFT_ID: Register Fibre Channel types by ID (RFT_ID) sent + * @LPORT_ST_SCR: State Change Register (SCR) sent + * @LPORT_ST_READY: Ready for use + * @LPORT_ST_LOGO: Local port logout (LOGO) sent + * @LPORT_ST_RESET: Local port reset */ enum fc_lport_state { LPORT_ST_DISABLED = 0, @@ -78,16 +87,28 @@ enum fc_disc_event { DISC_EV_FAILED }; +/** + * enum fc_rport_state - Remote port states + * @RPORT_ST_INIT: Initialized + * 
@RPORT_ST_PLOGI: Waiting for PLOGI completion + * @RPORT_ST_PRLI: Waiting for PRLI completion + * @RPORT_ST_RTV: Waiting for RTV completion + * @RPORT_ST_READY: Ready for use + * @RPORT_ST_LOGO: Remote port logout (LOGO) sent + * @RPORT_ST_ADISC: Discover Address sent + * @RPORT_ST_DELETE: Remote port being deleted + * @RPORT_ST_RESTART: Remote port being deleted and will restart +*/ enum fc_rport_state { - RPORT_ST_INIT, /* initialized */ - RPORT_ST_PLOGI, /* waiting for PLOGI completion */ - RPORT_ST_PRLI, /* waiting for PRLI completion */ - RPORT_ST_RTV, /* waiting for RTV completion */ - RPORT_ST_READY, /* ready for use */ - RPORT_ST_LOGO, /* port logout sent */ - RPORT_ST_ADISC, /* Discover Address sent */ - RPORT_ST_DELETE, /* port being deleted */ - RPORT_ST_RESTART, /* remote port being deleted and will restart */ + RPORT_ST_INIT, + RPORT_ST_PLOGI, + RPORT_ST_PRLI, + RPORT_ST_RTV, + RPORT_ST_READY, + RPORT_ST_LOGO, + RPORT_ST_ADISC, + RPORT_ST_DELETE, + RPORT_ST_RESTART, }; /** @@ -98,12 +119,20 @@ enum fc_rport_state { * @port_id: Port ID of the discovered port */ struct fc_disc_port { - struct fc_lport *lp; - struct list_head peers; - struct work_struct rport_work; - u32 port_id; + struct fc_lport *lp; + struct list_head peers; + struct work_struct rport_work; + u32 port_id; }; +/** + * enum fc_rport_event - Remote port events + * @RPORT_EV_NONE: No event + * @RPORT_EV_READY: Remote port is ready for use + * @RPORT_EV_FAILED: State machine failed, remote port is not ready + * @RPORT_EV_STOP: Remote port has been stopped + * @RPORT_EV_LOGO: Remote port logout (LOGO) sent + */ enum fc_rport_event { RPORT_EV_NONE = 0, RPORT_EV_READY, @@ -114,6 +143,10 @@ enum fc_rport_event { struct fc_rport_priv; +/** + * struct fc_rport_operations - Operations for a remote port + * @event_callback: Function to be called for remote port events + */ struct fc_rport_operations { void (*event_callback)(struct fc_lport *, struct fc_rport_priv *, enum fc_rport_event); @@ -121,11 +154,11 @@ struct fc_rport_operations { /** * struct fc_rport_libfc_priv - libfc internal information about a remote port - * @local_port: Fibre Channel host port instance - * @rp_state: indicates READY for I/O or DELETE when blocked. 
- * @flags: REC and RETRY supported flags - * @e_d_tov: error detect timeout value (in msec) - * @r_a_tov: resource allocation timeout value (in msec) + * @local_port: The associated local port + * @rp_state: Indicates READY for I/O or DELETE when blocked + * @flags: REC and RETRY supported flags + * @e_d_tov: Error detect timeout value (in msec) + * @r_a_tov: Resource allocation timeout value (in msec) */ struct fc_rport_libfc_priv { struct fc_lport *local_port; @@ -138,47 +171,64 @@ struct fc_rport_libfc_priv { }; /** - * struct fc_rport_priv - libfc rport and discovery info about a remote port - * @local_port: Fibre Channel host port instance - * @rport: transport remote port - * @kref: reference counter - * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges - * @ids: remote port identifiers and roles - * @flags: REC and RETRY supported flags - * @max_seq: maximum number of concurrent sequences - * @disc_id: discovery identifier - * @maxframe_size: maximum frame size - * @retries: retry count in current state - * @e_d_tov: error detect timeout value (in msec) - * @r_a_tov: resource allocation timeout value (in msec) - * @rp_mutex: mutex protects rport - * @retry_work: - * @event_callback: Callback for rport READY, FAILED or LOGO + * struct fc_rport_priv - libfc remote port and discovery info + * @local_port: The associated local port + * @rport: The FC transport remote port + * @kref: Reference counter + * @rp_state: Enumeration that tracks progress of PLOGI, PRLI, + * and RTV exchanges + * @ids: The remote port identifiers and roles + * @flags: REC and RETRY supported flags + * @max_seq: Maximum number of concurrent sequences + * @disc_id: The discovery identifier + * @maxframe_size: The maximum frame size + * @retries: The retry count for the current state + * @e_d_tov: Error detect timeout value (in msec) + * @r_a_tov: Resource allocation timeout value (in msec) + * @rp_mutex: The mutex that protects the remote port + * @retry_work: Handle for retries + * @event_callback: Callback when READY, FAILED or LOGO states complete */ struct fc_rport_priv { - struct fc_lport *local_port; - struct fc_rport *rport; - struct kref kref; - enum fc_rport_state rp_state; + struct fc_lport *local_port; + struct fc_rport *rport; + struct kref kref; + enum fc_rport_state rp_state; struct fc_rport_identifiers ids; - u16 flags; - u16 max_seq; - u16 disc_id; - u16 maxframe_size; - unsigned int retries; - unsigned int e_d_tov; - unsigned int r_a_tov; - struct mutex rp_mutex; - struct delayed_work retry_work; - enum fc_rport_event event; - struct fc_rport_operations *ops; - struct list_head peers; - struct work_struct event_work; - u32 supported_classes; + u16 flags; + u16 max_seq; + u16 disc_id; + u16 maxframe_size; + unsigned int retries; + unsigned int e_d_tov; + unsigned int r_a_tov; + struct mutex rp_mutex; + struct delayed_work retry_work; + enum fc_rport_event event; + struct fc_rport_operations *ops; + struct list_head peers; + struct work_struct event_work; + u32 supported_classes; }; -/* - * fcoe stats structure +/** + * struct fcoe_dev_stats - fcoe stats structure + * @SecondsSinceLastReset: Seconds since the last reset + * @TxFrames: Number of transmitted frames + * @TxWords: Number of transmitted words + * @RxFrames: Number of received frames + * @RxWords: Number of received words + * @ErrorFrames: Number of received error frames + * @DumpedFrames: Number of dumped frames + * @LinkFailureCount: Number of link failures + * @LossOfSignalCount: Number for signal losses + * 
@InvalidTxWordCount: Number of invalid transmitted words + * @InvalidCRCCount: Number of invalid CRCs + * @InputRequests: Number of input requests + * @OutputRequests: Number of output requests + * @ControlRequests: Number of control requests + * @InputMegabytes: Number of received megabytes + * @OutputMegabytes: Number of transmitted megabytes */ struct fcoe_dev_stats { u64 SecondsSinceLastReset; @@ -199,10 +249,13 @@ struct fcoe_dev_stats { u64 OutputMegabytes; }; -/* - * els data is used for passing ELS respone specific - * data to send ELS response mainly using infomation - * in exchange and sequence in EM layer. +/** + * struct fc_seq_els_data - ELS data used for passing ELS specific responses + * @fp: The ELS frame + * @reason: The reason for rejection + * @explan: The explaination of the rejection + * + * Mainly used by the exchange manager layer. */ struct fc_seq_els_data { struct fc_frame *fp; @@ -210,77 +263,87 @@ struct fc_seq_els_data { enum fc_els_rjt_explan explan; }; -/* - * FCP request structure, one for each scsi cmd request +/** + * struct fc_fcp_pkt - FCP request structure (one for each scsi_cmnd request) + * @lp: The associated local port + * @state: The state of the I/O + * @tgt_flags: Target's flags + * @ref_cnt: Reference count + * @scsi_pkt_lock: Lock to protect the SCSI packet (must be taken before the + * host_lock if both are to be held at the same time) + * @cmd: The SCSI command (set and clear with the host_lock held) + * @list: Tracks queued commands (accessed with the host_lock held) + * @timer: The command timer + * @tm_done: Completion indicator + * @wait_for_comp: Indicator to wait for completion of the I/O (in jiffies) + * @start_time: Timestamp indicating the start of the I/O (in jiffies) + * @end_time: Timestamp indicating the end of the I/O (in jiffies) + * @last_pkt_time: Timestamp of the last frame received (in jiffies) + * @data_len: The length of the data + * @cdb_cmd: The CDB command + * @xfer_len: The transfer length + * @xfer_ddp: Indicates if this transfer used DDP (XID of the exchange + * will be set here if DDP was setup) + * @xfer_contig_end: The offset into the buffer if the buffer is contiguous + * (Tx and Rx) + * @max_payload: The maximum payload size (in bytes) + * @io_status: SCSI result (upper 24 bits) + * @cdb_status: CDB status + * @status_code: FCP I/O status + * @scsi_comp_flags: Completion flags (bit 3 Underrun bit 2: overrun) + * @req_flags: Request flags (bit 0: read bit:1 write) + * @scsi_resid: SCSI residule length + * @rport: The remote port that the SCSI command is targeted at + * @seq_ptr: The sequence that will carry the SCSI command + * @recov_retry: Number of recovery retries + * @recov_seq: The sequence for REC or SRR */ struct fc_fcp_pkt { - /* - * housekeeping stuff - */ - struct fc_lport *lp; /* handle to hba struct */ - u16 state; /* scsi_pkt state state */ - u16 tgt_flags; /* target flags */ - atomic_t ref_cnt; /* fcp pkt ref count */ - spinlock_t scsi_pkt_lock; /* Must be taken before the host lock - * if both are held at the same time */ - /* - * SCSI I/O related stuff - */ - struct scsi_cmnd *cmd; /* scsi command pointer. set/clear - * under host lock */ - struct list_head list; /* tracks queued commands. 
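/*
 * A minimal sketch, not part of this patch: building the fc_seq_els_data
 * described above to reject an unsupported ELS request through the
 * optional seq_els_rsp_send() template operation.  The function name
 * example_reject_els is hypothetical; the ELS_* reject constants are
 * assumed to come from <scsi/fc/fc_els.h>.
 */
#include <scsi/libfc.h>
#include <scsi/fc/fc_els.h>

static void example_reject_els(struct fc_lport *lport, struct fc_seq *sp,
			       struct fc_frame *fp)
{
	struct fc_seq_els_data rjt_data;

	rjt_data.fp = NULL;			/* no payload frame to send */
	rjt_data.reason = ELS_RJT_UNSUP;	/* command not supported */
	rjt_data.explan = ELS_EXPL_NONE;
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);			/* done with the request frame */
}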
access under - * host lock */ - /* - * timeout related stuff - */ - struct timer_list timer; /* command timer */ + /* Housekeeping information */ + struct fc_lport *lp; + u16 state; + u16 tgt_flags; + atomic_t ref_cnt; + spinlock_t scsi_pkt_lock; + + /* SCSI I/O related information */ + struct scsi_cmnd *cmd; + struct list_head list; + + /* Timeout related information */ + struct timer_list timer; struct completion tm_done; - int wait_for_comp; - unsigned long start_time; /* start jiffie */ - unsigned long end_time; /* end jiffie */ - unsigned long last_pkt_time; /* jiffies of last frame received */ - - /* - * scsi cmd and data transfer information - */ - u32 data_len; - /* - * transport related veriables - */ - struct fcp_cmnd cdb_cmd; - size_t xfer_len; - u16 xfer_ddp; /* this xfer is ddped */ - u32 xfer_contig_end; /* offset of end of contiguous xfer */ - u16 max_payload; /* max payload size in bytes */ - - /* - * scsi/fcp return status - */ - u32 io_status; /* SCSI result upper 24 bits */ - u8 cdb_status; - u8 status_code; /* FCP I/O status */ - /* bit 3 Underrun bit 2: overrun */ - u8 scsi_comp_flags; - u32 req_flags; /* bit 0: read bit:1 write */ - u32 scsi_resid; /* residule length */ - - struct fc_rport *rport; /* remote port pointer */ - struct fc_seq *seq_ptr; /* current sequence pointer */ - /* - * Error Processing - */ - u8 recov_retry; /* count of recovery retries */ - struct fc_seq *recov_seq; /* sequence for REC or SRR */ + int wait_for_comp; + unsigned long start_time; + unsigned long end_time; + unsigned long last_pkt_time; + + /* SCSI command and data transfer information */ + u32 data_len; + + /* Transport related veriables */ + struct fcp_cmnd cdb_cmd; + size_t xfer_len; + u16 xfer_ddp; + u32 xfer_contig_end; + u16 max_payload; + + /* SCSI/FCP return status */ + u32 io_status; + u8 cdb_status; + u8 status_code; + u8 scsi_comp_flags; + u32 req_flags; + u32 scsi_resid; + + /* Associated structures */ + struct fc_rport *rport; + struct fc_seq *seq_ptr; + + /* Error Processing information */ + u8 recov_retry; + struct fc_seq *recov_seq; }; -/* - * FC_FCP HELPER FUNCTIONS - *****************************/ -static inline bool fc_fcp_is_read(const struct fc_fcp_pkt *fsp) -{ - if (fsp && fsp->cmd) - return fsp->cmd->sc_data_direction == DMA_FROM_DEVICE; - return false; -} /* * Structure and function definitions for managing Fibre Channel Exchanges @@ -293,23 +356,51 @@ static inline bool fc_fcp_is_read(const struct fc_fcp_pkt *fsp) struct fc_exch_mgr; struct fc_exch_mgr_anchor; -extern u16 fc_cpu_mask; /* cpu mask for possible cpus */ +extern u16 fc_cpu_mask; /* cpu mask for possible cpus */ -/* - * Sequence. +/** + * struct fc_seq - FC sequence + * @id: The sequence ID + * @ssb_stat: Status flags for the sequence status block (SSB) + * @cnt: Number of frames sent so far + * @rec_data: FC-4 value for REC */ struct fc_seq { - u8 id; /* seq ID */ - u16 ssb_stat; /* status flags for sequence status block */ - u16 cnt; /* frames sent so far on sequence */ - u32 rec_data; /* FC-4 value for REC */ + u8 id; + u16 ssb_stat; + u16 cnt; + u32 rec_data; }; #define FC_EX_DONE (1 << 0) /* ep is completed */ #define FC_EX_RST_CLEANUP (1 << 1) /* reset is forcing completion */ -/* - * Exchange. 
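/*
 * A minimal sketch, not part of this patch: the fc_fcp_is_read() helper
 * above maps naturally onto a DMA direction when an LLD sets up a
 * transfer.  example_data_direction is a hypothetical name.
 */
#include <scsi/libfc.h>
#include <linux/dma-mapping.h>

static enum dma_data_direction example_data_direction(struct fc_fcp_pkt *fsp)
{
	return fc_fcp_is_read(fsp) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}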
+/** + * struct fc_exch - Fibre Channel Exchange + * @em: Exchange manager + * @pool: Exchange pool + * @state: The exchange's state + * @xid: The exchange ID + * @ex_list: Handle used by the EM to track free exchanges + * @ex_lock: Lock that protects the exchange + * @ex_refcnt: Reference count + * @timeout_work: Handle for timeout handler + * @lp: The local port that this exchange is on + * @oxid: Originator's exchange ID + * @rxid: Responder's exchange ID + * @oid: Originator's FCID + * @sid: Source FCID + * @did: Destination FCID + * @esb_stat: ESB exchange status + * @r_a_tov: Resouce allocation time out value (in msecs) + * @seq_id: The next sequence ID to use + * @f_ctl: F_CTL flags for the sequence + * @fh_type: The frame type + * @class: The class of service + * @seq: The sequence in use on this exchange + * @resp: Callback for responses on this exchange + * @destructor: Called when destroying the exchange + * @arg: Passed as a void pointer to the resp() callback * * Locking notes: The ex_lock protects following items: * state, esb_stat, f_ctl, seq.ssb_stat @@ -317,76 +408,59 @@ struct fc_seq { * sequence allocation */ struct fc_exch { - struct fc_exch_mgr *em; /* exchange manager */ - struct fc_exch_pool *pool; /* per cpu exches pool */ - u32 state; /* internal driver state */ - u16 xid; /* our exchange ID */ - struct list_head ex_list; /* free or busy list linkage */ - spinlock_t ex_lock; /* lock covering exchange state */ - atomic_t ex_refcnt; /* reference counter */ - struct delayed_work timeout_work; /* timer for upper level protocols */ - struct fc_lport *lp; /* fc device instance */ - u16 oxid; /* originator's exchange ID */ - u16 rxid; /* responder's exchange ID */ - u32 oid; /* originator's FCID */ - u32 sid; /* source FCID */ - u32 did; /* destination FCID */ - u32 esb_stat; /* exchange status for ESB */ - u32 r_a_tov; /* r_a_tov from rport (msec) */ - u8 seq_id; /* next sequence ID to use */ - u32 f_ctl; /* F_CTL flags for sequences */ - u8 fh_type; /* frame type */ - enum fc_class class; /* class of service */ - struct fc_seq seq; /* single sequence */ - /* - * Handler for responses to this current exchange. 
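/*
 * A minimal sketch, not part of this patch: a response handler is handed
 * a struct fc_seq and can recover the owning exchange with the
 * fc_seq_exch() helper defined just below.  example_resp is a
 * hypothetical callback; libfc may also hand the handler an error
 * pointer instead of a real frame, hence the IS_ERR() check.
 */
#include <linux/err.h>
#include <scsi/libfc.h>

static void example_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	if (IS_ERR(fp))
		return;		/* timeout or exchange torn down */

	pr_info("exchange 0x%04x: oxid 0x%04x rxid 0x%04x\n",
		ep->xid, ep->oxid, ep->rxid);
	fc_frame_free(fp);
}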
- */ - void (*resp)(struct fc_seq *, struct fc_frame *, void *); - void (*destructor)(struct fc_seq *, void *); - /* - * arg is passed as void pointer to exchange - * resp and destructor handlers - */ - void *arg; + struct fc_exch_mgr *em; + struct fc_exch_pool *pool; + u32 state; + u16 xid; + struct list_head ex_list; + spinlock_t ex_lock; + atomic_t ex_refcnt; + struct delayed_work timeout_work; + struct fc_lport *lp; + u16 oxid; + u16 rxid; + u32 oid; + u32 sid; + u32 did; + u32 esb_stat; + u32 r_a_tov; + u8 seq_id; + u32 f_ctl; + u8 fh_type; + enum fc_class class; + struct fc_seq seq; + + void (*resp)(struct fc_seq *, struct fc_frame *, void *); + void *arg; + + void (*destructor)(struct fc_seq *, void *); + }; #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) -struct libfc_function_template { +struct libfc_function_template { /* * Interface to send a FC frame * * STATUS: REQUIRED */ - int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp); + int (*frame_send)(struct fc_lport *, struct fc_frame *); /* * Interface to send ELS/CT frames * * STATUS: OPTIONAL */ - struct fc_seq *(*elsct_send)(struct fc_lport *lport, - u32 did, - struct fc_frame *fp, - unsigned int op, + struct fc_seq *(*elsct_send)(struct fc_lport *, u32 did, + struct fc_frame *, unsigned int op, void (*resp)(struct fc_seq *, - struct fc_frame *fp, - void *arg), + struct fc_frame *, void *arg), void *arg, u32 timer_msec); /* * Send the FC frame payload using a new exchange and sequence. * - * The frame pointer with some of the header's fields must be - * filled before calling exch_seq_send(), those fields are, - * - * - routing control - * - FC port did - * - FC port sid - * - FC header type - * - frame control - * - parameter or relative offset - * * The exchange response handler is set in this routine to resp() * function pointer. It can be called in two scenarios: if a timeout * occurs or if a response frame is received for the exchange. The @@ -407,14 +481,13 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - struct fc_seq *(*exch_seq_send)(struct fc_lport *lp, - struct fc_frame *fp, - void (*resp)(struct fc_seq *sp, - struct fc_frame *fp, - void *arg), - void (*destructor)(struct fc_seq *sp, - void *arg), - void *arg, unsigned int timer_msec); + struct fc_seq *(*exch_seq_send)(struct fc_lport *, struct fc_frame *, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void (*destructor)(struct fc_seq *, + void *), + void *, unsigned int timer_msec); /* * Sets up the DDP context for a given exchange id on the given @@ -422,22 +495,22 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - int (*ddp_setup)(struct fc_lport *lp, u16 xid, - struct scatterlist *sgl, unsigned int sgc); + int (*ddp_setup)(struct fc_lport *, u16, struct scatterlist *, + unsigned int); /* * Completes the DDP transfer and returns the length of data DDPed * for the given exchange id. * * STATUS: OPTIONAL */ - int (*ddp_done)(struct fc_lport *lp, u16 xid); + int (*ddp_done)(struct fc_lport *, u16); /* * Send a frame using an existing sequence and exchange. 
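/*
 * A minimal sketch, not part of this patch: frame_send is the only
 * REQUIRED entry in libfc_function_template; an LLD supplies its
 * transmit routine and lets fc_exch_init(), fc_fcp_init() and friends
 * fill in the optional hooks.  fcoe_libfc_config(), shown later in this
 * series, copies such a template into lport->tt.  The names
 * example_xmit/example_templ are hypothetical.
 */
#include <scsi/libfc.h>

static int example_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
	/* hand the frame to the hardware or the Ethernet stack here */
	fc_frame_free(fp);
	return 0;
}

static struct libfc_function_template example_templ = {
	.frame_send = example_xmit,
};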
* * STATUS: OPTIONAL */ - int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp, - struct fc_frame *fp); + int (*seq_send)(struct fc_lport *, struct fc_seq *, + struct fc_frame *); /* * Send an ELS response using infomation from a previous @@ -445,8 +518,8 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd, - struct fc_seq_els_data *els_data); + void (*seq_els_rsp_send)(struct fc_seq *, enum fc_els_cmd, + struct fc_seq_els_data *); /* * Abort an exchange and sequence. Generally called because of a @@ -458,7 +531,7 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - int (*seq_exch_abort)(const struct fc_seq *req_sp, + int (*seq_exch_abort)(const struct fc_seq *, unsigned int timer_msec); /* @@ -467,14 +540,14 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - void (*exch_done)(struct fc_seq *sp); + void (*exch_done)(struct fc_seq *); /* * Start a new sequence on the same exchange/sequence tuple. * * STATUS: OPTIONAL */ - struct fc_seq *(*seq_start_next)(struct fc_seq *sp); + struct fc_seq *(*seq_start_next)(struct fc_seq *); /* * Reset an exchange manager, completing all sequences and exchanges. @@ -483,8 +556,7 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - void (*exch_mgr_reset)(struct fc_lport *, - u32 s_id, u32 d_id); + void (*exch_mgr_reset)(struct fc_lport *, u32 s_id, u32 d_id); /* * Flush the rport work queue. Generally used before shutdown. @@ -498,8 +570,8 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp, - struct fc_frame *fp); + void (*lport_recv)(struct fc_lport *, struct fc_seq *, + struct fc_frame *); /* * Reset the local port. @@ -565,31 +637,31 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - int (*fcp_cmd_send)(struct fc_lport *lp, struct fc_fcp_pkt *fsp, - void (*resp)(struct fc_seq *, struct fc_frame *fp, - void *arg)); + int (*fcp_cmd_send)(struct fc_lport *, struct fc_fcp_pkt *, + void (*resp)(struct fc_seq *, struct fc_frame *, + void *)); /* * Cleanup the FCP layer, used durring link down and reset * * STATUS: OPTIONAL */ - void (*fcp_cleanup)(struct fc_lport *lp); + void (*fcp_cleanup)(struct fc_lport *); /* * Abort all I/O on a local port * * STATUS: OPTIONAL */ - void (*fcp_abort_io)(struct fc_lport *lp); + void (*fcp_abort_io)(struct fc_lport *); /* * Receive a request for the discovery layer. * * STATUS: OPTIONAL */ - void (*disc_recv_req)(struct fc_seq *, - struct fc_frame *, struct fc_lport *); + void (*disc_recv_req)(struct fc_seq *, struct fc_frame *, + struct fc_lport *); /* * Start discovery for a local port. 
@@ -618,133 +690,224 @@ struct libfc_function_template { void (*disc_stop_final) (struct fc_lport *); }; -/* information used by the discovery layer */ +/** + * struct fc_disc - Discovery context + * @retry_count: Number of retries + * @pending: 1 if discovery is pending, 0 if not + * @requesting: 1 if discovery has been requested, 0 if not + * @seq_count: Number of sequences used for discovery + * @buf_len: Length of the discovery buffer + * @disc_id: Discovery ID + * @rports: List of discovered remote ports + * @lport: The local port that discovery is for + * @disc_mutex: Mutex that protects the discovery context + * @partial_buf: Partial name buffer (if names are returned + * in multiple frames) + * @disc_work: handle for delayed work context + * @disc_callback: Callback routine called when discovery completes + */ struct fc_disc { - unsigned char retry_count; - unsigned char pending; - unsigned char requested; - unsigned short seq_count; - unsigned char buf_len; - u16 disc_id; + unsigned char retry_count; + unsigned char pending; + unsigned char requested; + unsigned short seq_count; + unsigned char buf_len; + u16 disc_id; + + struct list_head rports; + struct fc_lport *lport; + struct mutex disc_mutex; + struct fc_gpn_ft_resp partial_buf; + struct delayed_work disc_work; void (*disc_callback)(struct fc_lport *, enum fc_disc_event); - - struct list_head rports; - struct fc_lport *lport; - struct mutex disc_mutex; - struct fc_gpn_ft_resp partial_buf; /* partial name buffer */ - struct delayed_work disc_work; }; +/** + * struct fc_lport - Local port + * @host: The SCSI host associated with a local port + * @ema_list: Exchange manager anchor list + * @dns_rdata: The directory server remote port + * @ptp_rdata: Point to point remote port + * @scsi_priv: FCP layer internal data + * @disc: Discovery context + * @vports: Child vports if N_Port + * @vport: Parent vport if VN_Port + * @tt: Libfc function template + * @link_up: Link state (1 = link up, 0 = link down) + * @qfull: Queue state (1 queue is full, 0 queue is not full) + * @state: Identifies the state + * @boot_time: Timestamp indicating when the local port came online + * @host_stats: SCSI host statistics + * @dev_stats: FCoE device stats (TODO: libfc should not be + * FCoE aware) + * @retry_count: Number of retries in the current state + * @wwpn: World Wide Port Name + * @wwnn: World Wide Node Name + * @service_params: Common service parameters + * @e_d_tov: Error detection timeout value + * @r_a_tov: Resouce allocation timeout value + * @rnid_gen: RNID information + * @sg_supp: Indicates if scatter gather is supported + * @seq_offload: Indicates if sequence offload is supported + * @crc_offload: Indicates if CRC offload is supported + * @lro_enabled: Indicates if large receive offload is supported + * @does_npiv: Supports multiple vports + * @npiv_enabled: Switch/fabric allows NPIV + * @mfs: The maximum Fibre Channel payload size + * @max_retry_count: The maximum retry attempts + * @max_rport_retry_count: The maximum remote port retry attempts + * @lro_xid: The maximum XID for LRO + * @lso_max: The maximum large offload send size + * @fcts: FC-4 type mask + * @lp_mutex: Mutex to protect the local port + * @list: Handle for list of local ports + * @retry_work: Handle to local port for delayed retry context + */ struct fc_lport { - struct list_head list; - /* Associations */ - struct Scsi_Host *host; - struct list_head ema_list; - struct list_head vports; /* child vports if N_Port */ - struct fc_vport *vport; /* parent vport if 
VN_Port */ - struct fc_rport_priv *dns_rp; - struct fc_rport_priv *ptp_rp; - void *scsi_priv; - struct fc_disc disc; + struct Scsi_Host *host; + struct list_head ema_list; + struct fc_rport_priv *dns_rdata; + struct fc_rport_priv *ptp_rdata; + void *scsi_priv; + struct fc_disc disc; + + /* Virtual port information */ + struct list_head vports; + struct fc_vport *vport; /* Operational Information */ struct libfc_function_template tt; - u8 link_up; - u8 qfull; - enum fc_lport_state state; - unsigned long boot_time; - - struct fc_host_statistics host_stats; - struct fcoe_dev_stats *dev_stats; - - u64 wwpn; - u64 wwnn; - u8 retry_count; + u8 link_up; + u8 qfull; + enum fc_lport_state state; + unsigned long boot_time; + struct fc_host_statistics host_stats; + struct fcoe_dev_stats *dev_stats; + u8 retry_count; + + /* Fabric information */ + u64 wwpn; + u64 wwnn; + unsigned int service_params; + unsigned int e_d_tov; + unsigned int r_a_tov; + struct fc_els_rnid_gen rnid_gen; /* Capabilities */ - u32 sg_supp:1; /* scatter gather supported */ - u32 seq_offload:1; /* seq offload supported */ - u32 crc_offload:1; /* crc offload supported */ - u32 lro_enabled:1; /* large receive offload */ - u32 does_npiv:1; /* supports multiple vports */ - u32 npiv_enabled:1; /* switch/fabric allows NPIV */ - u32 mfs; /* max FC payload size */ - unsigned int service_params; - unsigned int e_d_tov; - unsigned int r_a_tov; - u8 max_retry_count; - u8 max_rport_retry_count; - u16 link_speed; - u16 link_supported_speeds; - u16 lro_xid; /* max xid for fcoe lro */ - unsigned int lso_max; /* max large send size */ - struct fc_ns_fts fcts; /* FC-4 type masks */ - struct fc_els_rnid_gen rnid_gen; /* RNID information */ - - /* Semaphores */ - struct mutex lp_mutex; + u32 sg_supp:1; + u32 seq_offload:1; + u32 crc_offload:1; + u32 lro_enabled:1; + u32 does_npiv:1; + u32 npiv_enabled:1; + u32 mfs; + u8 max_retry_count; + u8 max_rport_retry_count; + u16 link_speed; + u16 link_supported_speeds; + u16 lro_xid; + unsigned int lso_max; + struct fc_ns_fts fcts; /* Miscellaneous */ - struct delayed_work retry_work; + struct mutex lp_mutex; + struct list_head list; + struct delayed_work retry_work; }; /* * FC_LPORT HELPER FUNCTIONS *****************************/ -static inline int fc_lport_test_ready(struct fc_lport *lp) + +/** + * fc_lport_test_ready() - Determine if a local port is in the READY state + * @lport: The local port to test + */ +static inline int fc_lport_test_ready(struct fc_lport *lport) { - return lp->state == LPORT_ST_READY; + return lport->state == LPORT_ST_READY; } -static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn) +/** + * fc_set_wwnn() - Set the World Wide Node Name of a local port + * @lport: The local port whose WWNN is to be set + * @wwnn: The new WWNN + */ +static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn) { - lp->wwnn = wwnn; + lport->wwnn = wwnn; } -static inline void fc_set_wwpn(struct fc_lport *lp, u64 wwnn) +/** + * fc_set_wwpn() - Set the World Wide Port Name of a local port + * @lport: The local port whose WWPN is to be set + * @wwnn: The new WWPN + */ +static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn) { - lp->wwpn = wwnn; + lport->wwpn = wwnn; } -static inline void fc_lport_state_enter(struct fc_lport *lp, +/** + * fc_lport_state_enter() - Change a local port's state + * @lport: The local port whose state is to change + * @state: The new state + */ +static inline void fc_lport_state_enter(struct fc_lport *lport, enum fc_lport_state state) { - if (state != 
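/*
 * A minimal sketch, not part of this patch: the fc_set_wwnn(),
 * fc_set_wwpn() and fc_lport_test_ready() helpers in use during LLD
 * setup.  example_setup_names is hypothetical and the world wide names
 * are placeholders.
 */
#include <scsi/libfc.h>

static void example_setup_names(struct fc_lport *lport)
{
	fc_set_wwnn(lport, 0x1000000000000001ULL);	/* placeholder WWNN */
	fc_set_wwpn(lport, 0x2000000000000001ULL);	/* placeholder WWPN */

	if (fc_lport_test_ready(lport))
		pr_info("local port is already in LPORT_ST_READY\n");
}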
lp->state) - lp->retry_count = 0; - lp->state = state; + if (state != lport->state) + lport->retry_count = 0; + lport->state = state; } -static inline int fc_lport_init_stats(struct fc_lport *lp) +/** + * fc_lport_init_stats() - Allocate per-CPU statistics for a local port + * @lport: The local port whose statistics are to be initialized + */ +static inline int fc_lport_init_stats(struct fc_lport *lport) { - /* allocate per cpu stats block */ - lp->dev_stats = alloc_percpu(struct fcoe_dev_stats); - if (!lp->dev_stats) + lport->dev_stats = alloc_percpu(struct fcoe_dev_stats); + if (!lport->dev_stats) return -ENOMEM; return 0; } -static inline void fc_lport_free_stats(struct fc_lport *lp) +/** + * fc_lport_free_stats() - Free memory for a local port's statistics + * @lport: The local port whose statistics are to be freed + */ +static inline void fc_lport_free_stats(struct fc_lport *lport) { - free_percpu(lp->dev_stats); + free_percpu(lport->dev_stats); } -static inline struct fcoe_dev_stats *fc_lport_get_stats(struct fc_lport *lp) +/** + * fc_lport_get_stats() - Get a local port's statistics + * @lport: The local port whose statistics are to be retreived + */ +static inline struct fcoe_dev_stats *fc_lport_get_stats(struct fc_lport *lport) { - return per_cpu_ptr(lp->dev_stats, smp_processor_id()); + return per_cpu_ptr(lport->dev_stats, smp_processor_id()); } -static inline void *lport_priv(const struct fc_lport *lp) +/** + * lport_priv() - Return the private data from a local port + * @lport: The local port whose private data is to be retreived + */ +static inline void *lport_priv(const struct fc_lport *lport) { - return (void *)(lp + 1); + return (void *)(lport + 1); } /** - * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport - * @sht: ptr to the scsi host templ - * @priv_size: size of private data after fc_lport + * libfc_host_alloc() - Allocate a Scsi_Host with room for a local port and + * LLD private data + * @sht: The SCSI host template + * @priv_size: Size of private data * * Returns: libfc lport */ @@ -765,156 +928,73 @@ libfc_host_alloc(struct scsi_host_template *sht, int priv_size) } /* - * LOCAL PORT LAYER + * FC_FCP HELPER FUNCTIONS *****************************/ -int fc_lport_init(struct fc_lport *lp); - -/* - * Destroy the specified local port by finding and freeing all - * fc_rports associated with it and then by freeing the fc_lport - * itself. - */ -int fc_lport_destroy(struct fc_lport *lp); - -/* - * Logout the specified local port from the fabric - */ -int fc_fabric_logoff(struct fc_lport *lp); - -/* - * Initiate the LP state machine. This handler will use fc_host_attr - * to store the FLOGI service parameters, so fc_host_attr must be - * initialized before calling this handler. - */ -int fc_fabric_login(struct fc_lport *lp); +static inline bool fc_fcp_is_read(const struct fc_fcp_pkt *fsp) +{ + if (fsp && fsp->cmd) + return fsp->cmd->sc_data_direction == DMA_FROM_DEVICE; + return false; +} /* - * The link is up for the given local port. - */ + * LOCAL PORT LAYER + *****************************/ +int fc_lport_init(struct fc_lport *); +int fc_lport_destroy(struct fc_lport *); +int fc_fabric_logoff(struct fc_lport *); +int fc_fabric_login(struct fc_lport *); void __fc_linkup(struct fc_lport *); void fc_linkup(struct fc_lport *); - -/* - * Link is down for the given local port. - */ void __fc_linkdown(struct fc_lport *); void fc_linkdown(struct fc_lport *); - -/* - * Configure the local port. 
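/*
 * A minimal sketch, not part of this patch: the per-CPU statistics
 * helpers above in use.  example_count_tx_frame is hypothetical;
 * fc_lport_get_stats() returns the block for the current CPU, so
 * counters are bumped from a context bound to one CPU (for example the
 * transmit or NET_RX path).
 */
#include <scsi/libfc.h>

static void example_count_tx_frame(struct fc_lport *lport, u32 wlen)
{
	/* fc_lport_init_stats()/fc_lport_free_stats() bracket the lport's lifetime */
	struct fcoe_dev_stats *stats = fc_lport_get_stats(lport);

	stats->TxFrames++;
	stats->TxWords += wlen;
}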
- */ +void fc_vport_setlink(struct fc_lport *); +void fc_vports_linkchange(struct fc_lport *); int fc_lport_config(struct fc_lport *); - -/* - * Reset the local port. - */ int fc_lport_reset(struct fc_lport *); - -/* - * Set the mfs or reset - */ -int fc_set_mfs(struct fc_lport *lp, u32 mfs); - -/* - * Allocate a new lport struct for an NPIV VN_Port - */ -struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize); - -/* - * Find an NPIV VN_Port by port ID - */ -struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id); - -/* - * NPIV VN_Port link state management - */ -void fc_vport_setlink(struct fc_lport *vn_port); -void fc_vports_linkchange(struct fc_lport *n_port); - -/* - * Issue fc pass-thru request via bsg interface - */ -int fc_lport_bsg_request(struct fc_bsg_job *job); - +int fc_set_mfs(struct fc_lport *, u32 mfs); +struct fc_lport *libfc_vport_create(struct fc_vport *, int privsize); +struct fc_lport *fc_vport_id_lookup(struct fc_lport *, u32 port_id); +int fc_lport_bsg_request(struct fc_bsg_job *); /* * REMOTE PORT LAYER *****************************/ -int fc_rport_init(struct fc_lport *lp); -void fc_rport_terminate_io(struct fc_rport *rp); +int fc_rport_init(struct fc_lport *); +void fc_rport_terminate_io(struct fc_rport *); /* * DISCOVERY LAYER *****************************/ -int fc_disc_init(struct fc_lport *lp); - +int fc_disc_init(struct fc_lport *); /* - * SCSI LAYER + * FCP LAYER *****************************/ -/* - * Initialize the SCSI block of libfc - */ int fc_fcp_init(struct fc_lport *); +void fc_fcp_destroy(struct fc_lport *); /* - * This section provides an API which allows direct interaction - * with the SCSI-ml. Each of these functions satisfies a function - * pointer defined in Scsi_Host and therefore is always called - * directly from the SCSI-ml. - */ -int fc_queuecommand(struct scsi_cmnd *sc_cmd, + * SCSI INTERACTION LAYER + *****************************/ +int fc_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); - -/* - * Send an ABTS frame to the target device. The sc_cmd argument - * is a pointer to the SCSI command to be aborted. - */ -int fc_eh_abort(struct scsi_cmnd *sc_cmd); - -/* - * Reset a LUN by sending send the tm cmd to the target. - */ -int fc_eh_device_reset(struct scsi_cmnd *sc_cmd); - -/* - * Reset the host adapter. - */ -int fc_eh_host_reset(struct scsi_cmnd *sc_cmd); - -/* - * Check rport status. - */ -int fc_slave_alloc(struct scsi_device *sdev); - -/* - * Adjust the queue depth. - */ -int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason); - -/* - * Change the tag type. - */ -int fc_change_queue_type(struct scsi_device *sdev, int tag_type); - -/* - * Free memory pools used by the FCP layer. 
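/*
 * A minimal sketch, not part of this patch: the SCSI interaction layer
 * entry points above are meant to be plugged straight into a
 * scsi_host_template, much as the fcoe driver does.  The template name
 * and the tuning values here are illustrative only.
 */
#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <scsi/libfc.h>

static struct scsi_host_template example_shost_template = {
	.module			 = THIS_MODULE,
	.name			 = "example libfc LLD",
	.queuecommand		 = fc_queuecommand,
	.eh_abort_handler	 = fc_eh_abort,
	.eh_device_reset_handler = fc_eh_device_reset,
	.eh_host_reset_handler	 = fc_eh_host_reset,
	.slave_alloc		 = fc_slave_alloc,
	.change_queue_depth	 = fc_change_queue_depth,
	.change_queue_type	 = fc_change_queue_type,
	.this_id		 = -1,
	.can_queue		 = 1024,
	.cmd_per_lun		 = 3,
	.sg_tablesize		 = SG_ALL,
	.max_sectors		 = 0xffff,
};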
- */ -void fc_fcp_destroy(struct fc_lport *); +int fc_eh_abort(struct scsi_cmnd *); +int fc_eh_device_reset(struct scsi_cmnd *); +int fc_eh_host_reset(struct scsi_cmnd *); +int fc_slave_alloc(struct scsi_device *); +int fc_change_queue_depth(struct scsi_device *, int qdepth, int reason); +int fc_change_queue_type(struct scsi_device *, int tag_type); /* * ELS/CT interface *****************************/ -/* - * Initializes ELS/CT interface - */ -int fc_elsct_init(struct fc_lport *lp); -struct fc_seq *fc_elsct_send(struct fc_lport *lport, - u32 did, - struct fc_frame *fp, +int fc_elsct_init(struct fc_lport *); +struct fc_seq *fc_elsct_send(struct fc_lport *, u32 did, + struct fc_frame *, unsigned int op, void (*resp)(struct fc_seq *, - struct fc_frame *fp, + struct fc_frame *, void *arg), void *arg, u32 timer_msec); void fc_lport_flogi_resp(struct fc_seq *, struct fc_frame *, void *); @@ -924,90 +1004,26 @@ void fc_lport_logo_resp(struct fc_seq *, struct fc_frame *, void *); /* * EXCHANGE MANAGER LAYER *****************************/ -/* - * Initializes Exchange Manager related - * function pointers in struct libfc_function_template. - */ -int fc_exch_init(struct fc_lport *lp); - -/* - * Adds Exchange Manager (EM) mp to lport. - * - * Adds specified mp to lport using struct fc_exch_mgr_anchor, - * the struct fc_exch_mgr_anchor allows same EM sharing by - * more than one lport with their specified match function, - * the match function is used in allocating exchange from - * added mp. - */ -struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, - struct fc_exch_mgr *mp, +int fc_exch_init(struct fc_lport *); +struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *, + struct fc_exch_mgr *, bool (*match)(struct fc_frame *)); - -/* - * Deletes Exchange Manager (EM) from lport by removing - * its anchor ema from lport. - * - * If removed anchor ema was the last user of its associated EM - * then also destroys associated EM. - */ -void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema); - -/* - * Clone an exchange manager list, getting reference holds for each EM. - * This is for use with NPIV and sharing the X_ID space between VN_Ports. - */ +void fc_exch_mgr_del(struct fc_exch_mgr_anchor *); int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst); - -/* - * Allocates an Exchange Manager (EM). - * - * The EM manages exchanges for their allocation and - * free, also allows exchange lookup for received - * frame. - * - * The class is used for initializing FC class of - * allocated exchange from EM. - * - * The min_xid and max_xid will limit new - * exchange ID (XID) within this range for - * a new exchange. - * The LLD may choose to have multiple EMs, - * e.g. one EM instance per CPU receive thread in LLD. - * - * Specified match function is used in allocating exchanges - * from newly allocated EM. - */ -struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, - enum fc_class class, - u16 min_xid, - u16 max_xid, +struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *, enum fc_class class, + u16 min_xid, u16 max_xid, bool (*match)(struct fc_frame *)); - -/* - * Free all exchange managers of a lport. - */ -void fc_exch_mgr_free(struct fc_lport *lport); - -/* - * Receive a frame on specified local port and exchange manager. - */ -void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp); - -/* - * Reset all EMs of a lport, releasing its all sequences and - * exchanges. If sid is non-zero, then reset only exchanges - * we sourced from that FID. 
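/*
 * A minimal sketch, not part of this patch: allocating an exchange
 * manager for a local port with the interface above.  The XID range and
 * NULL match function are illustrative; fcoe, for instance, reserves
 * part of its range for DDP-offloaded exchanges.  example_setup_em is a
 * hypothetical name.
 */
#include <linux/errno.h>
#include <scsi/libfc.h>

static int example_setup_em(struct fc_lport *lport)
{
	struct fc_exch_mgr *em;

	em = fc_exch_mgr_alloc(lport, FC_CLASS_3, 0x0001, 0x0fff, NULL);
	if (!em)
		return -ENOMEM;

	/* all EMs of the lport are released later with fc_exch_mgr_free() */
	return 0;
}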
If did is non-zero, reset only - * exchanges destined to that FID. - */ +void fc_exch_mgr_free(struct fc_lport *); +void fc_exch_recv(struct fc_lport *, struct fc_frame *); void fc_exch_mgr_reset(struct fc_lport *, u32 s_id, u32 d_id); /* * Functions for fc_functions_template */ -void fc_get_host_speed(struct Scsi_Host *shost); -void fc_get_host_port_type(struct Scsi_Host *shost); -void fc_get_host_port_state(struct Scsi_Host *shost); -void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout); +void fc_get_host_speed(struct Scsi_Host *); +void fc_get_host_port_type(struct Scsi_Host *); +void fc_get_host_port_state(struct Scsi_Host *); +void fc_set_rport_loss_tmo(struct fc_rport *, u32 timeout); struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *); #endif /* _LIBFC_H_ */ -- cgit v1.2.3-59-g8ed1b From 70b51aabf3b03fbf8d61c14847ccce4c69fb0cdd Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 3 Nov 2009 11:47:45 -0800 Subject: [SCSI] libfcoe: formatting and comment cleanups Ensures that there are kernel-doc style comments for all routines and structures. There were also a few instances of fc_lport's named 'lp' which were switched to 'lport' as per the libfc/libfcoe/fcoe naming convention. Also, emacs 'indent-region' and 'tabify' were ran on libfcoe.c. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 216 +++++++++++++++++++++++--------------------- include/scsi/libfcoe.h | 84 ++++++++--------- 2 files changed, 153 insertions(+), 147 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 6a93ba96569f..6b07a8400889 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -59,15 +59,15 @@ unsigned int libfcoe_debug_logging; module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); -#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ +#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ #define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ -#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ -do { \ - if (unlikely(libfcoe_debug_logging & LEVEL)) \ - do { \ - CMD; \ - } while (0); \ +#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ +do { \ + if (unlikely(libfcoe_debug_logging & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ } while (0) #define LIBFCOE_DBG(fmt, args...) \ @@ -78,7 +78,10 @@ do { \ LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ printk(KERN_INFO "fip: " fmt, ##args);) -/* +/** + * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid + * @fcf: The FCF to check + * * Return non-zero if FCF fcoe_size has been validated. */ static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf) @@ -86,7 +89,10 @@ static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf) return (fcf->flags & FIP_FL_SOL) != 0; } -/* +/** + * fcoe_ctlr_fcf_usable() - Check if a FCF is usable + * @fcf: The FCF to check + * * Return non-zero if the FCF is usable. */ static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) @@ -97,8 +103,8 @@ static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) } /** - * fcoe_ctlr_init() - Initialize the FCoE Controller instance. - * @fip: FCoE controller. 
+ * fcoe_ctlr_init() - Initialize the FCoE Controller instance + * @fip: The FCoE controller to initialize */ void fcoe_ctlr_init(struct fcoe_ctlr *fip) { @@ -114,8 +120,8 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip) EXPORT_SYMBOL(fcoe_ctlr_init); /** - * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller. - * @fip: FCoE controller. + * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller + * @fip: The FCoE controller whose FCFs are to be reset * * Called with &fcoe_ctlr lock held. */ @@ -134,8 +140,8 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip) } /** - * fcoe_ctlr_destroy() - Disable and tear-down the FCoE controller. - * @fip: FCoE controller. + * fcoe_ctlr_destroy() - Disable and tear down a FCoE controller + * @fip: The FCoE controller to tear down * * This is called by FCoE drivers before freeing the &fcoe_ctlr. * @@ -162,8 +168,8 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) EXPORT_SYMBOL(fcoe_ctlr_destroy); /** - * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port. - * @fip: FCoE controller. + * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port + * @fip: The FCoE controller to get the maximum FCoE size from * * Returns the maximum packet size including the FCoE header and trailer, * but not including any Ethernet or VLAN headers. @@ -180,9 +186,9 @@ static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip) } /** - * fcoe_ctlr_solicit() - Send a solicitation. - * @fip: FCoE controller. - * @fcf: Destination FCF. If NULL, a multicast solicitation is sent. + * fcoe_ctlr_solicit() - Send a FIP solicitation + * @fip: The FCoE controller to send the solicitation on + * @fcf: The destination FCF (if NULL, a multicast solicitation is sent) */ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) { @@ -241,8 +247,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) } /** - * fcoe_ctlr_link_up() - Start FCoE controller. - * @fip: FCoE controller. + * fcoe_ctlr_link_up() - Start FCoE controller + * @fip: The FCoE controller to start * * Called from the LLD when the network link is ready. */ @@ -268,15 +274,15 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) EXPORT_SYMBOL(fcoe_ctlr_link_up); /** - * fcoe_ctlr_reset() - Reset FIP. - * @fip: FCoE controller. - * @new_state: FIP state to be entered. + * fcoe_ctlr_reset() - Reset a FCoE controller + * @fip: The FCoE controller to reset + * @new_state: The FIP state to be entered * * Returns non-zero if the link was up and now isn't. */ static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state) { - struct fc_lport *lp = fip->lp; + struct fc_lport *lport = fip->lp; int link_dropped; spin_lock_bh(&fip->lock); @@ -294,19 +300,19 @@ static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state) spin_unlock_bh(&fip->lock); if (link_dropped) - fc_linkdown(lp); + fc_linkdown(lport); if (new_state == FIP_ST_ENABLED) { fcoe_ctlr_solicit(fip, NULL); - fc_linkup(lp); + fc_linkup(lport); link_dropped = 0; } return link_dropped; } /** - * fcoe_ctlr_link_down() - Stop FCoE controller. - * @fip: FCoE controller. + * fcoe_ctlr_link_down() - Stop a FCoE controller + * @fip: The FCoE controller to be stopped * * Returns non-zero if the link was up and now isn't. * @@ -320,11 +326,11 @@ int fcoe_ctlr_link_down(struct fcoe_ctlr *fip) EXPORT_SYMBOL(fcoe_ctlr_link_down); /** - * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF. - * @fip: FCoE controller. 
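/*
 * A minimal sketch, not part of this patch, of the fcoe_ctlr lifecycle
 * an LLD drives around the helpers documented here.  The callback
 * passed in (and the example_* names) are hypothetical; the fcoe driver
 * wires up equivalent hooks in a later patch of this series.
 */
#include <linux/skbuff.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

static void example_start_fip(struct fcoe_ctlr *fip, struct fc_lport *lport,
			      void (*send)(struct fcoe_ctlr *, struct sk_buff *))
{
	fcoe_ctlr_init(fip);		/* state, FCF list, timer, work items */
	fip->lp = lport;
	fip->send = send;		/* LLD transmit hook for FIP frames */
	fcoe_ctlr_link_up(fip);		/* link is ready; start solicitation */
}

static void example_stop_fip(struct fcoe_ctlr *fip)
{
	fcoe_ctlr_link_down(fip);
	fcoe_ctlr_destroy(fip);		/* flush work and free cached FCFs */
}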
- * @lport: libfc fc_lport to send from - * @ports: 0 for controller keep-alive, 1 for port keep-alive. - * @sa: source MAC address. + * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF + * @fip: The FCoE controller to send the FKA on + * @lport: libfc fc_lport to send from + * @ports: 0 for controller keep-alive, 1 for port keep-alive + * @sa: The source MAC address * * A controller keep-alive is sent every fka_period (typically 8 seconds). * The source MAC is the native MAC address. @@ -369,7 +375,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, kal->fip.fip_op = htons(FIP_OP_CTRL); kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE; kal->fip.fip_dl_len = htons((sizeof(kal->mac) + - ports * sizeof(*vn)) / FIP_BPW); + ports * sizeof(*vn)) / FIP_BPW); kal->fip.fip_flags = htons(FIP_FL_FPMA); if (fip->spma) kal->fip.fip_flags |= htons(FIP_FL_SPMA); @@ -393,11 +399,10 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, } /** - * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it. - * @fip: FCoE controller. - * @lport: libfc fc_lport to use for the source address - * @dtype: FIP descriptor type for the frame. - * @skb: FCoE ELS frame including FC header but no FCoE headers. + * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it + * @fip: The FCoE controller for the ELS frame + * @dtype: The FIP descriptor type for the frame + * @skb: The FCoE ELS frame including FC header but no FCoE headers * * Returns non-zero error code on failure. * @@ -553,9 +558,9 @@ drop: } EXPORT_SYMBOL(fcoe_ctlr_els_send); -/* - * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller. - * @fip: FCoE controller. +/** + * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller + * @fip: The FCoE controller to free FCFs on * * Called with lock held. * @@ -596,9 +601,9 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) } /** - * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry. - * @skb: received FIP advertisement frame - * @fcf: resulting FCF entry. + * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry + * @skb: The received FIP advertisement frame + * @fcf: The resulting FCF entry * * Returns zero on a valid parsed advertisement, * otherwise returns non zero value. @@ -699,9 +704,9 @@ len_err: } /** - * fcoe_ctlr_recv_adv() - Handle an incoming advertisement. - * @fip: FCoE controller. - * @skb: Received FIP packet. + * fcoe_ctlr_recv_adv() - Handle an incoming advertisement + * @fip: The FCoE controller receiving the advertisement + * @skb: The received FIP packet */ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) { @@ -784,7 +789,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) */ if (mtu_valid && !fip->sel_time && fcoe_ctlr_fcf_usable(fcf)) { fip->sel_time = jiffies + - msecs_to_jiffies(FCOE_CTLR_START_DELAY); + msecs_to_jiffies(FCOE_CTLR_START_DELAY); if (!timer_pending(&fip->timer) || time_before(fip->sel_time, fip->timer.expires)) mod_timer(&fip->timer, fip->sel_time); @@ -794,13 +799,13 @@ out: } /** - * fcoe_ctlr_recv_els() - Handle an incoming FIP-encapsulated ELS frame. - * @fip: FCoE controller. - * @skb: Received FIP packet. 
+ * fcoe_ctlr_recv_els() - Handle an incoming FIP encapsulated ELS frame + * @fip: The FCoE controller which received the packet + * @skb: The received FIP packet */ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) { - struct fc_lport *lp = fip->lp; + struct fc_lport *lport = fip->lp; struct fip_header *fiph; struct fc_frame *fp = (struct fc_frame *)skb; struct fc_frame_header *fh = NULL; @@ -886,13 +891,13 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) fc_frame_init(fp); fr_sof(fp) = FC_SOF_I3; fr_eof(fp) = FC_EOF_T; - fr_dev(fp) = lp; + fr_dev(fp) = lport; - stats = fc_lport_get_stats(lp); + stats = fc_lport_get_stats(lport); stats->RxFrames++; stats->RxWords += skb->len / FIP_BPW; - fc_exch_recv(lp, fp); + fc_exch_recv(lport, fp); return; len_err: @@ -903,15 +908,15 @@ drop: } /** - * fcoe_ctlr_recv_els() - Handle an incoming link reset frame. - * @fip: FCoE controller. - * @fh: Received FIP header. + * fcoe_ctlr_recv_els() - Handle an incoming link reset frame + * @fip: The FCoE controller that received the frame + * @fh: The received FIP header * * There may be multiple VN_Port descriptors. * The overall length has already been checked. */ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, - struct fip_header *fh) + struct fip_header *fh) { struct fip_desc *desc; struct fip_mac_desc *mp; @@ -920,13 +925,13 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, size_t rlen; size_t dlen; struct fcoe_fcf *fcf = fip->sel_fcf; - struct fc_lport *lp = fip->lp; + struct fc_lport *lport = fip->lp; u32 desc_mask; LIBFCOE_FIP_DBG("Clear Virtual Link received\n"); if (!fcf) return; - if (!fcf || !fc_host_port_id(lp->host)) + if (!fcf || !fc_host_port_id(lport->host)) return; /* @@ -962,9 +967,10 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, if (dlen < sizeof(*vp)) return; if (compare_ether_addr(vp->fd_mac, - fip->get_src_addr(lp)) == 0 && - get_unaligned_be64(&vp->fd_wwpn) == lp->wwpn && - ntoh24(vp->fd_fc_id) == fc_host_port_id(lp->host)) + fip->get_src_addr(lport)) == 0 && + get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn && + ntoh24(vp->fd_fc_id) == + fc_host_port_id(lport->host)) desc_mask &= ~BIT(FIP_DT_VN_ID); break; default: @@ -989,9 +995,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, } /** - * fcoe_ctlr_recv() - Receive a FIP frame. - * @fip: FCoE controller. - * @skb: Received FIP packet. + * fcoe_ctlr_recv() - Receive a FIP packet + * @fip: The FCoE controller that received the packet + * @skb: The received FIP packet * * This is called from NET_RX_SOFTIRQ. */ @@ -1005,9 +1011,9 @@ void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) EXPORT_SYMBOL(fcoe_ctlr_recv); /** - * fcoe_ctlr_recv_handler() - Receive a FIP frame. - * @fip: FCoE controller. - * @skb: Received FIP packet. + * fcoe_ctlr_recv_handler() - Receive a FIP frame + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame * * Returns non-zero if the frame is dropped. */ @@ -1064,8 +1070,8 @@ drop: } /** - * fcoe_ctlr_select() - Select the best FCF, if possible. - * @fip: FCoE controller. + * fcoe_ctlr_select() - Select the best FCF (if possible) + * @fip: The FCoE controller * * If there are conflicting advertisements, no FCF can be chosen. * @@ -1106,8 +1112,8 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) } /** - * fcoe_ctlr_timeout() - FIP timer function. - * @arg: &fcoe_ctlr pointer. 
+ * fcoe_ctlr_timeout() - FIP timeout handler + * @arg: The FCoE controller that timed out * * Ages FCFs. Triggers FCF selection if possible. Sends keep-alives. */ @@ -1142,12 +1148,12 @@ static void fcoe_ctlr_timeout(unsigned long arg) fip->lp->host->host_no, sel->fcf_mac); memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); fip->port_ka_time = jiffies + - msecs_to_jiffies(FIP_VN_KA_PERIOD); + msecs_to_jiffies(FIP_VN_KA_PERIOD); fip->ctlr_ka_time = jiffies + sel->fka_period; fip->link = 1; } else { printk(KERN_NOTICE "libfcoe: host%d: " - "FIP Fibre-Channel Forwarder timed out. " + "FIP Fibre-Channel Forwarder timed out. " "Starting FCF discovery.\n", fip->lp->host->host_no); fip->link = 0; @@ -1165,7 +1171,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) if (time_after_eq(jiffies, fip->port_ka_time)) { fip->port_ka_time += jiffies + - msecs_to_jiffies(FIP_VN_KA_PERIOD); + msecs_to_jiffies(FIP_VN_KA_PERIOD); fip->send_port_ka = 1; } if (time_after(next_timer, fip->port_ka_time)) @@ -1173,7 +1179,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) mod_timer(&fip->timer, next_timer); } else if (fip->sel_time) { next_timer = fip->sel_time + - msecs_to_jiffies(FCOE_CTLR_START_DELAY); + msecs_to_jiffies(FCOE_CTLR_START_DELAY); mod_timer(&fip->timer, next_timer); } if (fip->send_ctlr_ka || fip->send_port_ka) @@ -1182,8 +1188,8 @@ static void fcoe_ctlr_timeout(unsigned long arg) } /** - * fcoe_ctlr_link_work() - worker thread function for link changes. - * @work: pointer to link_work member inside &fcoe_ctlr. + * fcoe_ctlr_link_work() - Worker thread function for link changes + * @work: Handle to a FCoE controller * * See if the link status has changed and if so, report it. * @@ -1230,8 +1236,8 @@ static void fcoe_ctlr_link_work(struct work_struct *work) } /** - * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames. - * @recv_work: pointer to recv_work member inside &fcoe_ctlr. + * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames + * @recv_work: Handle to a FCoE controller */ static void fcoe_ctlr_recv_work(struct work_struct *recv_work) { @@ -1249,11 +1255,10 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work) } /** - * fcoe_ctlr_recv_flogi() - snoop Pre-FIP receipt of FLOGI response or request. - * @fip: FCoE controller. - * @lport: libfc fc_lport instance received on - * @fp: FC frame. - * @sa: Ethernet source MAC address from received FCoE frame. + * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response or request + * @fip: The FCoE controller + * @fp: The FC frame to snoop + * @sa: Ethernet source MAC address from received FCoE frame * * Snoop potential response to FLOGI or even incoming FLOGI. * @@ -1323,10 +1328,10 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, EXPORT_SYMBOL(fcoe_ctlr_recv_flogi); /** - * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN. 
- * @mac: mac address - * @scheme: check port - * @port: port indicator for converting + * fcoe_wwn_from_mac() - Converts a 48-bit IEEE MAC address to a 64-bit FC WWN + * @mac: The MAC address to convert + * @scheme: The scheme to use when converting + * @port: The port indicator for converting * * Returns: u64 fc world wide name */ @@ -1364,23 +1369,24 @@ u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); /** - * fcoe_libfc_config() - sets up libfc related properties for lport - * @lp: ptr to the fc_lport - * @tt: libfc function template + * fcoe_libfc_config() - Sets up libfc related properties for local port + * @lp: The local port to configure libfc for + * @tt: The libfc function template * * Returns : 0 for success */ -int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt) +int fcoe_libfc_config(struct fc_lport *lport, + struct libfc_function_template *tt) { /* Set the function pointers set by the LLDD */ - memcpy(&lp->tt, tt, sizeof(*tt)); - if (fc_fcp_init(lp)) + memcpy(&lport->tt, tt, sizeof(*tt)); + if (fc_fcp_init(lport)) return -ENOMEM; - fc_exch_init(lp); - fc_elsct_init(lp); - fc_lport_init(lp); - fc_rport_init(lp); - fc_disc_init(lp); + fc_exch_init(lport); + fc_elsct_init(lport); + fc_lport_init(lport); + fc_rport_init(lport); + fc_disc_init(lport); return 0; } diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 8ef5e209c216..76d08c9a7678 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -53,35 +53,35 @@ enum fip_state { }; /** - * struct fcoe_ctlr - FCoE Controller and FIP state. - * @state: internal FIP state for network link and FIP or non-FIP mode. - * @lp: &fc_lport: libfc local port. - * @sel_fcf: currently selected FCF, or NULL. - * @fcfs: list of discovered FCFs. - * @fcf_count: number of discovered FCF entries. - * @sol_time: time when a multicast solicitation was last sent. - * @sel_time: time after which to select an FCF. - * @port_ka_time: time of next port keep-alive. - * @ctlr_ka_time: time of next controller keep-alive. - * @timer: timer struct used for all delayed events. - * @link_work: &work_struct for doing FCF selection. - * @recv_work: &work_struct for receiving FIP frames. + * struct fcoe_ctlr - FCoE Controller and FIP state + * @state: internal FIP state for network link and FIP or non-FIP mode. + * @lp: &fc_lport: libfc local port. + * @sel_fcf: currently selected FCF, or NULL. + * @fcfs: list of discovered FCFs. + * @fcf_count: number of discovered FCF entries. + * @sol_time: time when a multicast solicitation was last sent. + * @sel_time: time after which to select an FCF. + * @port_ka_time: time of next port keep-alive. + * @ctlr_ka_time: time of next controller keep-alive. + * @timer: timer struct used for all delayed events. + * @link_work: &work_struct for doing FCF selection. + * @recv_work: &work_struct for receiving FIP frames. * @fip_recv_list: list of received FIP frames. - * @user_mfs: configured maximum FC frame size, including FC header. - * @flogi_oxid: exchange ID of most recent fabric login. - * @flogi_count: number of FLOGI attempts in AUTO mode. - * @link: current link status for libfc. - * @last_link: last link state reported to libfc. - * @map_dest: use the FC_MAP mode for destination MAC addresses. - * @spma: supports SPMA server-provided MACs mode - * @send_ctlr_ka: need to send controller keep alive - * @send_port_ka: need to send port keep alives - * @dest_addr: MAC address of the selected FC forwarder. 
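/*
 * A minimal sketch, not the kernel implementation: the idea behind the
 * fcoe_wwn_from_mac() conversion documented above.  NAA scheme 1 keeps
 * the 48-bit MAC in the low bits of the WWN; scheme 2 additionally
 * encodes a port number in bits 48-59.  The exact validity checks of
 * the real helper may differ.
 */
#include <linux/types.h>

static u64 example_wwn_from_mac(const u8 mac[6], unsigned int scheme,
				unsigned int port)
{
	u64 wwn = 0;
	int i;

	for (i = 0; i < 6; i++)
		wwn = (wwn << 8) | mac[i];	/* MAC bytes, network order */

	wwn |= (u64)scheme << 60;		/* NAA nibble */
	if (scheme == 2)
		wwn |= (u64)(port & 0xfff) << 48;
	return wwn;
}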
- * @ctl_src_addr: the native MAC address of our local port. - * @send: LLD-supplied function to handle sending of FIP Ethernet frames. - * @update_mac: LLD-supplied function to handle changes to MAC addresses. - * @get_src_addr: LLD-supplied function to supply a source MAC address. - * @lock: lock protecting this structure. + * @user_mfs: configured maximum FC frame size, including FC header. + * @flogi_oxid: exchange ID of most recent fabric login. + * @flogi_count: number of FLOGI attempts in AUTO mode. + * @link: current link status for libfc. + * @last_link: last link state reported to libfc. + * @map_dest: use the FC_MAP mode for destination MAC addresses. + * @spma: supports SPMA server-provided MACs mode + * @send_ctlr_ka: need to send controller keep alive + * @send_port_ka: need to send port keep alives + * @dest_addr: MAC address of the selected FC forwarder. + * @ctl_src_addr: the native MAC address of our local port. + * @send: LLD-supplied function to handle sending FIP Ethernet frames + * @update_mac: LLD-supplied function to handle changes to MAC addresses. + * @get_src_addr: LLD-supplied function to supply a source MAC address. + * @lock: lock protecting this structure. * * This structure is used by all FCoE drivers. It contains information * needed by all FCoE low-level drivers (LLDs) as well as internal state @@ -119,18 +119,18 @@ struct fcoe_ctlr { spinlock_t lock; }; -/* - * struct fcoe_fcf - Fibre-Channel Forwarder. - * @list: list linkage. - * @time: system time (jiffies) when an advertisement was last received. - * @switch_name: WWN of switch from advertisement. - * @fabric_name: WWN of fabric from advertisement. - * @fc_map: FC_MAP value from advertisement. - * @fcf_mac: Ethernet address of the FCF. - * @vfid: virtual fabric ID. - * @pri: seletion priority, smaller values are better. - * @flags: flags received from advertisement. - * @fka_period: keep-alive period, in jiffies. +/** + * struct fcoe_fcf - Fibre-Channel Forwarder + * @list: list linkage + * @time: system time (jiffies) when an advertisement was last received + * @switch_name: WWN of switch from advertisement + * @fabric_name: WWN of fabric from advertisement + * @fc_map: FC_MAP value from advertisement + * @fcf_mac: Ethernet address of the FCF + * @vfid: virtual fabric ID + * @pri: selection priority, smaller values are better + * @flags: flags received from advertisement + * @fka_period: keep-alive period, in jiffies * * A Fibre-Channel Forwarder (FCF) is the entity on the Ethernet that * passes FCoE frames on to an FC fabric. This structure represents @@ -161,8 +161,8 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *); int fcoe_ctlr_link_down(struct fcoe_ctlr *); int fcoe_ctlr_els_send(struct fcoe_ctlr *, struct fc_lport *, struct sk_buff *); void fcoe_ctlr_recv(struct fcoe_ctlr *, struct sk_buff *); -int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *lport, - struct fc_frame *fp, u8 *sa); +int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *, + struct fc_frame *, u8 *); /* libfcoe funcs */ u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); -- cgit v1.2.3-59-g8ed1b From 1875f27e291d05711f15a8a3d486abfeaf385931 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 3 Nov 2009 11:47:50 -0800 Subject: [SCSI] fcoe: Formatting cleanups and commenting Added kernel-doc comment blocks to all structures and functions. Renamed fc_lport instances rom lp to lport to be inline with our naming convention. 
Renamed all misnamed net_device instances to netdev to be inline with our naming convention. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 722 +++++++++++++++++++++++++---------------------- drivers/scsi/fcoe/fcoe.h | 75 +++-- 2 files changed, 441 insertions(+), 356 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 8f078d306a0a..5615dfe10bf5 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -66,14 +66,14 @@ LIST_HEAD(fcoe_hostlist); DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); /* Function Prototypes */ -static int fcoe_reset(struct Scsi_Host *shost); +static int fcoe_reset(struct Scsi_Host *); static int fcoe_xmit(struct fc_lport *, struct fc_frame *); static int fcoe_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); -static int fcoe_percpu_receive_thread(void *arg); -static void fcoe_clean_pending_queue(struct fc_lport *lp); -static void fcoe_percpu_clean(struct fc_lport *lp); -static int fcoe_link_ok(struct fc_lport *lp); +static int fcoe_percpu_receive_thread(void *); +static void fcoe_clean_pending_queue(struct fc_lport *); +static void fcoe_percpu_clean(struct fc_lport *); +static int fcoe_link_ok(struct fc_lport *); static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); static int fcoe_hostlist_add(const struct fc_lport *); @@ -82,21 +82,68 @@ static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *); static int fcoe_device_notification(struct notifier_block *, ulong, void *); static void fcoe_dev_setup(void); static void fcoe_dev_cleanup(void); -static struct fcoe_interface * - fcoe_hostlist_lookup_port(const struct net_device *dev); +static struct fcoe_interface +*fcoe_hostlist_lookup_port(const struct net_device *); + +static int fcoe_fip_recv(struct sk_buff *, struct net_device *, + struct packet_type *, struct net_device *); + +static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *); +static void fcoe_update_src_mac(struct fc_lport *, u8 *); +static u8 *fcoe_get_src_mac(struct fc_lport *); +static void fcoe_destroy_work(struct work_struct *); + +static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *, + unsigned int); +static int fcoe_ddp_done(struct fc_lport *, u16); + +static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); + +static int fcoe_create(const char *, struct kernel_param *); +static int fcoe_destroy(const char *, struct kernel_param *); + +static u8 *fcoe_get_src_mac(struct fc_lport *); +static void fcoe_destroy_work(struct work_struct *); -/* notification function from net device */ +static struct fc_seq *fcoe_elsct_send(struct fc_lport *, + u32 did, struct fc_frame *, + unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *, u32 timeout); + +module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(create, "string"); +MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in."); +module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(destroy, "string"); +MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); + +/* notification function for packets from net device */ static struct notifier_block fcoe_notifier = { .notifier_call = fcoe_device_notification, }; +/* notification function for CPU hotplug events */ +static struct notifier_block fcoe_cpu_notifier = { + .notifier_call = fcoe_cpu_callback, +}; + static struct scsi_transport_template 
*fcoe_transport_template; static struct scsi_transport_template *fcoe_vport_transport_template; -static int fcoe_vport_destroy(struct fc_vport *vport); -static int fcoe_vport_create(struct fc_vport *vport, bool disabled); -static int fcoe_vport_disable(struct fc_vport *vport, bool disable); -static void fcoe_set_vport_symbolic_name(struct fc_vport *vport); +static int fcoe_vport_destroy(struct fc_vport *); +static int fcoe_vport_create(struct fc_vport *, bool disabled); +static int fcoe_vport_disable(struct fc_vport *, bool disable); +static void fcoe_set_vport_symbolic_name(struct fc_vport *); + +static struct libfc_function_template fcoe_libfc_fcn_templ = { + .frame_send = fcoe_xmit, + .ddp_setup = fcoe_ddp_setup, + .ddp_done = fcoe_ddp_done, + .elsct_send = fcoe_elsct_send, +}; struct fc_function_template fcoe_transport_function = { .show_host_node_name = 1, @@ -192,13 +239,10 @@ static struct scsi_host_template fcoe_shost_template = { .max_sectors = 0xffff, }; -static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *ptype, - struct net_device *orig_dev); /** - * fcoe_interface_setup() - * @fcoe: new fcoe_interface - * @netdev : ptr to the associated netdevice struct + * fcoe_interface_setup() - Setup a FCoE interface + * @fcoe: The new FCoE interface + * @netdev: The net device that the fcoe interface is on * * Returns : 0 for success * Locking: must be called with the RTNL mutex held @@ -273,14 +317,9 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, return 0; } -static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb); -static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr); -static u8 *fcoe_get_src_mac(struct fc_lport *lport); -static void fcoe_destroy_work(struct work_struct *work); - /** - * fcoe_interface_create() - * @netdev: network interface + * fcoe_interface_create() - Create a FCoE interface on a net device + * @netdev: The net device to create the FCoE interface on * * Returns: pointer to a struct fcoe_interface or NULL on error */ @@ -311,8 +350,8 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) } /** - * fcoe_interface_cleanup() - clean up netdev configurations - * @fcoe: + * fcoe_interface_cleanup() - Clean up a FCoE interface + * @fcoe: The FCoE interface to be cleaned up * * Caller must be holding the RTNL mutex */ @@ -351,7 +390,7 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) /** * fcoe_interface_release() - fcoe_port kref release function - * @kref: embedded reference count in an fcoe_interface struct + * @kref: Embedded reference count in an fcoe_interface struct */ static void fcoe_interface_release(struct kref *kref) { @@ -367,8 +406,8 @@ static void fcoe_interface_release(struct kref *kref) } /** - * fcoe_interface_get() - * @fcoe: + * fcoe_interface_get() - Get a reference to a FCoE interface + * @fcoe: The FCoE interface to be held */ static inline void fcoe_interface_get(struct fcoe_interface *fcoe) { @@ -376,8 +415,8 @@ static inline void fcoe_interface_get(struct fcoe_interface *fcoe) } /** - * fcoe_interface_put() - * @fcoe: + * fcoe_interface_put() - Put a reference to a FCoE interface + * @fcoe: The FCoE interface to be released */ static inline void fcoe_interface_put(struct fcoe_interface *fcoe) { @@ -385,15 +424,16 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe) } /** - * fcoe_fip_recv - handle a received FIP frame. 
- * @skb: the receive skb - * @dev: associated &net_device - * @ptype: the &packet_type structure which was used to register this handler. - * @orig_dev: original receive &net_device, in case @dev is a bond. + * fcoe_fip_recv() - Handler for received FIP frames + * @skb: The receive skb + * @netdev: The associated net device + * @ptype: The packet_type structure which was used to register this handler + * @orig_dev: The original net_device the the skb was received on. + * (in case dev is a bond) * * Returns: 0 for success */ -static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, +static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev, struct packet_type *ptype, struct net_device *orig_dev) { @@ -405,9 +445,9 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, } /** - * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame. - * @fip: FCoE controller. - * @skb: FIP Packet. + * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame + * @fip: The FCoE controller + * @skb: The FIP packet to be sent */ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { @@ -416,9 +456,9 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) } /** - * fcoe_update_src_mac() - Update Ethernet MAC filters. - * @lport: libfc lport - * @addr: Unicast MAC address to add. + * fcoe_update_src_mac() - Update the Ethernet MAC filters + * @lport: The local port to update the source MAC on + * @addr: Unicast MAC address to add * * Remove any previously-set unicast MAC filter. * Add secondary FCoE MAC address filter for our OUI. @@ -449,60 +489,59 @@ static u8 *fcoe_get_src_mac(struct fc_lport *lport) } /** - * fcoe_lport_config() - sets up the fc_lport - * @lp: ptr to the fc_lport + * fcoe_lport_config() - Set up a local port + * @lport: The local port to be setup * * Returns: 0 for success */ -static int fcoe_lport_config(struct fc_lport *lp) +static int fcoe_lport_config(struct fc_lport *lport) { - lp->link_up = 0; - lp->qfull = 0; - lp->max_retry_count = 3; - lp->max_rport_retry_count = 3; - lp->e_d_tov = 2 * 1000; /* FC-FS default */ - lp->r_a_tov = 2 * 2 * 1000; - lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | - FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); - lp->does_npiv = 1; - - fc_lport_init_stats(lp); + lport->link_up = 0; + lport->qfull = 0; + lport->max_retry_count = 3; + lport->max_rport_retry_count = 3; + lport->e_d_tov = 2 * 1000; /* FC-FS default */ + lport->r_a_tov = 2 * 2 * 1000; + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lport->does_npiv = 1; + + fc_lport_init_stats(lport); /* lport fc_lport related configuration */ - fc_lport_config(lp); + fc_lport_config(lport); /* offload related configuration */ - lp->crc_offload = 0; - lp->seq_offload = 0; - lp->lro_enabled = 0; - lp->lro_xid = 0; - lp->lso_max = 0; + lport->crc_offload = 0; + lport->seq_offload = 0; + lport->lro_enabled = 0; + lport->lro_xid = 0; + lport->lso_max = 0; return 0; } /** - * fcoe_queue_timer() - fcoe queue timer - * @lp: the fc_lport pointer + * fcoe_queue_timer() - The fcoe queue timer + * @lport: The local port * * Calls fcoe_check_wait_queue on timeout - * */ -static void fcoe_queue_timer(ulong lp) +static void fcoe_queue_timer(ulong lport) { - fcoe_check_wait_queue((struct fc_lport *)lp, NULL); + fcoe_check_wait_queue((struct fc_lport *)lport, NULL); } /** - * fcoe_netdev_config() - Set up netdev for SW FCoE - * @lp : ptr to the fc_lport - * @netdev : ptr 
to the associated netdevice struct + * fcoe_netdev_config() - Set up net devive for SW FCoE + * @lport: The local port that is associated with the net device + * @netdev: The associated net device * - * Must be called after fcoe_lport_config() as it will use lport mutex + * Must be called after fcoe_lport_config() as it will use local port mutex * - * Returns : 0 for success + * Returns: 0 for success */ -static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) +static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) { u32 mfs; u64 wwnn, wwpn; @@ -510,7 +549,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) struct fcoe_port *port; /* Setup lport private data to point to fcoe softc */ - port = lport_priv(lp); + port = lport_priv(lport); fcoe = port->fcoe; /* @@ -524,91 +563,100 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs); } mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)); - if (fc_set_mfs(lp, mfs)) + if (fc_set_mfs(lport, mfs)) return -EINVAL; /* offload features support */ if (netdev->features & NETIF_F_SG) - lp->sg_supp = 1; + lport->sg_supp = 1; if (netdev->features & NETIF_F_FCOE_CRC) { - lp->crc_offload = 1; + lport->crc_offload = 1; FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); } if (netdev->features & NETIF_F_FSO) { - lp->seq_offload = 1; - lp->lso_max = netdev->gso_max_size; + lport->seq_offload = 1; + lport->lso_max = netdev->gso_max_size; FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", - lp->lso_max); + lport->lso_max); } if (netdev->fcoe_ddp_xid) { - lp->lro_enabled = 1; - lp->lro_xid = netdev->fcoe_ddp_xid; + lport->lro_enabled = 1; + lport->lro_xid = netdev->fcoe_ddp_xid; FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", - lp->lro_xid); + lport->lro_xid); } skb_queue_head_init(&port->fcoe_pending_queue); port->fcoe_pending_queue_active = 0; - setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp); + setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport); - if (!lp->vport) { + if (!lport->vport) { wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0); - fc_set_wwnn(lp, wwnn); + fc_set_wwnn(lport, wwnn); /* XXX - 3rd arg needs to be vlan id */ wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0); - fc_set_wwpn(lp, wwpn); + fc_set_wwpn(lport, wwpn); } return 0; } /** - * fcoe_shost_config() - Sets up fc_lport->host - * @lp : ptr to the fc_lport - * @shost : ptr to the associated scsi host - * @dev : device associated to scsi host + * fcoe_shost_config() - Set up the SCSI host associated with a local port + * @lport: The local port + * @shost: The SCSI host to associate with the local port + * @dev: The device associated with the SCSI host * * Must be called after fcoe_lport_config() and fcoe_netdev_config() * - * Returns : 0 for success + * Returns: 0 for success */ -static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, - struct device *dev) +static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost, + struct device *dev) { int rc = 0; /* lport scsi host config */ - lp->host->max_lun = FCOE_MAX_LUN; - lp->host->max_id = FCOE_MAX_FCP_TARGET; - lp->host->max_channel = 0; - if (lp->vport) - lp->host->transportt = fcoe_vport_transport_template; + lport->host->max_lun = FCOE_MAX_LUN; + lport->host->max_id = FCOE_MAX_FCP_TARGET; + lport->host->max_channel = 0; + if (lport->vport) + lport->host->transportt = 
fcoe_vport_transport_template; else - lp->host->transportt = fcoe_transport_template; + lport->host->transportt = fcoe_transport_template; /* add the new host to the SCSI-ml */ - rc = scsi_add_host(lp->host, dev); + rc = scsi_add_host(lport->host, dev); if (rc) { - FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: " + FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: " "error on scsi_add_host\n"); return rc; } - if (!lp->vport) - fc_host_max_npiv_vports(lp->host) = USHORT_MAX; + if (!lport->vport) + fc_host_max_npiv_vports(lport->host) = USHORT_MAX; - snprintf(fc_host_symbolic_name(lp->host), FC_SYMBOLIC_NAME_SIZE, + snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, "%s v%s over %s", FCOE_NAME, FCOE_VERSION, - fcoe_netdev(lp)->name); + fcoe_netdev(lport)->name); return 0; } -/* - * fcoe_oem_match() - match for read types IO - * @fp: the fc_frame for new IO. +/** + * fcoe_oem_match() - The match routine for the offloaded exchange manager + * @fp: The I/O frame * - * Returns : true for read types IO, otherwise returns false. + * This routine will be associated with an exchange manager (EM). When + * the libfc exchange handling code is looking for an EM to use it will + * call this routine and pass it the frame that it wishes to send. This + * routine will return True if the associated EM is to be used and False + * if the echange code should continue looking for an EM. + * + * The offload EM that this routine is associated with will handle any + * packets that are for SCSI read requests. + * + * Returns: True for read types I/O, otherwise returns false. */ bool fcoe_oem_match(struct fc_frame *fp) { @@ -617,14 +665,14 @@ bool fcoe_oem_match(struct fc_frame *fp) } /** - * fcoe_em_config() - allocates em for this lport - * @lp: the fcoe that em is to allocated for + * fcoe_em_config() - Allocate and configure an exchange manager + * @lport: The local port that the new EM will be associated with * - * Returns : 0 on success + * Returns: 0 on success */ -static inline int fcoe_em_config(struct fc_lport *lp) +static inline int fcoe_em_config(struct fc_lport *lport) { - struct fcoe_port *port = lport_priv(lp); + struct fcoe_port *port = lport_priv(lport); struct fcoe_interface *fcoe = port->fcoe; struct fcoe_interface *oldfcoe = NULL; struct net_device *old_real_dev, *cur_real_dev; @@ -635,8 +683,9 @@ static inline int fcoe_em_config(struct fc_lport *lp) * Check if need to allocate an em instance for * offload exchange ids to be shared across all VN_PORTs/lport. */ - if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) { - lp->lro_xid = 0; + if (!lport->lro_enabled || !lport->lro_xid || + (lport->lro_xid >= max_xid)) { + lport->lro_xid = 0; goto skip_oem; } @@ -662,16 +711,16 @@ static inline int fcoe_em_config(struct fc_lport *lp) } if (fcoe->oem) { - if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) { + if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) { printk(KERN_ERR "fcoe_em_config: failed to add " "offload em:%p on interface:%s\n", fcoe->oem, fcoe->netdev->name); return -ENOMEM; } } else { - fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3, - FCOE_MIN_XID, lp->lro_xid, - fcoe_oem_match); + fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3, + FCOE_MIN_XID, lport->lro_xid, + fcoe_oem_match); if (!fcoe->oem) { printk(KERN_ERR "fcoe_em_config: failed to allocate " "em for offload exches on interface:%s\n", @@ -683,10 +732,10 @@ static inline int fcoe_em_config(struct fc_lport *lp) /* * Exclude offload EM xid range from next EM xid range. 
*/ - min_xid += lp->lro_xid + 1; + min_xid += lport->lro_xid + 1; skip_oem: - if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) { + if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) { printk(KERN_ERR "fcoe_em_config: failed to " "allocate em on interface %s\n", fcoe->netdev->name); return -ENOMEM; @@ -696,8 +745,8 @@ skip_oem: } /** - * fcoe_if_destroy() - FCoE software HBA tear-down function - * @lport: fc_lport to destroy + * fcoe_if_destroy() - Tear down a SW FCoE instance + * @lport: The local port to be destroyed */ static void fcoe_if_destroy(struct fc_lport *lport) { @@ -745,72 +794,62 @@ static void fcoe_if_destroy(struct fc_lport *lport) scsi_host_put(lport->host); } -/* - * fcoe_ddp_setup - calls LLD's ddp_setup through net_device - * @lp: the corresponding fc_lport - * @xid: the exchange id for this ddp transfer - * @sgl: the scatterlist describing this transfer - * @sgc: number of sg items +/** + * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device + * @lport: The local port to setup DDP for + * @xid: The exchange ID for this DDP transfer + * @sgl: The scatterlist describing this transfer + * @sgc: The number of sg items * - * Returns : 0 no ddp + * Returns: 0 if the DDP context was not configured */ -static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid, - struct scatterlist *sgl, unsigned int sgc) +static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid, + struct scatterlist *sgl, unsigned int sgc) { - struct net_device *n = fcoe_netdev(lp); + struct net_device *netdev = fcoe_netdev(lport); - if (n->netdev_ops->ndo_fcoe_ddp_setup) - return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc); + if (netdev->netdev_ops->ndo_fcoe_ddp_setup) + return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev, + xid, sgl, + sgc); return 0; } -/* - * fcoe_ddp_done - calls LLD's ddp_done through net_device - * @lp: the corresponding fc_lport - * @xid: the exchange id for this ddp transfer +/** + * fcoe_ddp_done() - Call a LLD's ddp_done through the net device + * @lport: The local port to complete DDP on + * @xid: The exchange ID for this DDP transfer * - * Returns : the length of data that have been completed by ddp + * Returns: the length of data that have been completed by DDP */ -static int fcoe_ddp_done(struct fc_lport *lp, u16 xid) +static int fcoe_ddp_done(struct fc_lport *lport, u16 xid) { - struct net_device *n = fcoe_netdev(lp); + struct net_device *netdev = fcoe_netdev(lport); - if (n->netdev_ops->ndo_fcoe_ddp_done) - return n->netdev_ops->ndo_fcoe_ddp_done(n, xid); + if (netdev->netdev_ops->ndo_fcoe_ddp_done) + return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid); return 0; } -static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, - u32 did, struct fc_frame *fp, unsigned int op, - void (*resp)(struct fc_seq *, struct fc_frame *, void *), - void *arg, u32 timeout); - -static struct libfc_function_template fcoe_libfc_fcn_templ = { - .frame_send = fcoe_xmit, - .ddp_setup = fcoe_ddp_setup, - .ddp_done = fcoe_ddp_done, - .elsct_send = fcoe_elsct_send, -}; - /** - * fcoe_if_create() - this function creates the fcoe port - * @fcoe: fcoe_interface structure to create an fc_lport instance on - * @parent: device pointer to be the parent in sysfs for the SCSI host - * @npiv: is this a vport? 
+ * fcoe_if_create() - Create a FCoE instance on an interface + * @fcoe: The FCoE interface to create a local port on + * @parent: The device pointer to be the parent in sysfs for the SCSI host + * @npiv: Indicates if the port is a vport or not * - * Creates fc_lport struct and scsi_host for lport, configures lport. + * Creates a fc_lport instance and a Scsi_Host instance and configure them. * - * Returns : The allocated fc_lport or an error pointer + * Returns: The allocated fc_lport or an error pointer */ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, struct device *parent, int npiv) { - int rc; + struct net_device *netdev = fcoe->netdev; struct fc_lport *lport = NULL; struct fcoe_port *port; struct Scsi_Host *shost; - struct net_device *netdev = fcoe->netdev; + int rc; /* * parent is only a vport if npiv is 1, * but we'll only use vport in that case so go ahead and set it @@ -837,7 +876,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, port->fcoe = fcoe; INIT_WORK(&port->destroy_work, fcoe_destroy_work); - /* configure fc_lport, e.g., em */ + /* configure a fc_lport including the exchange manager */ rc = fcoe_lport_config(lport); if (rc) { FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " @@ -847,7 +886,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, if (npiv) { FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n", - vport->node_name, vport->port_name); + vport->node_name, vport->port_name); fc_set_wwnn(lport, vport->node_name); fc_set_wwpn(lport, vport->port_name); } @@ -891,7 +930,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, rc = fcoe_em_config(lport); if (rc) { FCOE_NETDEV_DBG(netdev, "Could not configure the EM " - "for the interface\n"); + "for the interface\n"); goto out_lp_destroy; } } @@ -908,9 +947,11 @@ out: } /** - * fcoe_if_init() - attach to scsi transport + * fcoe_if_init() - Initialization routine for fcoe.ko * - * Returns : 0 on success + * Attaches the SW FCoE transport to the FC transport + * + * Returns: 0 on success */ static int __init fcoe_if_init(void) { @@ -928,9 +969,11 @@ static int __init fcoe_if_init(void) } /** - * fcoe_if_exit() - detach from scsi transport + * fcoe_if_exit() - Tear down fcoe.ko + * + * Detaches the SW FCoE transport from the FC transport * - * Returns : 0 on success + * Returns: 0 on success */ int __exit fcoe_if_exit(void) { @@ -942,8 +985,8 @@ int __exit fcoe_if_exit(void) } /** - * fcoe_percpu_thread_create() - Create a receive thread for an online cpu - * @cpu: cpu index for the online cpu + * fcoe_percpu_thread_create() - Create a receive thread for an online CPU + * @cpu: The CPU index of the CPU to create a receive thread for */ static void fcoe_percpu_thread_create(unsigned int cpu) { @@ -966,8 +1009,8 @@ static void fcoe_percpu_thread_create(unsigned int cpu) } /** - * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu - * @cpu: cpu index the rx thread is to be removed + * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU + * @cpu: The CPU index of the CPU whose receive thread is to be destroyed * * Destroys a per-CPU Rx thread. Any pending skbs are moved to the * current CPU's Rx thread. If the thread being destroyed is bound to @@ -1015,7 +1058,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) } else { /* * The targeted CPU is not initialized and cannot accept - * new skbs. Unlock the targeted CPU and drop the skbs + * new skbs. 
Unlock the targeted CPU and drop the skbs * on the CPU that is going offline. */ while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) @@ -1056,12 +1099,12 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) } /** - * fcoe_cpu_callback() - fcoe cpu hotplug event callback - * @nfb: callback data block - * @action: event triggering the callback - * @hcpu: index for the cpu of this event + * fcoe_cpu_callback() - Handler for CPU hotplug events + * @nfb: The callback data block + * @action: The event triggering the callback + * @hcpu: The index of the CPU that the event is for * - * This creates or destroys per cpu data for fcoe + * This creates or destroys per-CPU data for fcoe * * Returns NOTIFY_OK always. */ @@ -1087,25 +1130,22 @@ static int fcoe_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block fcoe_cpu_notifier = { - .notifier_call = fcoe_cpu_callback, -}; - /** - * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ - * @skb: the receive skb - * @dev: associated net device - * @ptype: context - * @olddev: last device + * fcoe_rcv() - Receive packets from a net device + * @skb: The received packet + * @netdev: The net device that the packet was received on + * @ptype: The packet type context + * @olddev: The last device net device * - * this function will receive the packet and build fc frame and pass it up + * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a + * FC frame and passes the frame to libfc. * * Returns: 0 for success */ -int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, +int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, struct packet_type *ptype, struct net_device *olddev) { - struct fc_lport *lp; + struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fcoe_interface *fcoe; struct fc_frame_header *fh; @@ -1113,15 +1153,15 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, unsigned int cpu; fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); - lp = fcoe->ctlr.lp; - if (unlikely(lp == NULL)) { - FCOE_NETDEV_DBG(dev, "Cannot find hba structure"); + lport = fcoe->ctlr.lp; + if (unlikely(!lport)) { + FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); goto err2; } - if (!lp->link_up) + if (!lport->link_up) goto err2; - FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p " + FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p " "data:%p tail:%p end:%p sum:%d dev:%s", skb->len, skb->data_len, skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), @@ -1129,7 +1169,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, /* check for FCOE packet type */ if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { - FCOE_NETDEV_DBG(dev, "Wrong FC type frame"); + FCOE_NETDEV_DBG(netdev, "Wrong FC type frame"); goto err; } @@ -1138,14 +1178,14 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, * and FC headers are pulled into the linear data area. */ if (unlikely((skb->len < FCOE_MIN_FRAME) || - !pskb_may_pull(skb, FCOE_HEADER_LEN))) + !pskb_may_pull(skb, FCOE_HEADER_LEN))) goto err; skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); fh = (struct fc_frame_header *) skb_transport_header(skb); fr = fcoe_dev_from_skb(skb); - fr->fr_dev = lp; + fr->fr_dev = lport; fr->ptype = ptype; /* @@ -1167,7 +1207,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, * the first CPU now. For non-SMP systems this * will check the same CPU twice. 
*/ - FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread " + FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread " "ready for incoming skb- using first online " "CPU.\n"); @@ -1194,7 +1234,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, return 0; err: - fc_lport_get_stats(lp)->ErrorFrames++; + fc_lport_get_stats(lport)->ErrorFrames++; err2: kfree_skb(skb); @@ -1202,8 +1242,11 @@ err2: } /** - * fcoe_start_io() - pass to netdev to start xmit for fcoe - * @skb: the skb to be xmitted + * fcoe_start_io() - Start FCoE I/O + * @skb: The packet to be transmitted + * + * This routine is called from the net device to start transmitting + * FCoE packets. * * Returns: 0 for success */ @@ -1220,9 +1263,15 @@ static inline int fcoe_start_io(struct sk_buff *skb) } /** - * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof - * @skb: the skb to be xmitted - * @tlen: total len + * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC + * @skb: The packet to be transmitted + * @tlen: The total length of the trailer + * + * This routine allocates a page for frame trailers. The page is re-used if + * there is enough room left on it for the current trailer. If there isn't + * enough buffer left a new page is allocated for the trailer. Reference to + * the page from this function as well as the skbs using the page fragments + * ensure that the page is freed at the appropriate time. * * Returns: 0 for success */ @@ -1261,11 +1310,12 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) } /** - * fcoe_fc_crc() - calculates FC CRC in this fcoe skb - * @fp: the fc_frame containing data to be checksummed + * fcoe_fc_crc() - Calculates the CRC for a given frame + * @fp: The frame to be checksumed * - * This uses crc32() to calculate the crc for port frame - * Return : 32 bit crc + * This uses crc32() routine to calculate the CRC for a frame + * + * Return: The 32 bit CRC value */ u32 fcoe_fc_crc(struct fc_frame *fp) { @@ -1296,13 +1346,13 @@ u32 fcoe_fc_crc(struct fc_frame *fp) } /** - * fcoe_xmit() - FCoE frame transmit function - * @lp: the associated local fcoe - * @fp: the fc_frame to be transmitted + * fcoe_xmit() - Transmit a FCoE frame + * @lport: The local port that the frame is to be transmitted for + * @fp: The frame to be transmitted * - * Return : 0 for success + * Return: 0 for success */ -int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) +int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) { int wlen; u32 crc; @@ -1314,7 +1364,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) unsigned int hlen; /* header length implies the version */ unsigned int tlen; /* trailer length */ unsigned int elen; /* eth header, may include vlan */ - struct fcoe_port *port = lport_priv(lp); + struct fcoe_port *port = lport_priv(lport); struct fcoe_interface *fcoe = port->fcoe; u8 sof, eof; struct fcoe_hdr *hp; @@ -1325,13 +1375,13 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) skb = fp_skb(fp); wlen = skb->len / FCOE_WORD_TO_BYTE; - if (!lp->link_up) { + if (!lport->link_up) { kfree_skb(skb); return 0; } if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && - fcoe_ctlr_els_send(&fcoe->ctlr, lp, skb)) + fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb)) return 0; sof = fr_sof(fp); @@ -1343,7 +1393,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; /* crc offload */ - if (likely(lp->crc_offload)) { + if (likely(lport->crc_offload)) { skb->ip_summed = 
CHECKSUM_PARTIAL; skb->csum_start = skb_headroom(skb); skb->csum_offset = skb->len; @@ -1405,7 +1455,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) hp->fcoe_sof = sof; /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ - if (lp->seq_offload && fr_max_payload(fp)) { + if (lport->seq_offload && fr_max_payload(fp)) { skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; skb_shinfo(skb)->gso_size = fr_max_payload(fp); } else { @@ -1413,23 +1463,23 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) skb_shinfo(skb)->gso_size = 0; } /* update tx stats: regardless if LLD fails */ - stats = fc_lport_get_stats(lp); + stats = fc_lport_get_stats(lport); stats->TxFrames++; stats->TxWords += wlen; /* send down to lld */ - fr_dev(fp) = lp; + fr_dev(fp) = lport; if (port->fcoe_pending_queue.qlen) - fcoe_check_wait_queue(lp, skb); + fcoe_check_wait_queue(lport, skb); else if (fcoe_start_io(skb)) - fcoe_check_wait_queue(lp, skb); + fcoe_check_wait_queue(lport, skb); return 0; } /** - * fcoe_percpu_flush_done() - Indicate percpu queue flush completion. - * @skb: the skb being completed. + * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion + * @skb: The completed skb (argument required by destructor) */ static void fcoe_percpu_flush_done(struct sk_buff *skb) { @@ -1437,8 +1487,8 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb) } /** - * fcoe_percpu_receive_thread() - recv thread per cpu - * @arg: ptr to the fcoe per cpu struct + * fcoe_percpu_receive_thread() - The per-CPU packet receive thread + * @arg: The per-CPU context * * Return: 0 for success */ @@ -1446,7 +1496,7 @@ int fcoe_percpu_receive_thread(void *arg) { struct fcoe_percpu_s *p = arg; u32 fr_len; - struct fc_lport *lp; + struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fcoe_dev_stats *stats; struct fc_frame_header *fh; @@ -1473,8 +1523,8 @@ int fcoe_percpu_receive_thread(void *arg) } spin_unlock_bh(&p->fcoe_rx_list.lock); fr = fcoe_dev_from_skb(skb); - lp = fr->fr_dev; - if (unlikely(lp == NULL)) { + lport = fr->fr_dev; + if (unlikely(!lport)) { if (skb->destructor != fcoe_percpu_flush_done) FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); kfree_skb(skb); @@ -1491,7 +1541,7 @@ int fcoe_percpu_receive_thread(void *arg) /* * Save source MAC address before discarding header. */ - port = lport_priv(lp); + port = lport_priv(lport); if (skb_is_nonlinear(skb)) skb_linearize(skb); /* not ideal */ mac = eth_hdr(skb)->h_source; @@ -1503,7 +1553,7 @@ int fcoe_percpu_receive_thread(void *arg) hp = (struct fcoe_hdr *) skb_network_header(skb); fh = (struct fc_frame_header *) skb_transport_header(skb); - stats = fc_lport_get_stats(lp); + stats = fc_lport_get_stats(lport); if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { if (stats->ErrorFrames < 5) printk(KERN_WARNING "fcoe: FCoE version " @@ -1525,7 +1575,7 @@ int fcoe_percpu_receive_thread(void *arg) fp = (struct fc_frame *)skb; fc_frame_init(fp); - fr_dev(fp) = lp; + fr_dev(fp) = lport; fr_sof(fp) = hp->fcoe_sof; /* Copy out the CRC and EOF trailer for access */ @@ -1545,7 +1595,8 @@ int fcoe_percpu_receive_thread(void *arg) * it's solicited data, in which case, the FCP layer would * check it during the copy. 
*/ - if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY) + if (lport->crc_offload && + skb->ip_summed == CHECKSUM_UNNECESSARY) fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; else fr_flags(fp) |= FCPHF_CRC_UNCHECKED; @@ -1553,7 +1604,7 @@ int fcoe_percpu_receive_thread(void *arg) fh = fc_frame_header_get(fp); if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { - fc_exch_recv(lp, fp); + fc_exch_recv(lport, fp); continue; } if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { @@ -1569,27 +1620,27 @@ int fcoe_percpu_receive_thread(void *arg) } fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; } - fc_exch_recv(lp, fp); + fc_exch_recv(lport, fp); } return 0; } /** - * fcoe_check_wait_queue() - attempt to clear the transmit backlog - * @lp: the fc_lport + * fcoe_check_wait_queue() - Attempt to clear the transmit backlog + * @lport: The local port whose backlog is to be cleared * - * This empties the wait_queue, dequeue the head of the wait_queue queue - * and calls fcoe_start_io() for each packet, if all skb have been - * transmitted, return qlen or -1 if a error occurs, then restore - * wait_queue and try again later. + * This empties the wait_queue, dequeues the head of the wait_queue queue + * and calls fcoe_start_io() for each packet. If all skb have been + * transmitted it returns the qlen. If an error occurs it restores + * wait_queue (to try again later) and returns -1. * - * The wait_queue is used when the skb transmit fails. skb will go - * in the wait_queue which will be emptied by the timer function or + * The wait_queue is used when the skb transmit fails. The failed skb + * will go in the wait_queue which will be emptied by the timer function or * by the next skb transmit. */ -static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) +static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) { - struct fcoe_port *port = lport_priv(lp); + struct fcoe_port *port = lport_priv(lport); int rc; spin_lock_bh(&port->fcoe_pending_queue.lock); @@ -1621,19 +1672,19 @@ static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) } if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) - lp->qfull = 0; + lport->qfull = 0; if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) mod_timer(&port->timer, jiffies + 2); port->fcoe_pending_queue_active = 0; out: if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) - lp->qfull = 1; + lport->qfull = 1; spin_unlock_bh(&port->fcoe_pending_queue.lock); return; } /** - * fcoe_dev_setup() - setup link change notification interface + * fcoe_dev_setup() - Setup the link change notification interface */ static void fcoe_dev_setup(void) { @@ -1641,7 +1692,7 @@ static void fcoe_dev_setup(void) } /** - * fcoe_dev_cleanup() - cleanup link change notification interface + * fcoe_dev_cleanup() - Cleanup the link change notification interface */ static void fcoe_dev_cleanup(void) { @@ -1649,19 +1700,19 @@ static void fcoe_dev_cleanup(void) } /** - * fcoe_device_notification() - netdev event notification callback - * @notifier: context of the notification - * @event: type of event - * @ptr: fixed array for output parsed ifname + * fcoe_device_notification() - Handler for net device events + * @notifier: The context of the notification + * @event: The type of event + * @ptr: The net device that the event was on * - * This function is called by the ethernet driver in case of link change event + * This function is called by the Ethernet driver in case of link change event. 
* * Returns: 0 for success */ static int fcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr) { - struct fc_lport *lp = NULL; + struct fc_lport *lport = NULL; struct net_device *netdev = ptr; struct fcoe_interface *fcoe; struct fcoe_port *port; @@ -1672,11 +1723,11 @@ static int fcoe_device_notification(struct notifier_block *notifier, list_for_each_entry(fcoe, &fcoe_hostlist, list) { if (fcoe->netdev == netdev) { - lp = fcoe->ctlr.lp; + lport = fcoe->ctlr.lp; break; } } - if (lp == NULL) { + if (!lport) { rc = NOTIFY_DONE; goto out; } @@ -1695,7 +1746,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)); if (mfs >= FC_MIN_MAX_FRAME) - fc_set_mfs(lp, mfs); + fc_set_mfs(lport, mfs); break; case NETDEV_REGISTER: break; @@ -1710,22 +1761,22 @@ static int fcoe_device_notification(struct notifier_block *notifier, FCOE_NETDEV_DBG(netdev, "Unknown event %ld " "from netdev netlink\n", event); } - if (link_possible && !fcoe_link_ok(lp)) + if (link_possible && !fcoe_link_ok(lport)) fcoe_ctlr_link_up(&fcoe->ctlr); else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { - stats = fc_lport_get_stats(lp); + stats = fc_lport_get_stats(lport); stats->LinkFailureCount++; - fcoe_clean_pending_queue(lp); + fcoe_clean_pending_queue(lport); } out: return rc; } /** - * fcoe_if_to_netdev() - parse a name buffer to get netdev - * @buffer: incoming buffer to be copied + * fcoe_if_to_netdev() - Parse a name buffer to get a net device + * @buffer: The name of the net device * - * Returns: NULL or ptr to net_device + * Returns: NULL or a ptr to net_device */ static struct net_device *fcoe_if_to_netdev(const char *buffer) { @@ -1743,9 +1794,11 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer) } /** - * fcoe_destroy() - handles the destroy from sysfs - * @buffer: expected to be an eth if name - * @kp: associated kernel param + * fcoe_destroy() - Destroy a FCoE interface + * @buffer: The name of the Ethernet interface to be destroyed + * @kp: The associated kernel parameter + * + * Called from sysfs. * * Returns: 0 for success */ @@ -1792,6 +1845,10 @@ out_nodev: return rc; } +/** + * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context + * @work: Handle to the FCoE port to be destroyed + */ static void fcoe_destroy_work(struct work_struct *work) { struct fcoe_port *port; @@ -1803,9 +1860,11 @@ static void fcoe_destroy_work(struct work_struct *work) } /** - * fcoe_create() - Handles the create call from sysfs - * @buffer: expected to be an eth if name - * @kp: associated kernel param + * fcoe_create() - Create a fcoe interface + * @buffer: The name of the Ethernet interface to create on + * @kp: The associated kernel param + * + * Called from sysfs. * * Returns: 0 for success */ @@ -1884,16 +1943,9 @@ out_nodev: return rc; } -module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); -__MODULE_PARM_TYPE(create, "string"); -MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in."); -module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); -__MODULE_PARM_TYPE(destroy, "string"); -MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); - /** - * fcoe_link_ok() - Check if link is ok for the fc_lport - * @lp: ptr to the fc_lport + * fcoe_link_ok() - Check if the link is OK for a local port + * @lport: The local port to check link on * * Any permanently-disqualifying conditions have been previously checked. 
* This also updates the speed setting, which may change with link for 100/1000. @@ -1905,26 +1957,26 @@ MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); * Returns: 0 if link is OK for use by FCoE. * */ -int fcoe_link_ok(struct fc_lport *lp) +int fcoe_link_ok(struct fc_lport *lport) { - struct fcoe_port *port = lport_priv(lp); - struct net_device *dev = port->fcoe->netdev; + struct fcoe_port *port = lport_priv(lport); + struct net_device *netdev = port->fcoe->netdev; struct ethtool_cmd ecmd = { ETHTOOL_GSET }; - if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && - (!dev_ethtool_get_settings(dev, &ecmd))) { - lp->link_supported_speeds &= + if ((netdev->flags & IFF_UP) && netif_carrier_ok(netdev) && + (!dev_ethtool_get_settings(netdev, &ecmd))) { + lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); if (ecmd.supported & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) - lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; + lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; if (ecmd.supported & SUPPORTED_10000baseT_Full) - lp->link_supported_speeds |= + lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; if (ecmd.speed == SPEED_1000) - lp->link_speed = FC_PORTSPEED_1GBIT; + lport->link_speed = FC_PORTSPEED_1GBIT; if (ecmd.speed == SPEED_10000) - lp->link_speed = FC_PORTSPEED_10GBIT; + lport->link_speed = FC_PORTSPEED_10GBIT; return 0; } @@ -1932,8 +1984,8 @@ int fcoe_link_ok(struct fc_lport *lp) } /** - * fcoe_percpu_clean() - Clear the pending skbs for an lport - * @lp: the fc_lport + * fcoe_percpu_clean() - Clear all pending skbs for an local port + * @lport: The local port whose skbs are to be cleared * * Must be called with fcoe_create_mutex held to single-thread completion. * @@ -1942,7 +1994,7 @@ int fcoe_link_ok(struct fc_lport *lp) * there no packets that will be handled by the lport, but also that any * threads already handling packet have returned. 
*/ -void fcoe_percpu_clean(struct fc_lport *lp) +void fcoe_percpu_clean(struct fc_lport *lport) { struct fcoe_percpu_s *pp; struct fcoe_rcv_info *fr; @@ -1960,7 +2012,7 @@ void fcoe_percpu_clean(struct fc_lport *lp) skb = next) { next = skb->next; fr = fcoe_dev_from_skb(skb); - if (fr->fr_dev == lp) { + if (fr->fr_dev == lport) { __skb_unlink(skb, list); kfree_skb(skb); } @@ -1989,13 +2041,11 @@ void fcoe_percpu_clean(struct fc_lport *lp) /** * fcoe_clean_pending_queue() - Dequeue a skb and free it - * @lp: the corresponding fc_lport - * - * Returns: none + * @lport: The local port to dequeue a skb on */ -void fcoe_clean_pending_queue(struct fc_lport *lp) +void fcoe_clean_pending_queue(struct fc_lport *lport) { - struct fcoe_port *port = lport_priv(lp); + struct fcoe_port *port = lport_priv(lport); struct sk_buff *skb; spin_lock_bh(&port->fcoe_pending_queue.lock); @@ -2008,10 +2058,10 @@ void fcoe_clean_pending_queue(struct fc_lport *lp) } /** - * fcoe_reset() - Resets the fcoe - * @shost: shost the reset is from + * fcoe_reset() - Reset a local port + * @shost: The SCSI host associated with the local port to be reset * - * Returns: always 0 + * Returns: Always 0 (return value required by FC transport template) */ int fcoe_reset(struct Scsi_Host *shost) { @@ -2021,30 +2071,33 @@ int fcoe_reset(struct Scsi_Host *shost) } /** - * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device - * @dev: this is currently ptr to net_device + * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device + * @netdev: The net device used as a key * - * Returns: NULL or the located fcoe_port - * Locking: must be called with the RNL mutex held + * Locking: Must be called with the RNL mutex held. + * + * Returns: NULL or the FCoE interface */ static struct fcoe_interface * -fcoe_hostlist_lookup_port(const struct net_device *dev) +fcoe_hostlist_lookup_port(const struct net_device *netdev) { struct fcoe_interface *fcoe; list_for_each_entry(fcoe, &fcoe_hostlist, list) { - if (fcoe->netdev == dev) + if (fcoe->netdev == netdev) return fcoe; } return NULL; } /** - * fcoe_hostlist_lookup() - Find the corresponding lport by netdev - * @netdev: ptr to net_device + * fcoe_hostlist_lookup() - Find the local port associated with a + * given net device + * @netdev: The netdevice used as a key * - * Returns: 0 for success - * Locking: must be called with the RTNL mutex held + * Locking: Must be called with the RTNL mutex held + * + * Returns: NULL or the local port */ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) { @@ -2055,11 +2108,13 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) } /** - * fcoe_hostlist_add() - Add a lport to lports list - * @lp: ptr to the fc_lport to be added + * fcoe_hostlist_add() - Add the FCoE interface identified by a local + * port to the hostlist + * @lport: The local port that identifies the FCoE interface to be added * - * Returns: 0 for success * Locking: must be called with the RTNL mutex held + * + * Returns: 0 for success */ static int fcoe_hostlist_add(const struct fc_lport *lport) { @@ -2076,15 +2131,15 @@ static int fcoe_hostlist_add(const struct fc_lport *lport) } /** - * fcoe_init() - fcoe module loading initialization + * fcoe_init() - Initialize fcoe.ko * - * Returns 0 on success, negative on failure + * Returns: 0 on success, or a negative value on failure */ static int __init fcoe_init(void) { + struct fcoe_percpu_s *p; unsigned int cpu; int rc = 0; - struct 
fcoe_percpu_s *p; mutex_lock(&fcoe_config_mutex); @@ -2121,15 +2176,15 @@ out_free: module_init(fcoe_init); /** - * fcoe_exit() - fcoe module unloading cleanup + * fcoe_exit() - Clean up fcoe.ko * - * Returns 0 on success, negative on failure + * Returns: 0 on success or a negative value on failure */ static void __exit fcoe_exit(void) { - unsigned int cpu; struct fcoe_interface *fcoe, *tmp; struct fcoe_port *port; + unsigned int cpu; mutex_lock(&fcoe_config_mutex); @@ -2230,10 +2285,12 @@ static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) * * Most of the work here is just handed off to the libfc routine. */ -static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, - u32 did, struct fc_frame *fp, unsigned int op, - void (*resp)(struct fc_seq *, struct fc_frame *, void *), - void *arg, u32 timeout) +static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout) { struct fcoe_port *port = lport_priv(lport); struct fcoe_interface *fcoe = port->fcoe; @@ -2362,4 +2419,3 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport) lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, NULL, NULL, lport->e_d_tov); } - diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 99dfa7c2aeaa..c69b2c56c2d1 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -50,7 +50,7 @@ unsigned int fcoe_debug_logging; module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); -#define FCOE_LOGGING 0x01 /* General logging, not categorized */ +#define FCOE_LOGGING 0x01 /* General logging, not categorized */ #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ #define FCOE_CHECK_LOGGING(LEVEL, CMD) \ @@ -70,8 +70,13 @@ do { \ printk(KERN_INFO "fcoe: %s: " fmt, \ netdev->name, ##args);) -/* - * this percpu struct for fcoe +/** + * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads + * @thread: The thread context + * @fcoe_rx_list: The queue of pending packets to process + * @page: The memory page for calculating frame trailer CRCs + * @crc_eof_offset: The offset into the CRC page pointing to available + * memory for a new trailer */ struct fcoe_percpu_s { struct task_struct *thread; @@ -80,38 +85,62 @@ struct fcoe_percpu_s { int crc_eof_offset; }; -/* - * an FCoE interface, 1:1 with netdev +/** + * struct fcoe_interface - A FCoE interface + * @list: Handle for a list of FCoE interfaces + * @netdev: The associated net device + * @fcoe_packet_type: FCoE packet type + * @fip_packet_type: FIP packet type + * @ctlr: The FCoE controller (for FIP) + * @oem: The offload exchange manager for all local port + * instances associated with this port + * @kref: The kernel reference + * + * This structure is 1:1 with a net devive. 
*/ struct fcoe_interface { - struct list_head list; - struct net_device *netdev; - struct packet_type fcoe_packet_type; - struct packet_type fip_packet_type; - struct fcoe_ctlr ctlr; - struct fc_exch_mgr *oem; /* offload exchange manager */ - struct kref kref; + struct list_head list; + struct net_device *netdev; + struct packet_type fcoe_packet_type; + struct packet_type fip_packet_type; + struct fcoe_ctlr ctlr; + struct fc_exch_mgr *oem; + struct kref kref; }; -/* - * the FCoE private structure that's allocated along with the - * Scsi_Host and libfc fc_lport structures +/** + * struct fcoe_port - The FCoE private structure + * @fcoe: The associated fcoe interface + * @lport: The associated local port + * @fcoe_pending_queue: The pending Rx queue of skbs + * @fcoe_pending_queue_active: Indicates if the pending queue is active + * @timer: The queue timer + * @destroy_work: Handle for work context + * (to prevent RTNL deadlocks) + * @data_srt_addr: Source address for data + * + * An instance of this structure is to be allocated along with the + * Scsi_Host and libfc fc_lport structures. */ struct fcoe_port { struct fcoe_interface *fcoe; - struct fc_lport *lport; - struct sk_buff_head fcoe_pending_queue; - u8 fcoe_pending_queue_active; - struct timer_list timer; /* queue timer */ - struct work_struct destroy_work; /* to prevent rtnl deadlocks */ - u8 data_src_addr[ETH_ALEN]; + struct fc_lport *lport; + struct sk_buff_head fcoe_pending_queue; + u8 fcoe_pending_queue_active; + struct timer_list timer; + struct work_struct destroy_work; + u8 data_src_addr[ETH_ALEN]; }; #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) -static inline struct net_device *fcoe_netdev(const struct fc_lport *lp) +/** + * fcoe_netdev() - Return the net device associated with a local port + * @lport: The local port to get the net device from + */ +static inline struct net_device *fcoe_netdev(const struct fc_lport *lport) { - return ((struct fcoe_port *)lport_priv(lp))->fcoe->netdev; + return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev; } #endif /* _FCOE_H_ */ -- cgit v1.2.3-59-g8ed1b From a7bbc7f40aa01eefef3d367349e1e6e87881a305 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Tue, 3 Nov 2009 11:47:55 -0800 Subject: [SCSI] fcoe, libfc: use single frame allocation API Cleans up frame allocation APIs to have just single fc_frame_alloc API. Removes _fc_frame_alloc, renames __fc_frame_alloc to _fc_frame_alloc. Modifies fc_fcp_send_data for removed _fc_frame_alloc, fc_fcp_send_data was the only user of removed _fc_frame_alloc. Also Adds check in fc_frame_alloc to do mod by 4 for only non-zero len value. This patch is prep work to fix can_queue reducing in next patch. Single fc_frame_alloc API helps in fixing can_queue reducing in next patch. 
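A minimal caller-side sketch of the consolidated allocator described above (it assumes only the libfc calls touched by this patch; lport, tlen and using_sg stand in for a caller's own state):

/* Single entry point: pass 0 when the payload will be attached as page
 * fragments, otherwise the payload length; fc_frame_alloc() pads non-zero
 * lengths that are not a multiple of 4 via fc_frame_alloc_fill(). */
struct fc_frame *fp;
void *data;

fp = fc_frame_alloc(lport, using_sg ? 0 : tlen);
if (!fp)
        return -ENOMEM;

/* The linear payload begins immediately after the FC frame header. */
data = fc_frame_header_get(fp) + 1;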
Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 15 ++++----------- drivers/scsi/libfc/fc_frame.c | 6 +++--- include/scsi/fc_frame.h | 16 +++------------- 3 files changed, 10 insertions(+), 27 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 970b54f653b7..567eee7b8609 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -505,18 +505,11 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, */ if (tlen % 4) using_sg = 0; - if (using_sg) { - fp = _fc_frame_alloc(lport, 0); - if (!fp) - return -ENOMEM; - } else { - fp = fc_frame_alloc(lport, tlen); - if (!fp) - return -ENOMEM; + fp = fc_frame_alloc(lport, using_sg ? 0 : tlen); + if (!fp) + return -ENOMEM; - data = (void *)(fr_hdr(fp)) + - sizeof(struct fc_frame_header); - } + data = fc_frame_header_get(fp) + 1; fh_parm_offset = frame_offset; fr_max_payload(fp) = fsp->max_payload; } diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c index 4fea369b58ee..79c956501bd9 100644 --- a/drivers/scsi/libfc/fc_frame.c +++ b/drivers/scsi/libfc/fc_frame.c @@ -51,7 +51,7 @@ EXPORT_SYMBOL(fc_frame_crc_check); * Allocate a frame intended to be sent via fcoe_xmit. * Get an sk_buff for the frame and set the length. */ -struct fc_frame *__fc_frame_alloc(size_t len) +struct fc_frame *_fc_frame_alloc(size_t len) { struct fc_frame *fp; struct sk_buff *skb; @@ -67,7 +67,7 @@ struct fc_frame *__fc_frame_alloc(size_t len) skb_put(skb, len); return fp; } -EXPORT_SYMBOL(__fc_frame_alloc); +EXPORT_SYMBOL(_fc_frame_alloc); struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) { @@ -77,7 +77,7 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) fill = payload_len % 4; if (fill != 0) fill = 4 - fill; - fp = __fc_frame_alloc(payload_len + fill); + fp = _fc_frame_alloc(payload_len + fill); if (fp) { memset((char *) fr_hdr(fp) + payload_len, 0, fill); /* trim is OK, we just allocated it so there are no fragments */ diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h index ab2f8d41761b..4d3e9c7b7c57 100644 --- a/include/scsi/fc_frame.h +++ b/include/scsi/fc_frame.h @@ -100,17 +100,7 @@ static inline void fc_frame_init(struct fc_frame *fp) } struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len); - -struct fc_frame *__fc_frame_alloc(size_t payload_len); - -/* - * Get frame for sending via port. - */ -static inline struct fc_frame *_fc_frame_alloc(struct fc_lport *dev, - size_t payload_len) -{ - return __fc_frame_alloc(payload_len); -} +struct fc_frame *_fc_frame_alloc(size_t payload_len); /* * Allocate fc_frame structure and buffer. Set the initial length to @@ -124,10 +114,10 @@ static inline struct fc_frame *fc_frame_alloc(struct fc_lport *dev, size_t len) * Note: Since len will often be a constant multiple of 4, * this check will usually be evaluated and eliminated at compile time. 
*/ - if ((len % 4) != 0) + if (len && len % 4) fp = fc_frame_alloc_fill(dev, len); else - fp = _fc_frame_alloc(dev, len); + fp = _fc_frame_alloc(len); return fp; } -- cgit v1.2.3-59-g8ed1b From c46be11a683acc1ccf86883ea906f171b90ff29a Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Tue, 3 Nov 2009 11:48:00 -0800 Subject: [SCSI] libfc: reduce can_queue for all FCP frame allocation failures Currently can_queue is reduced only if frame alloc fails during fc_fcp_send_data but frame alloc can fail at several other places in FCP data path and can_queue needs to be reduced for any FCP frame alloc failure. This patch adds fc_fcp_frame_alloc for all FCP frame allocations and if fc_frame_alloc fails in fc_fcp_frame_alloc then reduce can_queue in fc_fcp_frame_alloc, this will reduce can_queue for all FCP frame alloc failures. This required moving fc_fcp_reduce_can_queue up, to build without adding its prototype. Also renamed fc_fcp_reduce_can_queue to fc_fcp_can_queue_ramp_down. Removes fc_fcp_reduce_can_queue calling from fc_fcp_recv since not needed with added fc_fcp_frame_alloc reducing can_queue. Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 102 +++++++++++++++++++++++++------------------- 1 file changed, 59 insertions(+), 43 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 567eee7b8609..ac5c148d0182 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -326,6 +326,57 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) } } +/** + * fc_fcp_can_queue_ramp_down() - reduces can_queue + * @lport: lport to reduce can_queue + * + * If we are getting memory allocation failures, then we may + * be trying to execute too many commands. We let the running + * commands complete or timeout, then try again with a reduced + * can_queue. Eventually we will hit the point where we run + * on all reserved structs. + */ +static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) +{ + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + unsigned long flags; + int can_queue; + + spin_lock_irqsave(lport->host->host_lock, flags); + if (si->throttled) + goto done; + si->throttled = 1; + + can_queue = lport->host->can_queue; + can_queue >>= 1; + if (!can_queue) + can_queue = 1; + lport->host->can_queue = can_queue; + shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" + "Reducing can_queue to %d.\n", can_queue); +done: + spin_unlock_irqrestore(lport->host->host_lock, flags); +} + +/* + * fc_fcp_frame_alloc() - Allocates fc_frame structure and buffer. + * @lport: fc lport struct + * @len: payload length + * + * Allocates fc_frame structure and buffer but if fails to allocate + * then reduce can_queue. + */ +static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, + size_t len) +{ + struct fc_frame *fp; + + fp = fc_frame_alloc(lport, len); + if (!fp) + fc_fcp_can_queue_ramp_down(lport); + return fp; +} + /** * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target * @fsp: The FCP packet the data is on @@ -615,38 +666,6 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) } } -/** - * fc_fcp_reduce_can_queue() - Reduce the can_queue value for a local port - * @lport: The local port to reduce can_queue on - * - * If we are getting memory allocation failures, then we may - * be trying to execute too many commands. 
We let the running - * commands complete or timeout, then try again with a reduced - * can_queue. Eventually we will hit the point where we run - * on all reserved structs. - */ -static void fc_fcp_reduce_can_queue(struct fc_lport *lport) -{ - struct fc_fcp_internal *si = fc_get_scsi_internal(lport); - unsigned long flags; - int can_queue; - - spin_lock_irqsave(lport->host->host_lock, flags); - if (si->throttled) - goto done; - si->throttled = 1; - - can_queue = lport->host->can_queue; - can_queue >>= 1; - if (!can_queue) - can_queue = 1; - lport->host->can_queue = can_queue; - shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" - "Reducing can_queue to %d.\n", can_queue); -done: - spin_unlock_irqrestore(lport->host->host_lock, flags); -} - /** * fc_fcp_recv() - Reveive an FCP frame * @seq: The sequence the frame is on @@ -665,8 +684,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) u8 r_ctl; int rc = 0; - if (IS_ERR(fp)) - goto errout; + if (IS_ERR(fp)) { + fc_fcp_error(fsp, fp); + return; + } fh = fc_frame_header_get(fp); r_ctl = fh->fh_r_ctl; @@ -720,11 +741,6 @@ unlock: fc_fcp_unlock_pkt(fsp); out: fc_frame_free(fp); -errout: - if (IS_ERR(fp)) - fc_fcp_error(fsp, fp); - else if (rc == -ENOMEM) - fc_fcp_reduce_can_queue(lport); } /** @@ -886,7 +902,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) struct fc_seq *csp; csp = lport->tt.seq_start_next(seq); - conf_frame = fc_frame_alloc(fsp->lp, 0); + conf_frame = fc_fcp_frame_alloc(fsp->lp, 0); if (conf_frame) { f_ctl = FC_FC_SEQ_INIT; f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; @@ -1026,7 +1042,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, if (fc_fcp_lock_pkt(fsp)) return 0; - fp = fc_frame_alloc(lport, sizeof(fsp->cdb_cmd)); + fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd)); if (!fp) { rc = -1; goto unlock; @@ -1306,7 +1322,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) fc_fcp_complete_locked(fsp); return; } - fp = fc_frame_alloc(lport, sizeof(struct fc_els_rec)); + fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec)); if (!fp) goto retry; @@ -1557,7 +1573,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || rpriv->rp_state != RPORT_ST_READY) goto retry; /* shouldn't happen */ - fp = fc_frame_alloc(lport, sizeof(*srr)); + fp = fc_fcp_frame_alloc(lport, sizeof(*srr)); if (!fp) goto retry; -- cgit v1.2.3-59-g8ed1b From 84c3e1ad08d4be018a95e7a9964bf3dbc8cf8857 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Tue, 3 Nov 2009 11:48:06 -0800 Subject: [SCSI] libfc: adds can_queue ramp up Adds last_can_queue_ramp_down_time and updates this on every ramp down. If last_can_queue_ramp_down_time is not zero then do ramp up on any IO completion in added fc_fcp_can_queue_ramp_up. Reset last_can_queue_ramp_down_time to zero once can_queue is ramped up to added max_can_queue limit, this is to avoid any more ramp up attempts on subsequent IO completion. The ramp down and up are skipped for FC_CAN_QUEUE_PERIOD to avoid infrequent changes to can_queue, this required keeping track of ramp up time also in last_can_queue_ramp_up_time. Adds code to ramp down can_queue if lp->qfull is set, with added new ramp up code the can_queue will be increased after FC_CAN_QUEUE_PERIOD, therefore it is safe to do ramp down without fsp in this case and will avoid thrash. This required fc_fcp_can_queue_ramp_down locking change so that it can be called with Scsi_Host lock held. 
Removes si->throttled and fsp state FC_SRB_NOMEM, not needed with added ramp up code. Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 78 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 59 insertions(+), 19 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index ac5c148d0182..4bfab4f0ccb3 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -52,7 +52,6 @@ struct kmem_cache *scsi_pkt_cachep; #define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ #define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ #define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ -#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */ #define FC_SRB_READ (1 << 1) #define FC_SRB_WRITE (1 << 0) @@ -71,12 +70,16 @@ struct kmem_cache *scsi_pkt_cachep; * struct fc_fcp_internal - FCP layer internal data * @scsi_pkt_pool: Memory pool to draw FCP packets from * @scsi_pkt_queue: Current FCP packets - * @throttled: The FCP packet queue is throttled + * @last_can_queue_ramp_down_time: ramp down time + * @last_can_queue_ramp_up_time: ramp up time + * @max_can_queue: max can_queue size */ struct fc_fcp_internal { mempool_t *scsi_pkt_pool; struct list_head scsi_pkt_queue; - u8 throttled; + unsigned long last_can_queue_ramp_down_time; + unsigned long last_can_queue_ramp_up_time; + int max_can_queue; }; #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) @@ -124,6 +127,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); #define FC_SCSI_TM_TOV (10 * HZ) #define FC_SCSI_REC_TOV (2 * HZ) #define FC_HOST_RESET_TIMEOUT (30 * HZ) +#define FC_CAN_QUEUE_PERIOD (60 * HZ) #define FC_MAX_ERROR_CNT 5 #define FC_MAX_RECOV_RETRY 3 @@ -326,6 +330,38 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) } } +/** + * fc_fcp_can_queue_ramp_up() - increases can_queue + * @lport: lport to ramp up can_queue + * + * Locking notes: Called with Scsi_Host lock held + */ +static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) +{ + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + int can_queue; + + if (si->last_can_queue_ramp_up_time && + (time_before(jiffies, si->last_can_queue_ramp_up_time + + FC_CAN_QUEUE_PERIOD))) + return; + + if (time_before(jiffies, si->last_can_queue_ramp_down_time + + FC_CAN_QUEUE_PERIOD)) + return; + + si->last_can_queue_ramp_up_time = jiffies; + + can_queue = lport->host->can_queue << 1; + if (can_queue >= si->max_can_queue) { + can_queue = si->max_can_queue; + si->last_can_queue_ramp_down_time = 0; + } + lport->host->can_queue = can_queue; + shost_printk(KERN_ERR, lport->host, "libfc: increased " + "can_queue to %d.\n", can_queue); +} + /** * fc_fcp_can_queue_ramp_down() - reduces can_queue * @lport: lport to reduce can_queue @@ -335,17 +371,20 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) * commands complete or timeout, then try again with a reduced * can_queue. Eventually we will hit the point where we run * on all reserved structs. 
+ * + * Locking notes: Called with Scsi_Host lock held */ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) { struct fc_fcp_internal *si = fc_get_scsi_internal(lport); - unsigned long flags; int can_queue; - spin_lock_irqsave(lport->host->host_lock, flags); - if (si->throttled) - goto done; - si->throttled = 1; + if (si->last_can_queue_ramp_down_time && + (time_before(jiffies, si->last_can_queue_ramp_down_time + + FC_CAN_QUEUE_PERIOD))) + return; + + si->last_can_queue_ramp_down_time = jiffies; can_queue = lport->host->can_queue; can_queue >>= 1; @@ -354,8 +393,6 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) lport->host->can_queue = can_queue; shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" "Reducing can_queue to %d.\n", can_queue); -done: - spin_unlock_irqrestore(lport->host->host_lock, flags); } /* @@ -370,10 +407,14 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, size_t len) { struct fc_frame *fp; + unsigned long flags; fp = fc_frame_alloc(lport, len); - if (!fp) + if (!fp) { + spin_lock_irqsave(lport->host->host_lock, flags); fc_fcp_can_queue_ramp_down(lport); + spin_unlock_irqrestore(lport->host->host_lock, flags); + } return fp; } @@ -720,8 +761,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) (size_t) ntohl(dd->ft_burst_len)); if (!rc) seq->rec_data = fsp->xfer_len; - else if (rc == -ENOMEM) - fsp->state |= FC_SRB_NOMEM; } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { /* * received a DATA frame @@ -1734,6 +1773,8 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) rpriv = rport->dd_data; if (!fc_fcp_lport_queue_ready(lport)) { + if (lport->qfull) + fc_fcp_can_queue_ramp_down(lport); rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -1830,13 +1871,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) } /* - * if a command timed out while we had to try and throttle IO - * and it is now getting cleaned up, then we are about to - * try again so clear the throttled flag incase we get more - * time outs. + * if can_queue ramp down is done then try can_queue ramp up + * since commands are completing now. */ - if (si->throttled && fsp->state & FC_SRB_NOMEM) - si->throttled = 0; + if (si->last_can_queue_ramp_down_time) + fc_fcp_can_queue_ramp_up(lport); sc_cmd = fsp->cmd; fsp->cmd = NULL; @@ -2176,6 +2215,7 @@ int fc_fcp_init(struct fc_lport *lport) if (!si) return -ENOMEM; lport->scsi_priv = si; + si->max_can_queue = lport->host->can_queue; INIT_LIST_HEAD(&si->scsi_pkt_queue); si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); -- cgit v1.2.3-59-g8ed1b From 22bcd225bfe2107725228758137d2109befa942a Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:48:11 -0800 Subject: [SCSI] libfcoe: Allow FIP to be disabled by the driver Allow FIP to be disabled by the driver for devices that want to use libfcoe in non-FIP mode. The driver merely sets the fcoe_ctlr mode to the state which should be entered when the link comes up. The default is auto. No change is needed for fcoe.c which uses auto mode. 
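As a rough illustration of the driver-side usage this enables (not part of the patch; the foo_* names below are hypothetical), an LLD that wants to run without FIP would set the mode once after fcoe_ctlr_init() and before reporting link up:

/*
 * Illustrative sketch only.  foo_adapter and foo_start() are made-up
 * names; the fcoe_ctlr calls are the ones provided by libfcoe.
 */
static void foo_start(struct foo_adapter *foo)
{
	fcoe_ctlr_init(&foo->ctlr);		/* state = FIP_ST_LINK_WAIT, mode = FIP_ST_AUTO */
	foo->ctlr.mode = FIP_ST_NON_FIP;	/* enter non-FIP mode when the link comes up */

	/* ... hardware bring-up ... */

	fcoe_ctlr_link_up(&foo->ctlr);		/* state becomes fip->mode rather than FIP_ST_AUTO */
}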
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 6 ++++-- include/scsi/libfcoe.h | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 6b07a8400889..1ea17a3c8749 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -109,6 +109,7 @@ static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) void fcoe_ctlr_init(struct fcoe_ctlr *fip) { fip->state = FIP_ST_LINK_WAIT; + fip->mode = FIP_ST_AUTO; INIT_LIST_HEAD(&fip->fcfs); spin_lock_init(&fip->lock); fip->flogi_oxid = FC_XID_UNKNOWN; @@ -261,11 +262,12 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) spin_unlock_bh(&fip->lock); fc_linkup(fip->lp); } else if (fip->state == FIP_ST_LINK_WAIT) { - fip->state = FIP_ST_AUTO; + fip->state = fip->mode; fip->last_link = 1; fip->link = 1; spin_unlock_bh(&fip->lock); - LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n"); + if (fip->state == FIP_ST_AUTO) + LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n"); fc_linkup(fip->lp); fcoe_ctlr_solicit(fip, NULL); } else diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 76d08c9a7678..2344a00e92ef 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -55,6 +55,7 @@ enum fip_state { /** * struct fcoe_ctlr - FCoE Controller and FIP state * @state: internal FIP state for network link and FIP or non-FIP mode. + * @mode: LLD-selected mode. * @lp: &fc_lport: libfc local port. * @sel_fcf: currently selected FCF, or NULL. * @fcfs: list of discovered FCFs. @@ -89,6 +90,7 @@ enum fip_state { */ struct fcoe_ctlr { enum fip_state state; + enum fip_state mode; struct fc_lport *lp; struct fcoe_fcf *sel_fcf; struct list_head fcfs; -- cgit v1.2.3-59-g8ed1b From 0f51c2e54c0bfdb6b02c53f6d7dd9b35f91821b6 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:48:16 -0800 Subject: [SCSI] libfcoe: fip: use SCSI host number to identify debug messages. Use scsi host number to identify debug messages. Previously, no instance information was given, so if multiple ports were active, it became confusing. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 47 +++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 1ea17a3c8749..99f583f40df3 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -74,9 +74,10 @@ do { \ LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ printk(KERN_INFO "libfcoe: " fmt, ##args);) -#define LIBFCOE_FIP_DBG(fmt, args...) \ +#define LIBFCOE_FIP_DBG(fip, fmt, args...) 
\ LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ - printk(KERN_INFO "fip: " fmt, ##args);) + printk(KERN_INFO "host%d: fip: " fmt, \ + (fip)->lp->host->host_no, ##args);) /** * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid @@ -267,7 +268,7 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) fip->link = 1; spin_unlock_bh(&fip->lock); if (fip->state == FIP_ST_AUTO) - LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n"); + LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); fc_linkup(fip->lp); fcoe_ctlr_solicit(fip, NULL); } else @@ -604,13 +605,15 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) /** * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry + * @fip: The FCoE controller receiving the advertisement * @skb: The received FIP advertisement frame * @fcf: The resulting FCF entry * * Returns zero on a valid parsed advertisement, * otherwise returns non zero value. */ -static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) +static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, + struct sk_buff *skb, struct fcoe_fcf *fcf) { struct fip_header *fiph; struct fip_desc *desc = NULL; @@ -649,7 +652,7 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) ((struct fip_mac_desc *)desc)->fd_mac, ETH_ALEN); if (!is_valid_ether_addr(fcf->fcf_mac)) { - LIBFCOE_FIP_DBG("Invalid MAC address " + LIBFCOE_FIP_DBG(fip, "Invalid MAC address " "in FIP adv\n"); return -EINVAL; } @@ -683,7 +686,7 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) case FIP_DT_LOGO: case FIP_DT_ELP: default: - LIBFCOE_FIP_DBG("unexpected descriptor type %x " + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ if (desc->fip_dtype < FIP_DT_VENDOR_BASE) @@ -700,7 +703,7 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) return 0; len_err: - LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", desc->fip_dtype, dlen); return -EINVAL; } @@ -719,7 +722,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) int first = 0; int mtu_valid; - if (fcoe_ctlr_parse_adv(skb, &new)) + if (fcoe_ctlr_parse_adv(fip, skb, &new)) return; spin_lock_bh(&fip->lock); @@ -765,7 +768,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) mtu_valid = fcoe_ctlr_mtu_valid(fcf); fcf->time = jiffies; if (!found) { - LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n", + LIBFCOE_FIP_DBG(fip, "New FCF for fab %llx map %x val %d\n", fcf->fabric_name, fcf->fc_map, mtu_valid); } @@ -844,7 +847,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) ((struct fip_mac_desc *)desc)->fd_mac, ETH_ALEN); if (!is_valid_ether_addr(granted_mac)) { - LIBFCOE_FIP_DBG("Invalid MAC address " + LIBFCOE_FIP_DBG(fip, "Invalid MAC address " "in FIP ELS\n"); goto drop; } @@ -864,7 +867,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) els_dtype = desc->fip_dtype; break; default: - LIBFCOE_FIP_DBG("unexpected descriptor type %x " + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ if (desc->fip_dtype < FIP_DT_VENDOR_BASE) @@ -903,7 +906,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) return; len_err: - LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len 
%zu\n", + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", desc->fip_dtype, dlen); drop: kfree_skb(skb); @@ -930,7 +933,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, struct fc_lport *lport = fip->lp; u32 desc_mask; - LIBFCOE_FIP_DBG("Clear Virtual Link received\n"); + LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); if (!fcf) return; if (!fcf || !fc_host_port_id(lport->host)) @@ -989,9 +992,10 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, * reset only if all required descriptors were present and valid. */ if (desc_mask) { - LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask); + LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n", + desc_mask); } else { - LIBFCOE_FIP_DBG("performing Clear Virtual Link\n"); + LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); fcoe_ctlr_reset(fip, FIP_ST_ENABLED); } } @@ -1050,7 +1054,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) fip->map_dest = 0; fip->state = FIP_ST_ENABLED; state = FIP_ST_ENABLED; - LIBFCOE_FIP_DBG("Using FIP mode\n"); + LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); } spin_unlock_bh(&fip->lock); if (state != FIP_ST_ENABLED) @@ -1085,11 +1089,11 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) struct fcoe_fcf *best = NULL; list_for_each_entry(fcf, &fip->fcfs, list) { - LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x " + LIBFCOE_FIP_DBG(fip, "consider FCF for fab %llx VFID %d map %x " "val %d\n", fcf->fabric_name, fcf->vfid, fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); if (!fcoe_ctlr_fcf_usable(fcf)) { - LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid " + LIBFCOE_FIP_DBG(fip, "FCF for fab %llx map %x %svalid " "%savailable\n", fcf->fabric_name, fcf->fc_map, (fcf->flags & FIP_FL_SOL) ? "" : "in", (fcf->flags & FIP_FL_AVAIL) @@ -1103,7 +1107,7 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) if (fcf->fabric_name != best->fabric_name || fcf->vfid != best->vfid || fcf->fc_map != best->fc_map) { - LIBFCOE_FIP_DBG("Conflicting fabric, VFID, " + LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " "or FC-MAP\n"); return; } @@ -1292,7 +1296,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, return -EINVAL; } fip->state = FIP_ST_NON_FIP; - LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n"); + LIBFCOE_FIP_DBG(fip, + "received FLOGI LS_ACC using non-FIP mode\n"); /* * FLOGI accepted. @@ -1319,7 +1324,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, memcpy(fip->dest_addr, sa, ETH_ALEN); fip->map_dest = 0; if (fip->state == FIP_ST_NON_FIP) - LIBFCOE_FIP_DBG("received FLOGI REQ, " + LIBFCOE_FIP_DBG(fip, "received FLOGI REQ, " "using non-FIP mode\n"); fip->state = FIP_ST_NON_FIP; } -- cgit v1.2.3-59-g8ed1b From 1f4aed818d26eb9ed54520fbeb85d5ee691baa94 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:48:22 -0800 Subject: [SCSI] libfcoe: fip: allow FIP receive to be called from IRQ. FIP's fcoe_ctlr_recv() function was previously only called from the soft IRQ in FCoE. It's not performance critical and is more convenient for some drivers to call it from the IRQ level. Just Change to use skb_queue()/dequeue() which uses spinlock_irqsave instead of separate locking with _bh locks. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 99f583f40df3..787e7225ddde 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -156,9 +156,7 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip) void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) { cancel_work_sync(&fip->recv_work); - spin_lock_bh(&fip->fip_recv_list.lock); - __skb_queue_purge(&fip->fip_recv_list); - spin_unlock_bh(&fip->fip_recv_list.lock); + skb_queue_purge(&fip->fip_recv_list); spin_lock_bh(&fip->lock); fip->state = FIP_ST_DISABLED; @@ -1005,13 +1003,11 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, * @fip: The FCoE controller that received the packet * @skb: The received FIP packet * - * This is called from NET_RX_SOFTIRQ. + * This may be called from either NET_RX_SOFTIRQ or IRQ. */ void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { - spin_lock_bh(&fip->fip_recv_list.lock); - __skb_queue_tail(&fip->fip_recv_list, skb); - spin_unlock_bh(&fip->fip_recv_list.lock); + skb_queue_tail(&fip->fip_recv_list, skb); schedule_work(&fip->recv_work); } EXPORT_SYMBOL(fcoe_ctlr_recv); @@ -1251,13 +1247,8 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work) struct sk_buff *skb; fip = container_of(recv_work, struct fcoe_ctlr, recv_work); - spin_lock_bh(&fip->fip_recv_list.lock); - while ((skb = __skb_dequeue(&fip->fip_recv_list))) { - spin_unlock_bh(&fip->fip_recv_list.lock); + while ((skb = skb_dequeue(&fip->fip_recv_list))) fcoe_ctlr_recv_handler(fip, skb); - spin_lock_bh(&fip->fip_recv_list.lock); - } - spin_unlock_bh(&fip->fip_recv_list.lock); } /** -- cgit v1.2.3-59-g8ed1b From dd42dac4ecd1799077c132aab35d3c36b26d4d8c Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:48:27 -0800 Subject: [SCSI] libfcoe: FIP should report link to libfc whether selected or not The fnic driver with FIP is reporting link up, even though it's down. When the interface is shut down by the switch, we receive a clear virtual link, and set the state reported to libfc as down, although we still report it up. Clearly wrong. That causes the subsequent link down event not to be reported, and /sys shows the host "Online". Currently, in FIP mode, if an FCF times out, then link to libfc is reported as down, to stop FLOGIs. That interferes with the LLD link down being reported. Users really need to know the physical link information, to diagnose cabling issues, so physical link status should be reported to libfc. If the selected FCF needs to be reported, that should be done separately, in a later patch. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 60 ++++++++++++++++++++++----------------------- include/scsi/libfcoe.h | 1 + 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 787e7225ddde..4d857c2aef6c 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -277,38 +277,16 @@ EXPORT_SYMBOL(fcoe_ctlr_link_up); /** * fcoe_ctlr_reset() - Reset a FCoE controller * @fip: The FCoE controller to reset - * @new_state: The FIP state to be entered - * - * Returns non-zero if the link was up and now isn't. 
*/ -static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state) +static void fcoe_ctlr_reset(struct fcoe_ctlr *fip) { - struct fc_lport *lport = fip->lp; - int link_dropped; - - spin_lock_bh(&fip->lock); fcoe_ctlr_reset_fcfs(fip); del_timer(&fip->timer); - fip->state = new_state; fip->ctlr_ka_time = 0; fip->port_ka_time = 0; fip->sol_time = 0; fip->flogi_oxid = FC_XID_UNKNOWN; fip->map_dest = 0; - fip->last_link = 0; - link_dropped = fip->link; - fip->link = 0; - spin_unlock_bh(&fip->lock); - - if (link_dropped) - fc_linkdown(lport); - - if (new_state == FIP_ST_ENABLED) { - fcoe_ctlr_solicit(fip, NULL); - fc_linkup(lport); - link_dropped = 0; - } - return link_dropped; } /** @@ -322,7 +300,20 @@ static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state) */ int fcoe_ctlr_link_down(struct fcoe_ctlr *fip) { - return fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT); + int link_dropped; + + LIBFCOE_FIP_DBG(fip, "link down.\n"); + spin_lock_bh(&fip->lock); + fcoe_ctlr_reset(fip); + link_dropped = fip->link; + fip->link = 0; + fip->last_link = 0; + fip->state = FIP_ST_LINK_WAIT; + spin_unlock_bh(&fip->lock); + + if (link_dropped) + fc_linkdown(fip->lp); + return link_dropped; } EXPORT_SYMBOL(fcoe_ctlr_link_down); @@ -994,7 +985,13 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, desc_mask); } else { LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); - fcoe_ctlr_reset(fip, FIP_ST_ENABLED); + + spin_lock_bh(&fip->lock); + fcoe_ctlr_reset(fip); + spin_unlock_bh(&fip->lock); + + fc_lport_reset(fip->lp); + fcoe_ctlr_solicit(fip, NULL); } } @@ -1152,15 +1149,14 @@ static void fcoe_ctlr_timeout(unsigned long arg) fip->port_ka_time = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); fip->ctlr_ka_time = jiffies + sel->fka_period; - fip->link = 1; } else { printk(KERN_NOTICE "libfcoe: host%d: " "FIP Fibre-Channel Forwarder timed out. " "Starting FCF discovery.\n", fip->lp->host->host_no); - fip->link = 0; + fip->reset_req = 1; + schedule_work(&fip->link_work); } - schedule_work(&fip->link_work); } if (sel) { @@ -1205,20 +1201,24 @@ static void fcoe_ctlr_link_work(struct work_struct *work) u8 *mac; int link; int last_link; + int reset; fip = container_of(work, struct fcoe_ctlr, link_work); spin_lock_bh(&fip->lock); last_link = fip->last_link; link = fip->link; fip->last_link = link; + reset = fip->reset_req; + fip->reset_req = 0; spin_unlock_bh(&fip->lock); if (last_link != link) { if (link) fc_linkup(fip->lp); else - fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT); - } + fc_linkdown(fip->lp); + } else if (reset && link) + fc_lport_reset(fip->lp); if (fip->send_ctlr_ka) { fip->send_ctlr_ka = 0; diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 2344a00e92ef..e38ffa05dc26 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -108,6 +108,7 @@ struct fcoe_ctlr { u8 flogi_count; u8 link; u8 last_link; + u8 reset_req; u8 map_dest; u8 spma; u8 send_ctlr_ka; -- cgit v1.2.3-59-g8ed1b From f31f2a1c3215e96fbff2152486d0fb590f72634e Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:48:32 -0800 Subject: [SCSI] libfcoe: don't send ELS in FIP mode if no FCF selected If link is up, but no FCF is selected, don't send any ELS frames. This came up when an fnic received a multicast advertisement but no solitited advertisments, so no FCF was selected. It tried to send FLOGIs anyway. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 4d857c2aef6c..2aab97221c6c 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -500,6 +500,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, if (fip->state == FIP_ST_NON_FIP) return 0; + if (!fip->sel_fcf) + goto drop; switch (op) { case ELS_FLOGI: -- cgit v1.2.3-59-g8ed1b From 4e5ad003ae07999593bb58ffb7ea646700647390 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Tue, 3 Nov 2009 11:48:39 -0800 Subject: [SCSI] fcoe: remove extra function declarations Remove the two extra function declarations in fcoe.c. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 5615dfe10bf5..17ce2efc3c19 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -102,9 +102,6 @@ static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); static int fcoe_create(const char *, struct kernel_param *); static int fcoe_destroy(const char *, struct kernel_param *); -static u8 *fcoe_get_src_mac(struct fc_lport *); -static void fcoe_destroy_work(struct work_struct *); - static struct fc_seq *fcoe_elsct_send(struct fc_lport *, u32 did, struct fc_frame *, unsigned int op, -- cgit v1.2.3-59-g8ed1b From 59d925168457805572f40fb12bd399e89775b3ff Mon Sep 17 00:00:00 2001 From: john fastabend Date: Tue, 3 Nov 2009 11:48:44 -0800 Subject: [SCSI] fcoe: add check to fail gracefully in bonding mode This patch adds a check to fail gracefully when the netdevice is bonded. Previously, the error was detected but the stack would continue to load. This resulted in a partially enabled fcoe instance and errors when the fcoe instance was destroyed.
Signed-off-by: John Fastabend Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 17ce2efc3c19..b15ec996b477 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -266,6 +266,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, if ((netdev->priv_flags & IFF_MASTER_ALB) || (netdev->priv_flags & IFF_SLAVE_INACTIVE) || (netdev->priv_flags & IFF_MASTER_8023AD)) { + FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n"); return -EOPNOTSUPP; } @@ -323,6 +324,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) { struct fcoe_interface *fcoe; + int err; fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); if (!fcoe) { @@ -341,7 +343,13 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) fcoe->ctlr.update_mac = fcoe_update_src_mac; fcoe->ctlr.get_src_addr = fcoe_get_src_mac; - fcoe_interface_setup(fcoe, netdev); + err = fcoe_interface_setup(fcoe, netdev); + if (err) { + fcoe_ctlr_destroy(&fcoe->ctlr); + kfree(fcoe); + dev_put(netdev); + return NULL; + } return fcoe; } -- cgit v1.2.3-59-g8ed1b From 6049d95a8a223e2dc3a476dea9f0fbc9b580f38f Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:48:50 -0800 Subject: [SCSI] libfc: fix RNN_ID smashing skb payload The code that filled in the name server RNN_ID (register node name) request had somehow gotten a line in it from the RFT_ID code which copies 32 bytes of data over the relatively short payload. This caused some corruption and hangs. Simply deleted the extraneous line. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- include/scsi/fc_encode.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index 9afcbb94ec30..c8968d31c610 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -134,7 +134,6 @@ static inline int fc_ct_fill(struct fc_lport *lport, ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id)); hton24(ct->payload.rn.fr_fid.fp_fid, fc_host_port_id(lport->host)); - ct->payload.rft.fts = lport->fcts; put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn); break; -- cgit v1.2.3-59-g8ed1b From 5f9a056db9c7973c46337ec8d034323aa72bf206 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:48:55 -0800 Subject: [SCSI] libfc: fix symbolic name registrations smashing skb data The strncpy for RSPN_ID and RSNN_NN requests was padding past the allocated frame size. Get the string length before filling in the ct header. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- include/scsi/fc_encode.h | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index c8968d31c610..ab2260cb149c 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -111,6 +111,7 @@ static inline int fc_ct_fill(struct fc_lport *lport, enum fc_fh_type *fh_type) { struct fc_ct_req *ct; + size_t len; switch (op) { case FC_NS_GPN_FT: @@ -138,22 +139,22 @@ static inline int fc_ct_fill(struct fc_lport *lport, break; case FC_NS_RSPN_ID: - ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn)); + len = strnlen(fc_host_symbolic_name(lport->host), 255); + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len); hton24(ct->payload.spn.fr_fid.fp_fid, fc_host_port_id(lport->host)); strncpy(ct->payload.spn.fr_name, - fc_host_symbolic_name(lport->host), 255); - ct->payload.spn.fr_name_len = - strnlen(ct->payload.spn.fr_name, 255); + fc_host_symbolic_name(lport->host), len); + ct->payload.spn.fr_name_len = len; break; case FC_NS_RSNN_NN: - ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn)); + len = strnlen(fc_host_symbolic_name(lport->host), 255); + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len); put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn); strncpy(ct->payload.snn.fr_name, - fc_host_symbolic_name(lport->host), 255); - ct->payload.snn.fr_name_len = - strnlen(ct->payload.snn.fr_name, 255); + fc_host_symbolic_name(lport->host), len); + ct->payload.snn.fr_name_len = len; break; default: -- cgit v1.2.3-59-g8ed1b From 52a6690d3f0cb7414c34b1e26c569b32d4987662 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:49:00 -0800 Subject: [SCSI] libfc: fix fc_els_resp_type to correct display of CT responses Local port debug messages were using fc_els_resp_type() which showed all CT responses as rejects. Handle CT responses correctly based by inspecting fh_type. I decided not to rename the function to keep the patch smaller. We could call it just fc_resp_type() or fc_elsct_resp_type(). 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_elsct.c | 41 +++++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index 01be43f80f34..53748724f2c5 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -90,6 +90,9 @@ EXPORT_SYMBOL(fc_elsct_init); const char *fc_els_resp_type(struct fc_frame *fp) { const char *msg; + struct fc_frame_header *fh; + struct fc_ct_hdr *ct; + if (IS_ERR(fp)) { switch (-PTR_ERR(fp)) { case FC_NO_ERR: @@ -106,15 +109,41 @@ const char *fc_els_resp_type(struct fc_frame *fp) break; } } else { - switch (fc_frame_payload_op(fp)) { - case ELS_LS_ACC: - msg = "accept"; + fh = fc_frame_header_get(fp); + switch (fh->fh_type) { + case FC_TYPE_ELS: + switch (fc_frame_payload_op(fp)) { + case ELS_LS_ACC: + msg = "accept"; + break; + case ELS_LS_RJT: + msg = "reject"; + break; + default: + msg = "response unknown ELS"; + break; + } break; - case ELS_LS_RJT: - msg = "reject"; + case FC_TYPE_CT: + ct = fc_frame_payload_get(fp, sizeof(*ct)); + if (ct) { + switch (ntohs(ct->ct_cmd)) { + case FC_FS_ACC: + msg = "CT accept"; + break; + case FC_FS_RJT: + msg = "CT reject"; + break; + default: + msg = "response unknown CT"; + break; + } + } else { + msg = "short CT response"; + } break; default: - msg = "response unknown ELS"; + msg = "response not ELS or CT"; break; } } -- cgit v1.2.3-59-g8ed1b From 093bb6a2d378ee83fc6ab886c772b6be86abb5a8 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:49:05 -0800 Subject: [SCSI] libfc: add set_fid function to libfc template This is to notify the LLD when an FC_ID is assigned to the local port. The fnic driver needs to push the assigned FC_ID to firmware. It currently does this by intercepting the FLOGI responses, and in order to make that code more common with FIP and NPIV, it makes more sense to wait until the local port has completely handled the FLOGI or FDISC response. Also, when we fix point-to-point FC_ID assignment, we'll need this callback as well. Add a call to the libfc template, which is called whenever the local port FC_ID is being assigned. It defaults to fc_lport_set_fid(), supplied by libfc. As additional benefit of this function, the LLD may determine the MAC address that caused the change by looking at the received frame. We also print the assigned port ID as long as it isn't 0. Setting port ID to 0 happens often in reset while retrying FLOGI, and would be uninteresting. This replaces the previous message which didn't identify the host adapter instance. patch v2 note: changed one word in a comment. "intercepted" -> "provided". Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 34 +++++++++++++++++++++++++++------- include/scsi/libfc.h | 20 ++++++++++++++++++++ 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 90930c435455..653b52dd2ff7 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -732,6 +732,27 @@ static void fc_lport_enter_ready(struct fc_lport *lport) lport->tt.disc_start(fc_lport_disc_callback, lport); } +/** + * fc_lport_set_port_id() - set the local port Port ID + * @lport: The local port which will have its Port ID set. + * @port_id: The new port ID. + * @fp: The frame containing the incoming request, or NULL. 
+ * + * Locking Note: The lport lock is expected to be held before calling + * this function. + */ +static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id, + struct fc_frame *fp) +{ + if (port_id) + printk(KERN_INFO "host%d: Assigned Port ID %6x\n", + lport->host->host_no, port_id); + + fc_host_port_id(lport->host) = port_id; + if (lport->tt.lport_set_port_id) + lport->tt.lport_set_port_id(lport, port_id, fp); +} + /** * fc_lport_recv_flogi_req() - Receive a FLOGI request * @sp_in: The sequence the FLOGI is on @@ -790,7 +811,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, remote_fid = FC_LOCAL_PTP_FID_HI; } - fc_host_port_id(lport->host) = local_fid; + fc_lport_set_port_id(lport, local_fid, rx_fp); fp = fc_frame_alloc(lport, sizeof(*flp)); if (fp) { @@ -926,7 +947,9 @@ static void fc_lport_reset_locked(struct fc_lport *lport) lport->tt.exch_mgr_reset(lport, 0, 0); fc_host_fabric_name(lport->host) = 0; - fc_host_port_id(lport->host) = 0; + + if (fc_host_port_id(lport->host)) + fc_lport_set_port_id(lport, 0, NULL); } /** @@ -1428,11 +1451,6 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, fh = fc_frame_header_get(fp); did = ntoh24(fh->fh_d_id); if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { - - printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n", - did); - fc_host_port_id(lport->host) = did; - flp = fc_frame_payload_get(fp, sizeof(*flp)); if (flp) { mfs = ntohs(flp->fl_csp.sp_bb_data) & @@ -1452,6 +1470,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, if (e_d_tov > lport->e_d_tov) lport->e_d_tov = e_d_tov; lport->r_a_tov = 2 * e_d_tov; + fc_lport_set_port_id(lport, did, fp); printk(KERN_INFO "libfc: Port (%6x) entered " "point to point mode\n", did); fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), @@ -1464,6 +1483,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, lport->r_a_tov = r_a_tov; fc_host_fabric_name(lport->host) = get_unaligned_be64(&flp->fl_wwnn); + fc_lport_set_port_id(lport, did, fp); fc_lport_enter_dns(lport); } } diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 310d8a22b726..67ce9fa1fee4 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -580,6 +580,26 @@ struct libfc_function_template { */ int (*lport_reset)(struct fc_lport *); + /* + * Set the local port FC_ID. + * + * This may be provided by the LLD to allow it to be + * notified when the local port is assigned a FC-ID. + * + * The frame, if non-NULL, is the incoming frame with the + * FLOGI LS_ACC or FLOGI, and may contain the granted MAC + * address for the LLD. The frame pointer may be NULL if + * no MAC is associated with this assignment (LOGO or PLOGI). + * + * If FC_ID is non-zero, r_a_tov and e_d_tov must be valid. + * + * Note: this is called with the local port mutex held. + * + * STATUS: OPTIONAL + */ + void (*lport_set_port_id)(struct fc_lport *, u32 port_id, + struct fc_frame *); + /* * Create a remote port with a given port ID * -- cgit v1.2.3-59-g8ed1b From e6d8a1b0b53a156979120dd0593c1867b8ea89d3 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:49:11 -0800 Subject: [SCSI] libfc: add host number to lport link up/down messages. The libfc link up/down messages don't indicate which port is changing. The Port ID will often be 0. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 653b52dd2ff7..d3aec1959394 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -568,8 +568,8 @@ void __fc_linkup(struct fc_lport *lport) */ void fc_linkup(struct fc_lport *lport) { - printk(KERN_INFO "libfc: Link up on port (%6x)\n", - fc_host_port_id(lport->host)); + printk(KERN_INFO "host%d: libfc: Link up on port (%6x)\n", + lport->host->host_no, fc_host_port_id(lport->host)); mutex_lock(&lport->lp_mutex); __fc_linkup(lport); @@ -598,8 +598,8 @@ void __fc_linkdown(struct fc_lport *lport) */ void fc_linkdown(struct fc_lport *lport) { - printk(KERN_INFO "libfc: Link down on port (%6x)\n", - fc_host_port_id(lport->host)); + printk(KERN_INFO "host%d: libfc: Link down on port (%6x)\n", + lport->host->host_no, fc_host_port_id(lport->host)); mutex_lock(&lport->lp_mutex); __fc_linkdown(lport); @@ -699,8 +699,9 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) FC_LPORT_DBG(lport, "Discovery succeeded\n"); break; case DISC_EV_FAILED: - printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n", - fc_host_port_id(lport->host)); + printk(KERN_ERR "host%d: libfc: " + "Discovery failed for port (%6x)\n", + lport->host->host_no, fc_host_port_id(lport->host)); mutex_lock(&lport->lp_mutex); fc_lport_enter_reset(lport); mutex_unlock(&lport->lp_mutex); @@ -791,8 +792,9 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, goto out; remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); if (remote_wwpn == lport->wwpn) { - printk(KERN_WARNING "libfc: Received FLOGI from port " - "with same WWPN %llx\n", remote_wwpn); + printk(KERN_WARNING "host%d: libfc: Received FLOGI from port " + "with same WWPN %llx\n", + lport->host->host_no, remote_wwpn); goto out; } FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn); @@ -1471,8 +1473,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, lport->e_d_tov = e_d_tov; lport->r_a_tov = 2 * e_d_tov; fc_lport_set_port_id(lport, did, fp); - printk(KERN_INFO "libfc: Port (%6x) entered " - "point to point mode\n", did); + printk(KERN_INFO "host%d: libfc: " + "Port (%6x) entered " + "point-to-point mode\n", + lport->host->host_no, did); fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), get_unaligned_be64( &flp->fl_wwpn), -- cgit v1.2.3-59-g8ed1b From 386309ce927a308d7742a6fb24a536d3383fbd49 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:49:16 -0800 Subject: [SCSI] libfcoe: fcoe: simplify receive FLOGI response There was a locking problem where the fip->lock was held during the call to update_mac(). The rtnl_lock() must be taken before the fip->lock, not the other way around. This fixes that. Now that fcoe_ctlr_recv_flog() is called only from the response handler to a FLOGI request, some checking can be eliminated. Instead of calling update_mac(), just fill in the granted_mac address for the passed-in frame (skb). Eliminate the passed-in source MAC address since it is also in the skb. Also, in fcoe, call fcoe_set_src_mac() directly instead of going thru the fip function pointer. This will generate less code. Then, since fip isn't needed for LOGO response, use lport as the arg. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 15 +++++---------- drivers/scsi/fcoe/libfcoe.c | 12 ++++++------ include/scsi/libfcoe.h | 2 +- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b15ec996b477..343900ac0ece 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2247,15 +2247,12 @@ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) mac = fr_cb(fp)->granted_mac; if (is_zero_ether_addr(mac)) { /* pre-FIP */ - mac = eth_hdr(&fp->skb)->h_source; - if (fcoe_ctlr_recv_flogi(fip, lport, fp, mac)) { + if (fcoe_ctlr_recv_flogi(fip, lport, fp)) { fc_frame_free(fp); return; } - } else { - /* FIP, libfcoe has already seen it */ - fip->update_mac(lport, fr_cb(fp)->granted_mac); } + fcoe_update_src_mac(lport, mac); done: fc_lport_flogi_resp(seq, fp, lport); } @@ -2271,13 +2268,11 @@ done: */ static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { - struct fcoe_ctlr *fip = arg; - struct fc_exch *exch = fc_seq_exch(seq); - struct fc_lport *lport = exch->lp; + struct fc_lport *lport = arg; static u8 zero_mac[ETH_ALEN] = { 0 }; if (!IS_ERR(fp)) - fip->update_mac(lport, zero_mac); + fcoe_update_src_mac(lport, zero_mac); fc_lport_logo_resp(seq, fp, lport); } @@ -2312,7 +2307,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did, if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) break; return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp, - fip, timeout); + lport, timeout); } return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); } diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 2aab97221c6c..2988b71d1e87 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -1254,10 +1254,9 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work) } /** - * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response or request + * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response * @fip: The FCoE controller * @fp: The FC frame to snoop - * @sa: Ethernet source MAC address from received FCoE frame * * Snoop potential response to FLOGI or even incoming FLOGI. * @@ -1265,16 +1264,18 @@ static void fcoe_ctlr_recv_work(struct work_struct *recv_work) * by fip->flogi_oxid != FC_XID_UNKNOWN. * * The caller is responsible for freeing the frame. + * Fill in the granted_mac address. * * Return non-zero if the frame should not be delivered to libfc. */ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, - struct fc_frame *fp, u8 *sa) + struct fc_frame *fp) { struct fc_frame_header *fh; u8 op; - u8 mac[ETH_ALEN]; + u8 *sa; + sa = eth_hdr(&fp->skb)->h_source; fh = fc_frame_header_get(fp); if (fh->fh_type != FC_TYPE_ELS) return 0; @@ -1305,9 +1306,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, fip->map_dest = 0; } fip->flogi_oxid = FC_XID_UNKNOWN; - fc_fcoe_set_mac(mac, fh->fh_d_id); - fip->update_mac(lport, mac); spin_unlock_bh(&fip->lock); + fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id); } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { /* * Save source MAC for point-to-point responses. 
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index e38ffa05dc26..3837872f1965 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -165,7 +165,7 @@ int fcoe_ctlr_link_down(struct fcoe_ctlr *); int fcoe_ctlr_els_send(struct fcoe_ctlr *, struct fc_lport *, struct sk_buff *); void fcoe_ctlr_recv(struct fcoe_ctlr *, struct sk_buff *); int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *, - struct fc_frame *, u8 *); + struct fc_frame *); /* libfcoe funcs */ u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); -- cgit v1.2.3-59-g8ed1b From 78112e5558064cb4d2e355aed87b2036fcdfe3dd Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:49:22 -0800 Subject: [SCSI] fnic: Add FIP support to the fnic driver Use libfcoe as a common FIP implementation with fcoe. FIP or non-FIP mode is fully automatic if the firmware supports and enables it. Even if FIP is not supported, this uses libfcoe for the non-FIP handling of FLOGI and its response. Use the new lport_set_port_id() notification to capture successful FLOGI responses and port_id resets. While transitioning between Ethernet and FC mode, all rx and tx FC frames are queued. In Ethernet mode, all frames are passed to the exchange manager to capture FLOGI responses. Change to set data_src_addr to the ctl_src_addr whenever it would have previously been zero because we're not logged in. This seems safer so we'll never send a frame with a 0 source MAC. This also eliminates a special case for sending FLOGI frames. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/Kconfig | 2 +- drivers/scsi/fnic/fnic.h | 23 +- drivers/scsi/fnic/fnic_fcs.c | 499 ++++++++++++++++++++---------------------- drivers/scsi/fnic/fnic_main.c | 71 +++--- drivers/scsi/fnic/fnic_res.c | 5 +- drivers/scsi/fnic/fnic_res.h | 50 +++++ drivers/scsi/fnic/fnic_scsi.c | 73 +++--- drivers/scsi/fnic/vnic_scsi.h | 1 + 8 files changed, 382 insertions(+), 342 deletions(-) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index b4d8d63a34b2..36900c71a592 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -663,7 +663,7 @@ config FCOE config FCOE_FNIC tristate "Cisco FNIC Driver" depends on PCI && X86 - select LIBFC + select LIBFCOE help This is support for the Cisco PCI-Express FCoE HBA. diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 1bc267e892d2..bb208a6091e7 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -22,6 +22,7 @@ #include #include #include +#include #include "fnic_io.h" #include "fnic_res.h" #include "vnic_dev.h" @@ -145,6 +146,7 @@ struct mempool; /* Per-instance private data structure */ struct fnic { struct fc_lport *lport; + struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ struct vnic_dev_bar bar0; struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX]; @@ -162,23 +164,16 @@ struct fnic { unsigned int wq_count; unsigned int cq_count; - u32 fcoui_mode:1; /* use fcoui address*/ u32 vlan_hw_insert:1; /* let hw insert the tag */ u32 in_remove:1; /* fnic device in removal */ u32 stop_rx_link_events:1; /* stop proc. 
rx frames, link events */ struct completion *remove_wait; /* device remove thread blocks */ - struct fc_frame *flogi; - struct fc_frame *flogi_resp; - u16 flogi_oxid; - unsigned long s_id; enum fnic_state state; spinlock_t fnic_lock; u16 vlan_id; /* VLAN tag including priority */ - u8 mac_addr[ETH_ALEN]; - u8 dest_addr[ETH_ALEN]; u8 data_src_addr[ETH_ALEN]; u64 fcp_input_bytes; /* internal statistic */ u64 fcp_output_bytes; /* internal statistic */ @@ -205,6 +200,7 @@ struct fnic { struct work_struct link_work; struct work_struct frame_work; struct sk_buff_head frame_queue; + struct sk_buff_head tx_queue; /* copy work queue cache line section */ ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; @@ -224,6 +220,11 @@ struct fnic { ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; }; +static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) +{ + return container_of(fip, struct fnic, ctlr); +} + extern struct workqueue_struct *fnic_event_queue; extern struct device_attribute *fnic_attrs[]; @@ -239,7 +240,11 @@ void fnic_handle_link(struct work_struct *work); int fnic_rq_cmpl_handler(struct fnic *fnic, int); int fnic_alloc_rq_frame(struct vnic_rq *rq); void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); -int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp); +void fnic_flush_tx(struct fnic *); +void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb); +void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); +void fnic_update_mac(struct fc_lport *, u8 *new); +void fnic_update_mac_locked(struct fnic *, u8 *new); int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); int fnic_abort_cmd(struct scsi_cmnd *); @@ -252,7 +257,7 @@ void fnic_empty_scsi_cleanup(struct fc_lport *); void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int); int fnic_wq_cmpl_handler(struct fnic *fnic, int); -int fnic_flogi_reg_handler(struct fnic *fnic); +int fnic_flogi_reg_handler(struct fnic *fnic, u32); void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, struct fcpio_host_req *desc); int fnic_fw_reset_handler(struct fnic *fnic); diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 50db3e36a619..54f8d0e5407f 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,8 @@ struct workqueue_struct *fnic_event_queue; +static void fnic_set_eth_mode(struct fnic *); + void fnic_handle_link(struct work_struct *work) { struct fnic *fnic = container_of(work, struct fnic, link_work); @@ -64,10 +67,10 @@ void fnic_handle_link(struct work_struct *work) spin_unlock_irqrestore(&fnic->fnic_lock, flags); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); - fc_linkdown(fnic->lport); + fcoe_ctlr_link_down(&fnic->ctlr); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); - fc_linkup(fnic->lport); + fcoe_ctlr_link_up(&fnic->ctlr); } else /* UP -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); @@ -76,13 +79,13 @@ void fnic_handle_link(struct work_struct *work) /* DOWN -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); - fc_linkup(fnic->lport); + fcoe_ctlr_link_up(&fnic->ctlr); } else { /* UP -> DOWN */ fnic->lport->host_stats.link_failure_count++; spin_unlock_irqrestore(&fnic->fnic_lock, flags); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); - 
fc_linkdown(fnic->lport); + fcoe_ctlr_link_down(&fnic->ctlr); } } @@ -107,197 +110,179 @@ void fnic_handle_frame(struct work_struct *work) return; } fp = (struct fc_frame *)skb; - /* if Flogi resp frame, register the address */ - if (fr_flags(fp)) { - vnic_dev_add_addr(fnic->vdev, - fnic->data_src_addr); - fr_flags(fp) = 0; + + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + skb_queue_head(&fnic->frame_queue, skb); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; } spin_unlock_irqrestore(&fnic->fnic_lock, flags); fc_exch_recv(lp, fp); } - -} - -static inline void fnic_import_rq_fc_frame(struct sk_buff *skb, - u32 len, u8 sof, u8 eof) -{ - struct fc_frame *fp = (struct fc_frame *)skb; - - skb_trim(skb, len); - fr_eof(fp) = eof; - fr_sof(fp) = sof; } - -static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len) +/** + * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. + * @fnic: fnic instance. + * @skb: Ethernet Frame. + */ +static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) { struct fc_frame *fp; struct ethhdr *eh; - struct vlan_ethhdr *vh; struct fcoe_hdr *fcoe_hdr; struct fcoe_crc_eof *ft; - u32 transport_len = 0; + /* + * Undo VLAN encapsulation if present. + */ eh = (struct ethhdr *)skb->data; - vh = (struct vlan_ethhdr *)skb->data; - if (vh->h_vlan_proto == htons(ETH_P_8021Q) && - vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) { - skb_pull(skb, sizeof(struct vlan_ethhdr)); - transport_len += sizeof(struct vlan_ethhdr); - } else if (eh->h_proto == htons(ETH_P_FCOE)) { - transport_len += sizeof(struct ethhdr); - skb_pull(skb, sizeof(struct ethhdr)); - } else - return -1; + if (eh->h_proto == htons(ETH_P_8021Q)) { + memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); + eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN); + skb_reset_mac_header(skb); + } + if (eh->h_proto == htons(ETH_P_FIP)) { + skb_pull(skb, sizeof(*eh)); + fcoe_ctlr_recv(&fnic->ctlr, skb); + return 1; /* let caller know packet was used */ + } + if (eh->h_proto != htons(ETH_P_FCOE)) + goto drop; + skb_set_network_header(skb, sizeof(*eh)); + skb_pull(skb, sizeof(*eh)); fcoe_hdr = (struct fcoe_hdr *)skb->data; if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) - return -1; + goto drop; fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_sof(fp) = fcoe_hdr->fcoe_sof; skb_pull(skb, sizeof(struct fcoe_hdr)); - transport_len += sizeof(struct fcoe_hdr); + skb_reset_transport_header(skb); - ft = (struct fcoe_crc_eof *)(skb->data + len - - transport_len - sizeof(*ft)); + ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft)); fr_eof(fp) = ft->fcoe_eof; - skb_trim(skb, len - transport_len - sizeof(*ft)); + skb_trim(skb, skb->len - sizeof(*ft)); return 0; +drop: + dev_kfree_skb_irq(skb); + return -1; } -static inline int fnic_handle_flogi_resp(struct fnic *fnic, - struct fc_frame *fp) +/** + * fnic_update_mac_locked() - set data MAC address and filters. + * @fnic: fnic instance. + * @new: newly-assigned FCoE MAC address. + * + * Called with the fnic lock held. 
+ */ +void fnic_update_mac_locked(struct fnic *fnic, u8 *new) { - u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC; - struct ethhdr *eth_hdr; - struct fc_frame_header *fh; - int ret = 0; - unsigned long flags; - struct fc_frame *old_flogi_resp = NULL; + u8 *ctl = fnic->ctlr.ctl_src_addr; + u8 *data = fnic->data_src_addr; - fh = (struct fc_frame_header *)fr_hdr(fp); + if (is_zero_ether_addr(new)) + new = ctl; + if (!compare_ether_addr(data, new)) + return; + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); + if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl)) + vnic_dev_del_addr(fnic->vdev, data); + memcpy(data, new, ETH_ALEN); + if (compare_ether_addr(new, ctl)) + vnic_dev_add_addr(fnic->vdev, new); +} - spin_lock_irqsave(&fnic->fnic_lock, flags); +/** + * fnic_update_mac() - set data MAC address and filters. + * @lport: local port. + * @new: newly-assigned FCoE MAC address. + */ +void fnic_update_mac(struct fc_lport *lport, u8 *new) +{ + struct fnic *fnic = lport_priv(lport); - if (fnic->state == FNIC_IN_ETH_MODE) { + spin_lock_irq(&fnic->fnic_lock); + fnic_update_mac_locked(fnic, new); + spin_unlock_irq(&fnic->fnic_lock); +} - /* - * Check if oxid matches on taking the lock. A new Flogi - * issued by libFC might have changed the fnic cached oxid - */ - if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) { - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "Flogi response oxid not" - " matching cached oxid, dropping frame" - "\n"); - ret = -1; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - dev_kfree_skb_irq(fp_skb(fp)); - goto handle_flogi_resp_end; - } +/** + * fnic_set_port_id() - set the port_ID after successful FLOGI. + * @lport: local port. + * @port_id: assigned FC_ID. + * @fp: received frame containing the FLOGI accept or NULL. + * + * This is called from libfc when a new FC_ID has been assigned. + * This causes us to reset the firmware to FC_MODE and setup the new MAC + * address and FC_ID. + * + * It is also called with FC_ID 0 when we're logged off. + * + * If the FC_ID is due to point-to-point, fp may be NULL. + */ +void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) +{ + struct fnic *fnic = lport_priv(lport); + u8 *mac; + int ret; - /* Drop older cached flogi response frame, cache this frame */ - old_flogi_resp = fnic->flogi_resp; - fnic->flogi_resp = fp; - fnic->flogi_oxid = FC_XID_UNKNOWN; + FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n", + port_id, fp); - /* - * this frame is part of flogi get the src mac addr from this - * frame if the src mac is fcoui based then we mark the - * address mode flag to use fcoui base for dst mac addr - * otherwise we have to store the fcoe gateway addr - */ - eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp)); - memcpy(mac, eth_hdr->h_source, ETH_ALEN); + /* + * If we're clearing the FC_ID, change to use the ctl_src_addr. + * Set ethernet mode to send FLOGI. + */ + if (!port_id) { + fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); + fnic_set_eth_mode(fnic); + return; + } - if (ntoh24(mac) == FC_FCOE_OUI) - fnic->fcoui_mode = 1; - else { - fnic->fcoui_mode = 0; - memcpy(fnic->dest_addr, mac, ETH_ALEN); + if (fp) { + mac = fr_cb(fp)->granted_mac; + if (is_zero_ether_addr(mac)) { + /* non-FIP - FLOGI already accepted - ignore return */ + fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); } + fnic_update_mac(lport, mac); + } - /* - * Except for Flogi frame, all outbound frames from us have the - * Eth Src address as FC_FCOE_OUI"our_sid". 
Flogi frame uses - * the vnic MAC address as the Eth Src address - */ - fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id); - - /* We get our s_id from the d_id of the flogi resp frame */ - fnic->s_id = ntoh24(fh->fh_d_id); - - /* Change state to reflect transition from Eth to FC mode */ + /* Change state to reflect transition to FC mode */ + spin_lock_irq(&fnic->fnic_lock); + if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; - - } else { + else { FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "Unexpected fnic state %s while" " processing flogi resp\n", fnic_state_to_str(fnic->state)); - ret = -1; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - dev_kfree_skb_irq(fp_skb(fp)); - goto handle_flogi_resp_end; + spin_unlock_irq(&fnic->fnic_lock); + return; } - - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - - /* Drop older cached frame */ - if (old_flogi_resp) - dev_kfree_skb_irq(fp_skb(old_flogi_resp)); + spin_unlock_irq(&fnic->fnic_lock); /* - * send flogi reg request to firmware, this will put the fnic in - * in FC mode + * Send FLOGI registration to firmware to set up FC mode. + * The new address will be set up when registration completes. */ - ret = fnic_flogi_reg_handler(fnic); + ret = fnic_flogi_reg_handler(fnic, port_id); if (ret < 0) { - int free_fp = 1; - spin_lock_irqsave(&fnic->fnic_lock, flags); - /* - * free the frame is some other thread is not - * pointing to it - */ - if (fnic->flogi_resp != fp) - free_fp = 0; - else - fnic->flogi_resp = NULL; - + spin_lock_irq(&fnic->fnic_lock); if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) fnic->state = FNIC_IN_ETH_MODE; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (free_fp) - dev_kfree_skb_irq(fp_skb(fp)); + spin_unlock_irq(&fnic->fnic_lock); } - - handle_flogi_resp_end: - return ret; -} - -/* Returns 1 for a response that matches cached flogi oxid */ -static inline int is_matching_flogi_resp_frame(struct fnic *fnic, - struct fc_frame *fp) -{ - struct fc_frame_header *fh; - int ret = 0; - u32 f_ctl; - - fh = fc_frame_header_get(fp); - f_ctl = ntoh24(fh->fh_f_ctl); - - if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) && - fh->fh_r_ctl == FC_RCTL_ELS_REP && - (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX && - fh->fh_type == FC_TYPE_ELS) - ret = 1; - - return ret; } static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc @@ -326,6 +311,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, PCI_DMA_FROMDEVICE); skb = buf->os_buf; + fp = (struct fc_frame *)skb; buf->os_buf = NULL; cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); @@ -338,6 +324,9 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc &fcoe_enc_error, &fcs_ok, &vlan_stripped, &vlan); eth_hdrs_stripped = 1; + skb_trim(skb, fcp_bytes_written); + fr_sof(fp) = sof; + fr_eof(fp) = eof; } else if (type == CQ_DESC_TYPE_RQ_ENET) { cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, @@ -352,6 +341,14 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, &fcs_ok); eth_hdrs_stripped = 0; + skb_trim(skb, bytes_written); + if (!fcs_ok) { + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "fcs error. 
dropping packet.\n"); + goto drop; + } + if (fnic_import_rq_eth_pkt(fnic, skb)) + return; } else { /* wrong CQ type*/ @@ -370,43 +367,11 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc goto drop; } - if (eth_hdrs_stripped) - fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof); - else if (fnic_import_rq_eth_pkt(skb, bytes_written)) - goto drop; - - fp = (struct fc_frame *)skb; - - /* - * If frame is an ELS response that matches the cached FLOGI OX_ID, - * and is accept, issue flogi_reg_request copy wq request to firmware - * to register the S_ID and determine whether FC_OUI mode or GW mode. - */ - if (is_matching_flogi_resp_frame(fnic, fp)) { - if (!eth_hdrs_stripped) { - if (fc_frame_payload_op(fp) == ELS_LS_ACC) { - fnic_handle_flogi_resp(fnic, fp); - return; - } - /* - * Recd. Flogi reject. No point registering - * with fw, but forward to libFC - */ - goto forward; - } - goto drop; - } - if (!eth_hdrs_stripped) - goto drop; - -forward: spin_lock_irqsave(&fnic->fnic_lock, flags); if (fnic->stop_rx_link_events) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); goto drop; } - /* Use fr_flags to indicate whether succ. flogi resp or not */ - fr_flags(fp) = 0; fr_dev(fp) = fnic->lport; spin_unlock_irqrestore(&fnic->fnic_lock, flags); @@ -494,12 +459,49 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) buf->os_buf = NULL; } -static inline int is_flogi_frame(struct fc_frame_header *fh) +/** + * fnic_eth_send() - Send Ethernet frame. + * @fip: fcoe_ctlr instance. + * @skb: Ethernet Frame, FIP, without VLAN encapsulation. + */ +void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { - return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI; + struct fnic *fnic = fnic_from_ctlr(fip); + struct vnic_wq *wq = &fnic->wq[0]; + dma_addr_t pa; + struct ethhdr *eth_hdr; + struct vlan_ethhdr *vlan_hdr; + unsigned long flags; + + if (!fnic->vlan_hw_insert) { + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, + sizeof(*vlan_hdr) - sizeof(*eth_hdr)); + memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); + vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); + vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; + vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); + } + + pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + + spin_lock_irqsave(&fnic->wq_lock[0], flags); + if (!vnic_wq_desc_avail(wq)) { + pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE); + spin_unlock_irqrestore(&fnic->wq_lock[0], flags); + kfree_skb(skb); + return; + } + + fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, + fnic->vlan_hw_insert, fnic->vlan_id, 1); + spin_unlock_irqrestore(&fnic->wq_lock[0], flags); } -int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) +/* + * Send FC frame. 
+ */ +static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) { struct vnic_wq *wq = &fnic->wq[0]; struct sk_buff *skb; @@ -515,6 +517,10 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) fh = fc_frame_header_get(fp); skb = fp_skb(fp); + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && + fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) + return 0; + if (!fnic->vlan_hw_insert) { eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len); @@ -530,16 +536,11 @@ int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); } - if (is_flogi_frame(fh)) { + if (fnic->ctlr.map_dest) fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); - memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN); - } else { - if (fnic->fcoui_mode) - fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); - else - memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN); - memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); - } + else + memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); + memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); tot_len = skb->len; BUG_ON(tot_len % 4); @@ -578,109 +579,85 @@ fnic_send_frame_end: int fnic_send(struct fc_lport *lp, struct fc_frame *fp) { struct fnic *fnic = lport_priv(lp); - struct fc_frame_header *fh; - int ret = 0; - enum fnic_state old_state; unsigned long flags; - struct fc_frame *old_flogi = NULL; - struct fc_frame *old_flogi_resp = NULL; if (fnic->in_remove) { dev_kfree_skb(fp_skb(fp)); - ret = -1; - goto fnic_send_end; + return -1; } - fh = fc_frame_header_get(fp); - /* if not an Flogi frame, send it out, this is the common case */ - if (!is_flogi_frame(fh)) - return fnic_send_frame(fnic, fp); + /* + * Queue frame if in a transitional state. + * This occurs while registering the Port_ID / MAC address after FLOGI. + */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { + skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return 0; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); - /* Flogi frame, now enter the state machine */ + return fnic_send_frame(fnic, fp); +} - spin_lock_irqsave(&fnic->fnic_lock, flags); -again: - /* Get any old cached frames, free them after dropping lock */ - old_flogi = fnic->flogi; - fnic->flogi = NULL; - old_flogi_resp = fnic->flogi_resp; - fnic->flogi_resp = NULL; +/** + * fnic_flush_tx() - send queued frames. + * @fnic: fnic device + * + * Send frames that were waiting to go out in FC or Ethernet mode. + * Whenever changing modes we purge queued frames, so these frames should + * be queued for the stable mode that we're in, either FC or Ethernet. + * + * Called without fnic_lock held. + */ +void fnic_flush_tx(struct fnic *fnic) +{ + struct sk_buff *skb; + struct fc_frame *fp; - fnic->flogi_oxid = FC_XID_UNKNOWN; + while ((skb = skb_dequeue(&fnic->frame_queue))) { + fp = (struct fc_frame *)skb; + fnic_send_frame(fnic, fp); + } +} +/** + * fnic_set_eth_mode() - put fnic into ethernet mode. + * @fnic: fnic device + * + * Called without fnic lock held. 
+ */ +static void fnic_set_eth_mode(struct fnic *fnic) +{ + unsigned long flags; + enum fnic_state old_state; + int ret; + + spin_lock_irqsave(&fnic->fnic_lock, flags); +again: old_state = fnic->state; switch (old_state) { case FNIC_IN_FC_MODE: case FNIC_IN_ETH_TRANS_FC_MODE: default: fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (old_flogi) { - dev_kfree_skb(fp_skb(old_flogi)); - old_flogi = NULL; - } - if (old_flogi_resp) { - dev_kfree_skb(fp_skb(old_flogi_resp)); - old_flogi_resp = NULL; - } - ret = fnic_fw_reset_handler(fnic); spin_lock_irqsave(&fnic->fnic_lock, flags); if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) goto again; - if (ret) { + if (ret) fnic->state = old_state; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - dev_kfree_skb(fp_skb(fp)); - goto fnic_send_end; - } - old_flogi = fnic->flogi; - fnic->flogi = fp; - fnic->flogi_oxid = ntohs(fh->fh_ox_id); - old_flogi_resp = fnic->flogi_resp; - fnic->flogi_resp = NULL; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); break; case FNIC_IN_FC_TRANS_ETH_MODE: - /* - * A reset is pending with the firmware. Store the flogi - * and its oxid. The transition out of this state happens - * only when Firmware completes the reset, either with - * success or failed. If success, transition to - * FNIC_IN_ETH_MODE, if fail, then transition to - * FNIC_IN_FC_MODE - */ - fnic->flogi = fp; - fnic->flogi_oxid = ntohs(fh->fh_ox_id); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - break; - case FNIC_IN_ETH_MODE: - /* - * The fw/hw is already in eth mode. Store the oxid, - * and send the flogi frame out. The transition out of this - * state happens only we receive flogi response from the - * network, and the oxid matches the cached oxid when the - * flogi frame was sent out. If they match, then we issue - * a flogi_reg request and transition to state - * FNIC_IN_ETH_TRANS_FC_MODE - */ - fnic->flogi_oxid = ntohs(fh->fh_ox_id); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - ret = fnic_send_frame(fnic, fp); break; } - -fnic_send_end: - if (old_flogi) - dev_kfree_skb(fp_skb(old_flogi)); - if (old_flogi_resp) - dev_kfree_skb(fp_skb(old_flogi_resp)); - return ret; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); } static void fnic_wq_complete_frame_send(struct vnic_wq *wq, diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 018cc427504a..0333c7f52e66 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include #include #include @@ -68,6 +70,7 @@ MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); static struct libfc_function_template fnic_transport_template = { .frame_send = fnic_send, + .lport_set_port_id = fnic_set_port_id, .fcp_abort_io = fnic_empty_scsi_cleanup, .fcp_cleanup = fnic_empty_scsi_cleanup, .exch_mgr_reset = fnic_exch_mgr_reset @@ -324,9 +327,6 @@ static int fnic_cleanup(struct fnic *fnic) { unsigned int i; int err; - unsigned long flags; - struct fc_frame *flogi = NULL; - struct fc_frame *flogi_resp = NULL; vnic_dev_disable(fnic->vdev); for (i = 0; i < fnic->intr_count; i++) @@ -367,24 +367,6 @@ static int fnic_cleanup(struct fnic *fnic) for (i = 0; i < fnic->intr_count; i++) vnic_intr_clean(&fnic->intr[i]); - /* - * Remove cached flogi and flogi resp frames if any - * These frames are not in any queue, and therefore queue - * cleanup does not clean them. 
So clean them explicitly - */ - spin_lock_irqsave(&fnic->fnic_lock, flags); - flogi = fnic->flogi; - fnic->flogi = NULL; - flogi_resp = fnic->flogi_resp; - fnic->flogi_resp = NULL; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - - if (flogi) - dev_kfree_skb(fp_skb(flogi)); - - if (flogi_resp) - dev_kfree_skb(fp_skb(flogi_resp)); - mempool_destroy(fnic->io_req_pool); for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) mempool_destroy(fnic->io_sgl_pool[i]); @@ -409,6 +391,17 @@ static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data) return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA); } +/** + * fnic_get_mac() - get assigned data MAC address for FIP code. + * @lport: local port. + */ +static u8 *fnic_get_mac(struct fc_lport *lport) +{ + struct fnic *fnic = lport_priv(lport); + + return fnic->data_src_addr; +} + static int __devinit fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -433,6 +426,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, host = lp->host; fnic = lport_priv(lp); fnic->lport = lp; + fnic->ctlr.lp = lp; snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, host->host_no); @@ -541,12 +535,14 @@ static int __devinit fnic_probe(struct pci_dev *pdev, goto err_out_dev_close; } - err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr); + err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "vNIC get MAC addr failed \n"); goto err_out_dev_close; } + /* set data_src for point-to-point mode and to keep it non-zero */ + memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN); /* Get vNIC configuration */ err = fnic_get_vnic_config(fnic); @@ -615,9 +611,21 @@ static int __devinit fnic_probe(struct pci_dev *pdev, fnic->vlan_hw_insert = 1; fnic->vlan_id = 0; - fnic->flogi_oxid = FC_XID_UNKNOWN; - fnic->flogi = NULL; - fnic->flogi_resp = NULL; + /* Initialize the FIP fcoe_ctrl struct */ + fnic->ctlr.send = fnic_eth_send; + fnic->ctlr.update_mac = fnic_update_mac; + fnic->ctlr.get_src_addr = fnic_get_mac; + fcoe_ctlr_init(&fnic->ctlr); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + shost_printk(KERN_INFO, fnic->lport->host, + "firmware supports FIP\n"); + vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); + vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + } else { + shost_printk(KERN_INFO, fnic->lport->host, + "firmware uses non-FIP mode\n"); + fnic->ctlr.mode = FIP_ST_NON_FIP; + } fnic->state = FNIC_IN_FC_MODE; /* Enable hardware stripping of vlan header on ingress */ @@ -708,6 +716,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, INIT_WORK(&fnic->link_work, fnic_handle_link); INIT_WORK(&fnic->frame_work, fnic_handle_frame); skb_queue_head_init(&fnic->frame_queue); + skb_queue_head_init(&fnic->tx_queue); /* Enable all queues */ for (i = 0; i < fnic->raw_wq_count; i++) @@ -738,8 +747,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev, err_out_free_exch_mgr: fc_exch_mgr_free(lp); err_out_remove_scsi_host: - fc_remove_host(fnic->lport->host); - scsi_remove_host(fnic->lport->host); + fc_remove_host(lp->host); + scsi_remove_host(lp->host); err_out_free_rq_buf: for (i = 0; i < fnic->rq_count; i++) vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); @@ -773,6 +782,7 @@ err_out: static void __devexit fnic_remove(struct pci_dev *pdev) { struct fnic *fnic = pci_get_drvdata(pdev); + struct fc_lport *lp = fnic->lport; unsigned long flags; /* @@ -794,6 +804,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev) */ flush_workqueue(fnic_event_queue); 
skb_queue_purge(&fnic->frame_queue); + skb_queue_purge(&fnic->tx_queue); /* * Log off the fabric. This stops all remote ports, dns port, @@ -806,7 +817,8 @@ static void __devexit fnic_remove(struct pci_dev *pdev) fnic->in_remove = 1; spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fc_lport_destroy(fnic->lport); + fcoe_ctlr_destroy(&fnic->ctlr); + fc_lport_destroy(lp); /* * This stops the fnic device, masks all interrupts. Completed @@ -816,6 +828,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev) fnic_cleanup(fnic); BUG_ON(!skb_queue_empty(&fnic->frame_queue)); + BUG_ON(!skb_queue_empty(&fnic->tx_queue)); spin_lock_irqsave(&fnic_list_lock, flags); list_del(&fnic->list); @@ -834,7 +847,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev) pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); - scsi_host_put(fnic->lport->host); + scsi_host_put(lp->host); } static struct pci_driver fnic_driver = { diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c index 7ba61ec715d2..50488f8e169d 100644 --- a/drivers/scsi/fnic/fnic_res.c +++ b/drivers/scsi/fnic/fnic_res.c @@ -144,10 +144,9 @@ int fnic_get_vnic_config(struct fnic *fnic) c->intr_timer_type = c->intr_timer_type; shost_printk(KERN_INFO, fnic->lport->host, - "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " + "vNIC MAC addr %pM " "wq/wq_copy/rq %d/%d/%d\n", - fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2], - fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5], + fnic->ctlr.ctl_src_addr, c->wq_enet_desc_count, c->wq_copy_desc_count, c->rq_desc_count); shost_printk(KERN_INFO, fnic->lport->host, diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h index 88c4471c18f0..ef8aaf2156dd 100644 --- a/drivers/scsi/fnic/fnic_res.h +++ b/drivers/scsi/fnic/fnic_res.h @@ -51,6 +51,31 @@ static inline void fnic_queue_wq_desc(struct vnic_wq *wq, vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); } +static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, + unsigned int len, + int vlan_tag_insert, + unsigned int vlan_tag, + int cq_entry) +{ + struct wq_enet_desc *desc = vnic_wq_next_desc(wq); + + wq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + (u16)len, + 0, /* mss_or_csum_offset */ + 0, /* fc_eof */ + 0, /* offload_mode */ + 1, /* eop */ + (u8)cq_entry, + 0, /* fcoe_encap */ + (u8)vlan_tag_insert, + (u16)vlan_tag, + 0 /* loopback */); + + vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); +} + static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, u32 req_id, u32 lunmap_id, u8 spl_flags, @@ -134,12 +159,37 @@ static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, desc->hdr.tag.u.req_id = req_id; /* id for this request */ desc->u.flogi_reg.format = format; + desc->u.flogi_reg._resvd = 0; hton24(desc->u.flogi_reg.s_id, s_id); memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN); vnic_wq_copy_post(wq); } +static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq, + u32 req_id, u32 s_id, + u8 *fcf_mac, u8 *ha_mac, + u32 r_a_tov, u32 e_d_tov) +{ + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); + + desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */ + desc->hdr.status = 0; /* header status entry */ + desc->hdr._resvd = 0; /* reserved */ + desc->hdr.tag.u.req_id = req_id; /* id for this request */ + + desc->u.flogi_fip_reg._resvd0 = 0; + hton24(desc->u.flogi_fip_reg.s_id, s_id); + memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN); + 
desc->u.flogi_fip_reg._resvd1 = 0; + desc->u.flogi_fip_reg.r_a_tov = r_a_tov; + desc->u.flogi_fip_reg.e_d_tov = e_d_tov; + memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN); + desc->u.flogi_fip_reg._resvd2 = 0; + + vnic_wq_copy_post(wq); +} + static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, u32 req_id) { diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 8d26d7a9f01b..65a39b0f6dc2 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -174,6 +174,9 @@ int fnic_fw_reset_handler(struct fnic *fnic) int ret = 0; unsigned long flags; + skb_queue_purge(&fnic->frame_queue); + skb_queue_purge(&fnic->tx_queue); + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) @@ -200,9 +203,11 @@ int fnic_fw_reset_handler(struct fnic *fnic) * fnic_flogi_reg_handler * Routine to send flogi register msg to fw */ -int fnic_flogi_reg_handler(struct fnic *fnic) +int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) { struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + enum fcpio_flogi_reg_format_type format; + struct fc_lport *lp = fnic->lport; u8 gw_mac[ETH_ALEN]; int ret = 0; unsigned long flags; @@ -217,23 +222,32 @@ int fnic_flogi_reg_handler(struct fnic *fnic) goto flogi_reg_ioreq_end; } - if (fnic->fcoui_mode) + if (fnic->ctlr.map_dest) { memset(gw_mac, 0xff, ETH_ALEN); - else - memcpy(gw_mac, fnic->dest_addr, ETH_ALEN); + format = FCPIO_FLOGI_REG_DEF_DEST; + } else { + memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); + format = FCPIO_FLOGI_REG_GW_DEST; + } - fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, - FCPIO_FLOGI_REG_GW_DEST, - fnic->s_id, - gw_mac); + if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { + fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, + fc_id, gw_mac, + fnic->data_src_addr, + lp->r_a_tov, lp->e_d_tov); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", + fc_id, fnic->data_src_addr, gw_mac); + } else { + fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, + format, fc_id, gw_mac); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "FLOGI reg issued fcid %x map %d dest %pM\n", + fc_id, fnic->ctlr.map_dest, gw_mac); + } flogi_reg_ioreq_end: spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); - - if (!ret) - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "flog reg issued\n"); - return ret; } @@ -453,7 +467,6 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, u8 hdr_status; struct fcpio_tag tag; int ret = 0; - struct fc_frame *flogi; unsigned long flags; fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); @@ -463,9 +476,6 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, spin_lock_irqsave(&fnic->fnic_lock, flags); - flogi = fnic->flogi; - fnic->flogi = NULL; - /* fnic should be in FC_TRANS_ETH_MODE */ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { /* Check status of reset completion */ @@ -506,17 +516,14 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, * free the flogi frame. 
Else, send it out */ if (fnic->remove_wait || ret) { - fnic->flogi_oxid = FC_XID_UNKNOWN; spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (flogi) - dev_kfree_skb_irq(fp_skb(flogi)); + skb_queue_purge(&fnic->tx_queue); goto reset_cmpl_handler_end; } spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (flogi) - ret = fnic_send_frame(fnic, flogi); + fnic_flush_tx(fnic); reset_cmpl_handler_end: return ret; @@ -533,18 +540,13 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, u8 hdr_status; struct fcpio_tag tag; int ret = 0; - struct fc_frame *flogi_resp = NULL; unsigned long flags; - struct sk_buff *skb; fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); /* Update fnic state based on status of flogi reg completion */ spin_lock_irqsave(&fnic->fnic_lock, flags); - flogi_resp = fnic->flogi_resp; - fnic->flogi_resp = NULL; - if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { /* Check flogi registration completion status */ @@ -568,25 +570,17 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, ret = -1; } - /* Successful flogi reg cmpl, pass frame to LibFC */ - if (!ret && flogi_resp) { + if (!ret) { if (fnic->stop_rx_link_events) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); goto reg_cmpl_handler_end; } - skb = (struct sk_buff *)flogi_resp; - /* Use fr_flags to indicate whether flogi resp or not */ - fr_flags(flogi_resp) = 1; - fr_dev(flogi_resp) = fnic->lport; spin_unlock_irqrestore(&fnic->fnic_lock, flags); - skb_queue_tail(&fnic->frame_queue, skb); + fnic_flush_tx(fnic); queue_work(fnic_event_queue, &fnic->frame_work); - } else { spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (flogi_resp) - dev_kfree_skb_irq(fp_skb(flogi_resp)); } reg_cmpl_handler_end: @@ -908,6 +902,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, break; case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ + case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); break; @@ -1747,7 +1742,7 @@ void fnic_scsi_abort_io(struct fc_lport *lp) fnic->remove_wait = &remove_wait; old_state = fnic->state; fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); + fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); spin_unlock_irqrestore(&fnic->fnic_lock, flags); err = fnic_fw_reset_handler(fnic); @@ -1787,7 +1782,7 @@ void fnic_scsi_cleanup(struct fc_lport *lp) spin_lock_irqsave(&fnic->fnic_lock, flags); old_state = fnic->state; fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); + fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (fnic_fw_reset_handler(fnic)) { diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h index 46baa5254001..fbb55364e272 100644 --- a/drivers/scsi/fnic/vnic_scsi.h +++ b/drivers/scsi/fnic/vnic_scsi.h @@ -95,5 +95,6 @@ struct vnic_fc_config { #define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */ #define VFCF_PERBI 0x2 /* persistent binding info available */ +#define VFCF_FIP_CAPABLE 0x4 /* firmware can handle FIP */ #endif /* _VNIC_SCSI_H_ */ -- cgit v1.2.3-59-g8ed1b From ab593b187391bdd03ccad2968972a2e118a88cd4 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:49:27 -0800 Subject: [SCSI] libfc: register FC4 features with the FC switch Customers and certification tests have pointed out that we don't show up on the switch management software as an initiator. 
On some MDS switches 'show fcns database' command shows libfc initiators as 'fcp' not 'fcp:init' like other initiators. On others switches, I think the switch gets the features by doing a PRLI, but it may be only certain models or under certain configurations. Fix this by registering our FC4 features with the RFF_ID CT request after local port login and after the RFT_ID. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 12 +++++++++++- include/scsi/fc/fc_fcp.h | 6 ++++++ include/scsi/fc/fc_ns.h | 13 ++++++++++++- include/scsi/fc_encode.h | 12 ++++++++++++ include/scsi/libfc.h | 2 ++ 5 files changed, 43 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index d3aec1959394..1bcc5e11d2c0 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -122,6 +122,7 @@ static const char *fc_lport_state_names[] = { [LPORT_ST_RSNN_NN] = "RSNN_NN", [LPORT_ST_RSPN_ID] = "RSPN_ID", [LPORT_ST_RFT_ID] = "RFT_ID", + [LPORT_ST_RFF_ID] = "RFF_ID", [LPORT_ST_SCR] = "SCR", [LPORT_ST_READY] = "Ready", [LPORT_ST_LOGO] = "LOGO", @@ -1034,6 +1035,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) case LPORT_ST_RSNN_NN: case LPORT_ST_RSPN_ID: case LPORT_ST_RFT_ID: + case LPORT_ST_RFF_ID: case LPORT_ST_SCR: case LPORT_ST_DNS: case LPORT_ST_FLOGI: @@ -1070,7 +1072,7 @@ static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&lport->lp_mutex); - if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFT_ID) { + if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) { FC_LPORT_DBG(lport, "Received a name server response, " "but in state %s\n", fc_lport_state(lport)); if (IS_ERR(fp)) @@ -1101,6 +1103,9 @@ static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); break; case LPORT_ST_RFT_ID: + fc_lport_enter_ns(lport, LPORT_ST_RFF_ID); + break; + case LPORT_ST_RFF_ID: fc_lport_enter_scr(lport); break; default: @@ -1235,6 +1240,10 @@ static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state) cmd = FC_NS_RFT_ID; size += sizeof(struct fc_ns_rft); break; + case LPORT_ST_RFF_ID: + cmd = FC_NS_RFF_ID; + size += sizeof(struct fc_ns_rff_id); + break; default: fc_lport_error(lport, NULL); return; @@ -1317,6 +1326,7 @@ static void fc_lport_timeout(struct work_struct *work) case LPORT_ST_RSNN_NN: case LPORT_ST_RSPN_ID: case LPORT_ST_RFT_ID: + case LPORT_ST_RFF_ID: fc_lport_enter_ns(lport, lport->state); break; case LPORT_ST_SCR: diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h index 5d38f1989f37..29ecb0b02b09 100644 --- a/include/scsi/fc/fc_fcp.h +++ b/include/scsi/fc/fc_fcp.h @@ -196,4 +196,10 @@ struct fcp_srr { __u8 srr_resvd2[3]; /* reserved */ }; +/* + * Feature bits in name server FC-4 Features object. 
+ */ +#define FCP_FEAT_TARG (1 << 0) /* target function supported */ +#define FCP_FEAT_INIT (1 << 1) /* initiator function supported */ + #endif /* _FC_FCP_H_ */ diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h index f4d354eb26b9..e7d3ac497d7d 100644 --- a/include/scsi/fc/fc_ns.h +++ b/include/scsi/fc/fc_ns.h @@ -46,10 +46,11 @@ enum fc_ns_req { FC_NS_GID_FT = 0x0171, /* get IDs by FC4 type */ FC_NS_GPN_FT = 0x0172, /* get port names by FC4 type */ FC_NS_GID_PT = 0x01a1, /* get IDs by port type */ - FC_NS_RFT_ID = 0x0217, /* reg FC4 type for ID */ FC_NS_RPN_ID = 0x0212, /* reg port name for ID */ FC_NS_RNN_ID = 0x0213, /* reg node name for ID */ + FC_NS_RFT_ID = 0x0217, /* reg FC4 type for ID */ FC_NS_RSPN_ID = 0x0218, /* reg symbolic port name */ + FC_NS_RFF_ID = 0x021f, /* reg FC4 Features for ID */ FC_NS_RSNN_NN = 0x0239, /* reg symbolic node name */ }; @@ -178,4 +179,14 @@ struct fc_ns_rspn { char fr_name[]; } __attribute__((__packed__)); +/* + * RFF_ID request - register FC-4 Features for ID. + */ +struct fc_ns_rff_id { + struct fc_ns_fid fr_fid; /* port ID object */ + __u8 fr_resvd[2]; + __u8 fr_feat; /* FC-4 Feature bits */ + __u8 fr_type; /* FC-4 type */ +} __attribute__((__packed__)); + #endif /* _FC_NS_H_ */ diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index ab2260cb149c..8eb0a0fc0a71 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -32,6 +32,7 @@ struct fc_ct_req { struct fc_ns_gid_ft gid; struct fc_ns_rn_id rn; struct fc_ns_rft rft; + struct fc_ns_rff_id rff; struct fc_ns_fid fid; struct fc_ns_rsnn snn; struct fc_ns_rspn spn; @@ -131,6 +132,17 @@ static inline int fc_ct_fill(struct fc_lport *lport, ct->payload.rft.fts = lport->fcts; break; + case FC_NS_RFF_ID: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id)); + hton24(ct->payload.rff.fr_fid.fp_fid, + fc_host_port_id(lport->host)); + ct->payload.rff.fr_type = FC_TYPE_FCP; + if (lport->service_params & FCP_SPPF_INIT_FCN) + ct->payload.rff.fr_feat = FCP_FEAT_INIT; + if (lport->service_params & FCP_SPPF_TARG_FCN) + ct->payload.rff.fr_feat |= FCP_FEAT_TARG; + break; + case FC_NS_RNN_ID: ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id)); hton24(ct->payload.rn.fr_fid.fp_fid, diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 67ce9fa1fee4..2936fbae41e4 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -62,6 +62,7 @@ * @LPORT_ST_DNS: Waiting for name server remote port to become ready * @LPORT_ST_RPN_ID: Register port name by ID (RPN_ID) sent * @LPORT_ST_RFT_ID: Register Fibre Channel types by ID (RFT_ID) sent + * @LPORT_ST_RFF_ID: Register FC-4 Features by ID (RFF_ID) sent * @LPORT_ST_SCR: State Change Register (SCR) sent * @LPORT_ST_READY: Ready for use * @LPORT_ST_LOGO: Local port logout (LOGO) sent @@ -75,6 +76,7 @@ enum fc_lport_state { LPORT_ST_RSNN_NN, LPORT_ST_RSPN_ID, LPORT_ST_RFT_ID, + LPORT_ST_RFF_ID, LPORT_ST_SCR, LPORT_ST_READY, LPORT_ST_LOGO, -- cgit v1.2.3-59-g8ed1b From 76d8737c9dda1593d52887c8a11bf3359e447896 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:49:32 -0800 Subject: [SCSI] fnic: enable bsg pass-thru for fcping Add initialization of .bsg_request in the scsi_transport_fc template so that fcping works. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 0333c7f52e66..fe1b1031f7ab 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -143,6 +143,7 @@ static struct fc_function_template fnic_fc_functions = { .get_fc_host_stats = fnic_get_stats, .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), .terminate_rport_io = fnic_terminate_rport_io, + .bsg_request = fc_lport_bsg_request, }; static void fnic_get_host_speed(struct Scsi_Host *shost) -- cgit v1.2.3-59-g8ed1b From bf361707c81f8e8e43e332bfc8838bae76ae021a Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Tue, 3 Nov 2009 11:49:38 -0800 Subject: [SCSI] fcoe: Fix checking san mac address This was fixed before in 7a7f0c7 but it's introduced again recently. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 343900ac0ece..2274fcd4c713 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -275,7 +275,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, rcu_read_lock(); for_each_dev_addr(netdev, ha) { if ((ha->type == NETDEV_HW_ADDR_T_SAN) && - (is_valid_ether_addr(fip->ctl_src_addr))) { + (is_valid_ether_addr(ha->addr))) { memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); fip->spma = 1; break; -- cgit v1.2.3-59-g8ed1b From 5bab87e6d465d54a2b5899e0f583d42f00dbee2e Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Tue, 3 Nov 2009 11:49:43 -0800 Subject: [SCSI] fcoe: Fix getting san mac for VLAN interface Make sure we are get the SAN MAC address from the real netdev if the input netdev is a VLAN device. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 2274fcd4c713..c1fd7561f0bd 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -249,6 +249,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, { struct fcoe_ctlr *fip = &fcoe->ctlr; struct netdev_hw_addr *ha; + struct net_device *real_dev; u8 flogi_maddr[ETH_ALEN]; const struct net_device_ops *ops; @@ -272,8 +273,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, /* look for SAN MAC address, if multiple SAN MACs exist, only * use the first one for SPMA */ + real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ? + vlan_dev_real_dev(netdev) : netdev; rcu_read_lock(); - for_each_dev_addr(netdev, ha) { + for_each_dev_addr(real_dev, ha) { if ((ha->type == NETDEV_HW_ADDR_T_SAN) && (is_valid_ether_addr(ha->addr))) { memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); -- cgit v1.2.3-59-g8ed1b From 75ea89ef63d9ca37f190aebb7da061070005ac6e Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Tue, 3 Nov 2009 11:49:49 -0800 Subject: [SCSI] fcoe: Fix setting lport's WWNN/WWPN to use san mac address We are still using netdev->dev_addr to generate lport's WWNN/WWPN even if the LLD has support for NETDEV_HW_ADDR_T_SAN. Instead, we should just use the fip->ctl_src_addr, which is the NETDEV_HW_ADDR_T_SAN if LLD supports it or it is just the netdev->dev_addr if it does not. 
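The reasoning above can be summarized in a few lines: by the time the WWNs are generated, ctlr.ctl_src_addr has already been filled with the SAN MAC when the LLD exposes one, or with the ordinary netdev MAC otherwise, so it is always a valid source for name generation. The snippet below is only an illustrative sketch of that fallback; the names are invented for the example and it is not code from the patch.

```c
#include <string.h>

#define ETH_ALEN 6

/* Illustrative only: how the control source address ends up holding either
 * the SAN MAC (when the LLD provides one) or the plain netdev MAC, which is
 * why WWNN/WWPN generation can always read it. */
static void pick_ctl_src_addr(unsigned char ctl_src_addr[ETH_ALEN],
			      const unsigned char *san_mac,  /* NULL if none */
			      const unsigned char dev_addr[ETH_ALEN])
{
	memcpy(ctl_src_addr, san_mac ? san_mac : dev_addr, ETH_ALEN);
}
```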
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index c1fd7561f0bd..fba7ba00c71b 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -599,10 +599,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport); if (!lport->vport) { - wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0); + wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); fc_set_wwnn(lport, wwnn); /* XXX - 3rd arg needs to be vlan id */ - wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0); + wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, 0); fc_set_wwpn(lport, wwpn); } -- cgit v1.2.3-59-g8ed1b From 349e11faa84ebdd6e484572cfe66f2cf4cb483a1 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Tue, 3 Nov 2009 11:49:54 -0800 Subject: [SCSI] libfc: do not use DID_NO_CONNECT for pkt alloc failures. DID_NO_CONNECT is not a nice value to use for pkt alloc failures, because you can probably retry and IO will become available again. For the device reset callout, we do not want to set the scsi command result for the above reason, and because we do not need to set the scsi_cmd->result in this path. We and other drivers do not set it for success for example, and we do not set it for other failure. And scsi-ml does not send every command through this path, and it is not expecting us to use the scsi_cmnd struct like a cmd coming thruogh queuecommand. I think it is more for storage in case we need a cmd struct for a tmf and to give us certain params like the LUN. Patch was made over scsi-misc today. Signed-off-by: Mike Christie Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 4bfab4f0ccb3..db252e2722d0 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -2029,7 +2029,6 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO); if (fsp == NULL) { printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); - sc_cmd->result = DID_NO_CONNECT << 16; goto out; } -- cgit v1.2.3-59-g8ed1b From cc0136c2e9c10e889cb36e39710c0eb10707b396 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Tue, 3 Nov 2009 11:49:59 -0800 Subject: [SCSI] fcoe: Fix using VLAN ID in creating lport's WWWN/WWPN If the underlying netdev is a VLAN device, make sure the VLAN ID is integrated into the WWNN/WWPN name generation. Also added/updated the comments to reflect this change. 
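As a point of reference, the name generation described above follows the NAA 1 / NAA 2 layouts: the WWNN keeps the 12-bit field (bits 27:16 of word 0, i.e. bits 59:48 of the 64-bit name) zero, while the WWPN carries the VLAN ID there. The stand-alone sketch below only illustrates that layout; it is a simplification with invented names, not the libfcoe fcoe_wwn_from_mac() implementation.

```c
#include <stdio.h>
#include <stdint.h>

/* Simplified illustration of NAA 1/2 WWN construction from a MAC address.
 * 'scheme' selects NAA 1 (WWNN) or NAA 2 (WWPN); 'port' carries the VLAN ID
 * for NAA 2 and is expected to be 0 for NAA 1. */
static uint64_t wwn_from_mac(const uint8_t mac[6], unsigned int scheme,
			     unsigned int port)
{
	uint64_t wwn = (uint64_t)scheme << 60;		/* NAA nibble */
	int i;

	for (i = 0; i < 6; i++)
		wwn |= (uint64_t)mac[i] << (8 * (5 - i));	/* MAC in bits 47:0 */
	if (scheme == 2)
		wwn |= (uint64_t)(port & 0xfff) << 48;	/* VLAN ID in bits 59:48 */
	return wwn;
}

int main(void)
{
	const uint8_t san_mac[6] = { 0x00, 0x1b, 0x21, 0x33, 0x44, 0x55 };

	printf("WWNN %016llx\n", (unsigned long long)wwn_from_mac(san_mac, 1, 0));
	printf("WWPN %016llx\n", (unsigned long long)wwn_from_mac(san_mac, 2, 101));
	return 0;
}
```

With these example values the sketch prints WWNN 1000001b21334455 and WWPN 2065001b21334455, the VLAN ID (101 = 0x065) visible in bits 59:48 of the port name.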
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index fba7ba00c71b..28029a342892 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -555,6 +555,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) u64 wwnn, wwpn; struct fcoe_interface *fcoe; struct fcoe_port *port; + int vid = 0; /* Setup lport private data to point to fcoe softc */ port = lport_priv(lport); @@ -599,10 +600,16 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport); if (!lport->vport) { + /* + * Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN: + * For WWNN, we use NAA 1 w/ bit 27-16 of word 0 as 0. + * For WWPN, we use NAA 2 w/ bit 27-16 of word 0 from VLAN ID + */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) + vid = vlan_dev_vlan_id(netdev); wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); fc_set_wwnn(lport, wwnn); - /* XXX - 3rd arg needs to be vlan id */ - wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, 0); + wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid); fc_set_wwpn(lport, wwpn); } -- cgit v1.2.3-59-g8ed1b From 18fa11efc279c20af5eefff2bbe814ca067e51ae Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 3 Nov 2009 11:50:05 -0800 Subject: [SCSI] libfc, fcoe: fixes for highmem skb linearize panics There are cases outside of our control that may result in a transmit skb being linearized in dev_queue_xmit. There are a couple of bugs in libfc/fcoe that can result in a panic at that point. This patch contains two fixes to prevent those panics. 1) use fast cloning instead of shared skbs with dev_queue_xmit dev_queue_xmit doen't want shared skbuffs being passed in, and __skb_linearize will BUG if the skb is shared. FCoE is holding an extra reference around the call to dev_queue_xmit, so that when it returns an error code indicating the frame has been dropped it can maintain it's own backlog and retransmit. Switch to using fast skb cloning for this instead. 2) don't append compound pages as > PAGE_SIZE skb fragments fc_fcp_send_data will append pages from a scatterlist to the nr_frags[] if the netdev supports it. But, it's using > PAGE_SIZE compound pages as a single skb_frag. In the highmem linearize case that page will be passed to kmap_atomic to get a mapping to copy out of, but kmap_atomic will only allow access to the first PAGE_SIZE part. The memcpy will keep going and cause a page fault once is crosses the first boundary. If fc_fcp_send_data uses linear buffers from the start, it calls kmap_atomic one PAGE_SIZE at a time. That same logic needs to be applied when setting up skb_frags. 
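The page-boundary clamping described above is plain arithmetic and can be illustrated outside the kernel. The stand-alone example below walks a hypothetical scatterlist entry (16 KiB of data beginning 5000 bytes into its backing pages, with a 4 KiB page size assumed) and shows why each copy and each skb_frag must stop at a page boundary; it is an illustration, not the patched fc_fcp_send_data().

```c
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical scatterlist entry backed by a compound page:
	 * 16 KiB of data beginning 5000 bytes into the first page. */
	size_t sg_offset = 5000, sg_len = 16384, sent = 0;

	while (sent < sg_len) {
		size_t off      = sg_offset + sent;
		size_t page_idx = off >> PAGE_SHIFT;	/* which single page to map */
		size_t in_page  = off & ~PAGE_MASK;	/* offset within that page */
		size_t chunk    = sg_len - sent;

		if (chunk > PAGE_SIZE - in_page)
			chunk = PAGE_SIZE - in_page;	/* never cross the page boundary */

		printf("map page %zu, offset %4zu, copy %4zu bytes\n",
		       page_idx, in_page, chunk);
		sent += chunk;
	}
	return 0;
}
```

With the example values, the first chunk is 3192 bytes (finishing the page that offset 5000 lands in), after which the walk proceeds one full page at a time; that is the same bound the patch applies both to the kmap_atomic() copies and to the sizes handed to skb_fill_page_desc().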
Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 5 +++-- drivers/scsi/libfc/fc_fcp.c | 20 ++++++++++---------- drivers/scsi/libfc/fc_frame.c | 5 +++-- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 28029a342892..b570f39faa3a 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1267,10 +1267,11 @@ err2: */ static inline int fcoe_start_io(struct sk_buff *skb) { + struct sk_buff *nskb; int rc; - skb_get(skb); - rc = dev_queue_xmit(skb); + nskb = skb_clone(skb, GFP_ATOMIC); + rc = dev_queue_xmit(nskb); if (rc != 0) return rc; kfree_skb(skb); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index db252e2722d0..c4b58d042f6f 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -530,11 +530,13 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, struct scatterlist *sg; struct fc_frame *fp = NULL; struct fc_lport *lport = fsp->lp; + struct page *page; size_t remaining; size_t t_blen; size_t tlen; size_t sg_bytes; size_t frame_offset, fh_parm_offset; + size_t off; int error; void *data = NULL; void *page_addr; @@ -605,28 +607,26 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, fh_parm_offset = frame_offset; fr_max_payload(fp) = fsp->max_payload; } + + off = offset + sg->offset; sg_bytes = min(tlen, sg->length - offset); + sg_bytes = min(sg_bytes, + (size_t) (PAGE_SIZE - (off & ~PAGE_MASK))); + page = sg_page(sg) + (off >> PAGE_SHIFT); if (using_sg) { - get_page(sg_page(sg)); + get_page(page); skb_fill_page_desc(fp_skb(fp), skb_shinfo(fp_skb(fp))->nr_frags, - sg_page(sg), sg->offset + offset, - sg_bytes); + page, off & ~PAGE_MASK, sg_bytes); fp_skb(fp)->data_len += sg_bytes; fr_len(fp) += sg_bytes; fp_skb(fp)->truesize += PAGE_SIZE; } else { - size_t off = offset + sg->offset; - /* * The scatterlist item may be bigger than PAGE_SIZE, * but we must not cross pages inside the kmap. 
*/ - sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE - - (off & ~PAGE_MASK))); - page_addr = kmap_atomic(sg_page(sg) + - (off >> PAGE_SHIFT), - KM_SOFTIRQ0); + page_addr = kmap_atomic(page, KM_SOFTIRQ0); memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), sg_bytes); kunmap_atomic(page_addr, KM_SOFTIRQ0); diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c index 79c956501bd9..6da01c616964 100644 --- a/drivers/scsi/libfc/fc_frame.c +++ b/drivers/scsi/libfc/fc_frame.c @@ -58,12 +58,13 @@ struct fc_frame *_fc_frame_alloc(size_t len) WARN_ON((len % sizeof(u32)) != 0); len += sizeof(struct fc_frame_header); - skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM); + skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM + + NET_SKB_PAD, GFP_ATOMIC); if (!skb) return NULL; + skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM); fp = (struct fc_frame *) skb; fc_frame_init(fp); - skb_reserve(skb, FC_FRAME_HEADROOM); skb_put(skb, len); return fp; } -- cgit v1.2.3-59-g8ed1b From 4ae1e19f251335a24ce6cd13f08b4af560ed8765 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Tue, 3 Nov 2009 11:50:10 -0800 Subject: [SCSI] libfc: fix an issue of pending exch/es after i/f destroyed or rmmod fcoe All exches must be freed before its EM mempool destroyed in this case but currently some exches could be still pending in their scheduled delayed work after EM mempool is destroyed causing this issue discussed and reported in this latest email thread:- http://www.open-fcoe.org/pipermail/devel/2009-October/004788.html This patch fixes this issue by adding dedicated work queue thread fc_exch_workqueue for exch delayed work and then flush this work queue before destroying EM mempool. The cancel_delayed_work_sync cannot be called during final fc_exch_reset due to lport and exch locking ordering, so removes related comment block not relevant any more with this patch. Reported-by: Joe Eykholt Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 0f45bb8521f1..19d711cb938c 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -38,6 +38,7 @@ u16 fc_cpu_mask; /* cpu mask for possible cpus */ EXPORT_SYMBOL(fc_cpu_mask); static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ +struct workqueue_struct *fc_exch_workqueue; /* * Structure and function definitions for managing Fibre Channel Exchanges @@ -427,8 +428,8 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, FC_EXCH_DBG(ep, "Exchange timer armed\n"); - if (schedule_delayed_work(&ep->timeout_work, - msecs_to_jiffies(timer_msec))) + if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, + msecs_to_jiffies(timer_msec))) fc_exch_hold(ep); /* hold for timer */ } @@ -1619,12 +1620,6 @@ static void fc_exch_reset(struct fc_exch *ep) spin_lock_bh(&ep->ex_lock); ep->state |= FC_EX_RST_CLEANUP; - /* - * we really want to call del_timer_sync, but cannot due - * to the lport calling with the lport lock held (some resp - * functions can also grab the lport lock which could cause - * a deadlock). 
- */ if (cancel_delayed_work(&ep->timeout_work)) atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ resp = ep->resp; @@ -2203,6 +2198,7 @@ void fc_exch_mgr_free(struct fc_lport *lport) { struct fc_exch_mgr_anchor *ema, *next; + flush_workqueue(fc_exch_workqueue); list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list) fc_exch_mgr_del(ema); } @@ -2338,6 +2334,9 @@ int fc_setup_exch_mgr() } fc_cpu_mask--; + fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); + if (!fc_exch_workqueue) + return -ENOMEM; return 0; } @@ -2346,5 +2345,6 @@ int fc_setup_exch_mgr() */ void fc_destroy_exch_mgr() { + destroy_workqueue(fc_exch_workqueue); kmem_cache_destroy(fc_em_cachep); } -- cgit v1.2.3-59-g8ed1b From be276cbe1bd680ab1f6c297017dd658e5a6b10d2 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Tue, 3 Nov 2009 11:50:16 -0800 Subject: [SCSI] libfcoe: Do not pad FIP keep-alive to full frame size According to the FC-BB-5 Rev2.0, 7.8.6.2, we should not pad FIP keep-alive frames. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 2988b71d1e87..3c501d4973e3 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -351,8 +351,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, if (!fcf || !fc_host_port_id(lp->host)) return; - len = fcoe_ctlr_fcoe_size(fip) + sizeof(struct ethhdr); - BUG_ON(len < sizeof(*kal) + sizeof(*vn)); + len = sizeof(*kal) + ports * sizeof(*vn); skb = dev_alloc_skb(len); if (!skb) return; -- cgit v1.2.3-59-g8ed1b From b94f8951bf256674eca3f2a490df17521442afef Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 3 Nov 2009 11:50:21 -0800 Subject: [SCSI] libfc fcoe: increase ELS and CT timeouts The FC-LS spec. says ELS timeouts should be 2 x R_A_TOV. The FC-GS spec. says CT timeouts should be 3 x R_A_TOV. We've been using E_D_TOV for both of those. Change for all ELS and CT requests except FLOGI, which we leave at 2 seconds (using E_D_TOV). One could argue that R_A_TOV is locally determined until after FLOGI succeeds. This does change FLOGI for vports which becomes FDISC. This does not change the REC/SRR timeout which is 2 seconds. 
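Read as a rule of thumb, the new policy is: name-server (CT) requests wait 3 x R_A_TOV, other ELS requests wait 2 x R_A_TOV, and only the physical port's FLOGI keeps the old E_D_TOV. The helper below merely restates that rule for clarity; it is a sketch with invented names and example values, not a function added by the patch.

```c
#include <stdio.h>

/* Timeout selection (milliseconds) implied by the change described above. */
static unsigned int fc_req_timeout(unsigned int e_d_tov, unsigned int r_a_tov,
				   int is_ct, int is_physical_flogi)
{
	if (is_physical_flogi)
		return e_d_tov;		/* FLOGI on the physical port: unchanged */
	if (is_ct)
		return 3 * r_a_tov;	/* CT: GPN_FT, GPN_ID, RNN_ID, RFT_ID, ... */
	return 2 * r_a_tov;		/* ELS: PLOGI, PRLI, SCR, LOGO, ADISC, FDISC, ... */
}

int main(void)
{
	/* Example only: E_D_TOV 2000 ms, R_A_TOV 10000 ms. */
	printf("CT    %u ms\n", fc_req_timeout(2000, 10000, 1, 0));	/* 30000 */
	printf("ELS   %u ms\n", fc_req_timeout(2000, 10000, 0, 0));	/* 20000 */
	printf("FLOGI %u ms\n", fc_req_timeout(2000, 10000, 0, 1));	/*  2000 */
	return 0;
}
```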
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 2 +- drivers/scsi/libfc/fc_disc.c | 5 +++-- drivers/scsi/libfc/fc_lport.c | 12 ++++++++---- drivers/scsi/libfc/fc_rport.c | 15 ++++++++++----- 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b570f39faa3a..4a43b74c0d27 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2428,5 +2428,5 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport) if (!fp) return; lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, - NULL, NULL, lport->e_d_tov); + NULL, NULL, 3 * lport->r_a_tov); } diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 7b790ad15a93..9b0a5192a965 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -370,7 +370,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc) if (lport->tt.elsct_send(lport, 0, fp, FC_NS_GPN_FT, fc_disc_gpn_ft_resp, - disc, lport->e_d_tov)) + disc, 3 * lport->r_a_tov)) return; err: fc_disc_error(disc, NULL); @@ -654,7 +654,8 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport, if (!fp) return -ENOMEM; if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID, - fc_disc_gpn_id_resp, rdata, lport->e_d_tov)) + fc_disc_gpn_id_resp, rdata, + 3 * lport->r_a_tov)) return -ENOMEM; kref_get(&rdata->kref); return 0; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 1bcc5e11d2c0..c841d547c298 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1191,7 +1191,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport) } if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, - fc_lport_scr_resp, lport, lport->e_d_tov)) + fc_lport_scr_resp, lport, + 2 * lport->r_a_tov)) fc_lport_error(lport, NULL); } @@ -1257,7 +1258,7 @@ static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state) if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd, fc_lport_ns_resp, - lport, lport->e_d_tov)) + lport, 3 * lport->r_a_tov)) fc_lport_error(lport, fp); } @@ -1414,7 +1415,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport) } if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, - fc_lport_logo_resp, lport, lport->e_d_tov)) + fc_lport_logo_resp, lport, + 2 * lport->r_a_tov)) fc_lport_error(lport, NULL); } @@ -1534,7 +1536,9 @@ void fc_lport_enter_flogi(struct fc_lport *lport) if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, lport->vport ? ELS_FDISC : ELS_FLOGI, - fc_lport_flogi_resp, lport, lport->e_d_tov)) + fc_lport_flogi_resp, lport, + lport->vport ? 
2 * lport->r_a_tov : + lport->e_d_tov)) fc_lport_error(lport, NULL); } diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 6578968a753d..91e2ba27f7bd 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -668,7 +668,8 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) rdata->e_d_tov = lport->e_d_tov; if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, - fc_rport_plogi_resp, rdata, lport->e_d_tov)) + fc_rport_plogi_resp, rdata, + 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); @@ -829,7 +830,8 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, - fc_rport_prli_resp, rdata, lport->e_d_tov)) + fc_rport_prli_resp, rdata, + 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); @@ -925,7 +927,8 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, - fc_rport_rtv_resp, rdata, lport->e_d_tov)) + fc_rport_rtv_resp, rdata, + 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); @@ -955,7 +958,8 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, - fc_rport_logo_resp, rdata, lport->e_d_tov)) + fc_rport_logo_resp, rdata, + 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); @@ -1042,7 +1046,8 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) return; } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, - fc_rport_adisc_resp, rdata, lport->e_d_tov)) + fc_rport_adisc_resp, rdata, + 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); -- cgit v1.2.3-59-g8ed1b From cd7560cb69489c6b798b61897449989b4e972327 Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Wed, 4 Nov 2009 00:38:44 +0100 Subject: [SCSI] qlogicpti: add missing parentheses `+' has a higher precedence than `?' so the condition always evaluates to true and this is preprocessed to `7*((ql) - 1)' Signed-off-by: Roel Kluin Acked-by: David S. Miller Signed-off-by: James Bottomley --- drivers/scsi/qlogicpti.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h index 9c053bbaa877..e3c74d1ee2db 100644 --- a/drivers/scsi/qlogicpti.h +++ b/drivers/scsi/qlogicpti.h @@ -43,7 +43,7 @@ * determined for each queue request anew. */ #define QLOGICPTI_REQ_QUEUE_LEN 255 /* must be power of two - 1 */ -#define QLOGICPTI_MAX_SG(ql) (4 + ((ql) > 0) ? 7*((ql) - 1) : 0) +#define QLOGICPTI_MAX_SG(ql) (4 + (((ql) > 0) ? 7*((ql) - 1) : 0)) /* mailbox command complete status codes */ #define MBOX_COMMAND_COMPLETE 0x4000 -- cgit v1.2.3-59-g8ed1b From 5917290ce9b376866b165d02a5ed88d5ecdb32d0 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 11 Sep 2009 10:20:35 -0700 Subject: [SCSI] scsi_dh: create sysfs file, dh_state for all SCSI disk devices Create the sysfs file, dh_state even if the new SCSI device is not in the any of the device handler's internal lists. 
Signed-Off-by: Chandra Seetharaman Acked-by: Hannes Reinecke Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 6f7f798910e8..e19a1a55270c 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -304,18 +304,15 @@ static int scsi_dh_notifier(struct notifier_block *nb, sdev = to_scsi_device(dev); if (action == BUS_NOTIFY_ADD_DEVICE) { + err = device_create_file(dev, &scsi_dh_state_attr); + /* don't care about err */ devinfo = device_handler_match(NULL, sdev); - if (!devinfo) - goto out; - - err = scsi_dh_handler_attach(sdev, devinfo); - if (!err) - err = device_create_file(dev, &scsi_dh_state_attr); + if (devinfo) + err = scsi_dh_handler_attach(sdev, devinfo); } else if (action == BUS_NOTIFY_DEL_DEVICE) { device_remove_file(dev, &scsi_dh_state_attr); scsi_dh_handler_detach(sdev, NULL); } -out: return err; } -- cgit v1.2.3-59-g8ed1b From d139b9bd0e52dda14fd13412e7096e68b56d0076 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Thu, 5 Nov 2009 13:33:12 -0600 Subject: [SCSI] scsi_lib_dma: fix bug with dma maps on nested scsi objects Some of our virtual SCSI hosts don't have a proper bus parent at the top, which can be a problem for doing DMA on them This patch makes the host device cache a pointer to the physical bus device and provides an extra API for setting it (the normal API picks it up from the parent). This patch also modifies the qla2xxx and lpfc vport logic to use the new DMA host setting API. Acked-By: James Smart Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/hosts.c | 13 ++++++++++--- drivers/scsi/lpfc/lpfc_init.c | 2 +- drivers/scsi/qla2xxx/qla_attr.c | 3 ++- drivers/scsi/scsi_lib_dma.c | 4 ++-- include/scsi/scsi_host.h | 16 +++++++++++++++- 5 files changed, 30 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 5fd2da494d08..28a753d796f3 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -180,14 +180,20 @@ void scsi_remove_host(struct Scsi_Host *shost) EXPORT_SYMBOL(scsi_remove_host); /** - * scsi_add_host - add a scsi host + * scsi_add_host_with_dma - add a scsi host with dma device * @shost: scsi host pointer to add * @dev: a struct device of type scsi class + * @dma_dev: dma device for the host + * + * Note: You rarely need to worry about this unless you're in a + * virtualised host environments, so use the simpler scsi_add_host() + * function instead. * * Return value: * 0 on success / != 0 for error **/ -int scsi_add_host(struct Scsi_Host *shost, struct device *dev) +int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, + struct device *dma_dev) { struct scsi_host_template *sht = shost->hostt; int error = -EINVAL; @@ -207,6 +213,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev) if (!shost->shost_gendev.parent) shost->shost_gendev.parent = dev ? 
dev : &platform_bus; + shost->dma_dev = dma_dev; error = device_add(&shost->shost_gendev); if (error) @@ -262,7 +269,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev) fail: return error; } -EXPORT_SYMBOL(scsi_add_host); +EXPORT_SYMBOL(scsi_add_host_with_dma); static void scsi_host_dev_release(struct device *dev) { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 562d8cee874b..f913f1e93635 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2408,7 +2408,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) vport->els_tmofunc.function = lpfc_els_timeout; vport->els_tmofunc.data = (unsigned long)vport; - error = scsi_add_host(shost, dev); + error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) goto out_put_shost; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index fbcb82a2f7f4..21e2bc4d7401 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1654,7 +1654,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); } - if (scsi_add_host(vha->host, &fc_vport->dev)) { + if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, + &ha->pdev->dev)) { DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", vha->host_no, vha->vp_idx)); goto vport_create_failed_2; diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c index ac6855cd2657..dcd128583b89 100644 --- a/drivers/scsi/scsi_lib_dma.c +++ b/drivers/scsi/scsi_lib_dma.c @@ -23,7 +23,7 @@ int scsi_dma_map(struct scsi_cmnd *cmd) int nseg = 0; if (scsi_sg_count(cmd)) { - struct device *dev = cmd->device->host->shost_gendev.parent; + struct device *dev = cmd->device->host->dma_dev; nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); @@ -41,7 +41,7 @@ EXPORT_SYMBOL(scsi_dma_map); void scsi_dma_unmap(struct scsi_cmnd *cmd) { if (scsi_sg_count(cmd)) { - struct device *dev = cmd->device->host->shost_gendev.parent; + struct device *dev = cmd->device->host->dma_dev; dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 603054d8f40c..6ff6bc18e294 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -682,6 +682,12 @@ struct Scsi_Host { */ void *shost_data; + /* + * Points to the physical bus device we'd use to do DMA + * Needed just in case we have virtual hosts. 
+ */ + struct device *dma_dev; + /* * We should ensure that this is aligned, both for better performance * and also because some compilers (m68k) don't automatically force @@ -726,7 +732,9 @@ extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); extern void scsi_flush_work(struct Scsi_Host *); extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int); -extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *); +extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *, + struct device *, + struct device *); extern void scsi_scan_host(struct Scsi_Host *); extern void scsi_rescan_device(struct device *); extern void scsi_remove_host(struct Scsi_Host *); @@ -737,6 +745,12 @@ extern const char *scsi_host_state_name(enum scsi_host_state); extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *); +static inline int __must_check scsi_add_host(struct Scsi_Host *host, + struct device *dev) +{ + return scsi_add_host_with_dma(host, dev, dev); +} + static inline struct device *scsi_get_device(struct Scsi_Host *shost) { return shost->shost_gendev.parent; -- cgit v1.2.3-59-g8ed1b From d0b68041bdd0e5ea6dae1210541bf124443d72ec Mon Sep 17 00:00:00 2001 From: jack_wang Date: Thu, 5 Nov 2009 22:32:31 +0800 Subject: [SCSI] pm8001: add reinitialize SPC parameters before phy start Signed-off-by: Jack Wang Signed-off-by: Lindar Liu Signed-off-by: James Bottomley --- drivers/scsi/pm8001/pm8001_hwi.c | 76 +++++++++++++++++++++++++++++++++------- drivers/scsi/pm8001/pm8001_hwi.h | 19 ++++++++++ drivers/scsi/pm8001/pm8001_sas.c | 1 + drivers/scsi/pm8001/pm8001_sas.h | 1 + 4 files changed, 85 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index aa5756fe0574..d18c2635995f 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -57,9 +57,9 @@ static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14); pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18); pm8001_ha->main_cfg_tbl.inbound_queue_offset = - pm8001_mr32(address, 0x1C); + pm8001_mr32(address, MAIN_IBQ_OFFSET); pm8001_ha->main_cfg_tbl.outbound_queue_offset = - pm8001_mr32(address, 0x20); + pm8001_mr32(address, MAIN_OBQ_OFFSET); pm8001_ha->main_cfg_tbl.hda_mode_flag = pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); @@ -124,7 +124,7 @@ read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) int i; void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; for (i = 0; i < inbQ_num; i++) { - u32 offset = i * 0x24; + u32 offset = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); pm8001_ha->inbnd_q_tbl[i].pi_offset = @@ -231,7 +231,7 @@ init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = pm8001_ha->memoryMap.region[PI].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = - 0 | (0 << 16) | (0 << 24); + 0 | (10 << 16) | (0 << 24); pm8001_ha->outbnd_q_tbl[i].pi_virt = pm8001_ha->memoryMap.region[PI].virt_ptr; offsetob = i * 0x24; @@ -375,13 +375,16 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit) { u32 offset; u32 value; - u32 i; + u32 i, j; + u32 bit_cnt; #define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000 #define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000 #define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074 #define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074 
-#define PHY_SSC_BIT_SHIFT 13 +#define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12 +#define PHY_G3_WITH_SSC_BIT_SHIFT 13 +#define SNW3_PHY_CAPABILITIES_PARITY 31 /* * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3) @@ -393,10 +396,22 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit) for (i = 0; i < 4; i++) { offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i; value = pm8001_cr32(pm8001_ha, 2, offset); - if (SSCbit) - value = value | (0x00000001 << PHY_SSC_BIT_SHIFT); + if (SSCbit) { + value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT; + value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT); + } else { + value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT; + value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT); + } + bit_cnt = 0; + for (j = 0; j < 31; j++) + if ((value >> j) & 0x00000001) + bit_cnt++; + if (bit_cnt % 2) + value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY); else - value = value & (~(0x00000001<> j) & 0x00000001) + bit_cnt++; + if (bit_cnt % 2) + value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY); else - value = value & (~(0x00000001<ccb_info[tag]; + ccb->ccb_tag = tag; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + payload.SSAHOLT = cpu_to_le32(0xd << 25); + payload.sata_hol_tmo = cpu_to_le32(80); + payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); + rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return rc; } @@ -4367,5 +4418,6 @@ const struct pm8001_dispatch pm8001_8001_dispatch = { .set_nvmd_req = pm8001_chip_set_nvmd_req, .fw_flash_update_req = pm8001_chip_fw_flash_update_req, .set_dev_state_req = pm8001_chip_set_dev_state_req, + .sas_re_init_req = pm8001_chip_sas_re_initialization, }; diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h index 3690a2ba0eb2..96e4daa68b8f 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.h +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -490,6 +490,25 @@ struct set_dev_state_req { u32 reserved[12]; } __attribute__((packed, aligned(4))); +/* + * brief the data structure of sas_re_initialization + */ +struct sas_re_initialization_req { + + __le32 tag; + __le32 SSAHOLT;/* bit29-set max port; + ** bit28-set open reject cmd retries. + ** bit27-set open reject data retries. + ** bit26-set open reject option, remap:1 or not:0. + ** bit25-set sata head of line time out. + */ + __le32 reserved_maxPorts; + __le32 open_reject_cmdretries_data_retries;/* cmd retries: 31-bit16; + * data retries: bit15-bit0. 
+ */ + __le32 sata_hol_tmo; + u32 reserved1[10]; +} __attribute__((packed, aligned(4))); /* * brief the data structure of SATA Start Command diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 7bf30fa6963a..1e840fd15160 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -240,6 +240,7 @@ void pm8001_scan_start(struct Scsi_Host *shost) struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); pm8001_ha = sha->lldd_ha; + PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); for (i = 0; i < pm8001_ha->chip->n_phy; ++i) PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); } diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 14c676bbb533..ed6dbd193aa1 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -153,6 +153,7 @@ struct pm8001_dispatch { u32 state); int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha, u32 state); + int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha); }; struct pm8001_chip_info { -- cgit v1.2.3-59-g8ed1b From 72d0baa089ebd058cdb8b87fde835e9157c4597a Mon Sep 17 00:00:00 2001 From: jack_wang Date: Thu, 5 Nov 2009 22:33:35 +0800 Subject: [SCSI] pm8001: enhance IOMB process modules We set interupt cascading count of outbound queue to get better performance, correct some unnecessary return values and some noisy print messages. patch attached. Signed-off-by: Jack Wang Signed-off-by: Lindar Liu Signed-off-by: James Bottomley --- drivers/scsi/pm8001/pm8001_hwi.c | 245 ++++++++++++++++++++++----------------- drivers/scsi/pm8001/pm8001_sas.h | 2 +- 2 files changed, 141 insertions(+), 106 deletions(-) diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index d18c2635995f..a3de306b9045 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -341,7 +341,7 @@ update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number) * @pm8001_ha : our hba card infomation * @shiftValue : shifting value in memory bar. */ -static u32 bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue) +static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue) { u32 regVal; u32 max_wait_count; @@ -1217,7 +1217,7 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) * @messageSize: the message size of this transfer, normally it is 64 bytes * @messagePtr: the pointer to message. */ -static u32 mpi_msg_free_get(struct inbound_queue_table *circularQ, +static int mpi_msg_free_get(struct inbound_queue_table *circularQ, u16 messageSize, void **messagePtr) { u32 offset, consumer_index; @@ -1257,7 +1257,7 @@ static u32 mpi_msg_free_get(struct inbound_queue_table *circularQ, * @opCode: the operation code represents commands which LLDD and fw recognized. * @payload: the command payload of each operation command. 
*/ -static u32 mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, +static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, struct inbound_queue_table *circularQ, u32 opCode, void *payload) { @@ -1270,7 +1270,7 @@ static u32 mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, pm8001_printk("No free mpi buffer \n")); return -1; } - + BUG_ON(!payload); /*Copy to the payload*/ memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr))); @@ -1289,10 +1289,30 @@ static u32 mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, return 0; } -static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, +static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, struct outbound_queue_table *circularQ, u8 bc) { u32 producer_index; + struct mpi_msg_hdr *msgHeader; + struct mpi_msg_hdr *pOutBoundMsgHeader; + + msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); + pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + + circularQ->consumer_idx * 64); + if (pOutBoundMsgHeader != msgHeader) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("consumer_idx = %d msgHeader = %p\n", + circularQ->consumer_idx, msgHeader)); + + /* Update the producer index from SPC */ + producer_index = pm8001_read_32(circularQ->pi_virt); + circularQ->producer_index = cpu_to_le32(producer_index); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("consumer_idx = %d producer_index = %d" + "msgHeader = %p\n", circularQ->consumer_idx, + circularQ->producer_index, msgHeader)); + return 0; + } /* free the circular queue buffer elements associated with the message*/ circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256; /* update the CI of outbound queue */ @@ -1324,8 +1344,6 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, do { /* If there are not-yet-delivered messages ... 
*/ if (circularQ->producer_index != circularQ->consumer_idx) { - PM8001_IO_DBG(pm8001_ha, - pm8001_printk("process an IOMB\n")); /*Get the pointer to the circular queue buffer element*/ msgHeader = (struct mpi_msg_hdr *) (circularQ->base_virt + @@ -1342,34 +1360,43 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, *pBC = (u8)((msgHeader_tmp >> 24) & 0x1f); PM8001_IO_DBG(pm8001_ha, - pm8001_printk("mpi_msg_consume" - ": CI=%d PI=%d msgHeader=%x\n", + pm8001_printk(": CI=%d PI=%d " + "msgHeader=%x\n", circularQ->consumer_idx, circularQ->producer_index, msgHeader_tmp)); return MPI_IO_STATUS_SUCCESS; } else { - u32 producer_index; - void *pi_virt = circularQ->pi_virt; - /* free the circular queue buffer - elements associated with the message*/ circularQ->consumer_idx = (circularQ->consumer_idx + ((msgHeader_tmp >> 24) & 0x1f)) % 256; + msgHeader_tmp = 0; + pm8001_write_32(msgHeader, 0, 0); /* update the CI of outbound queue */ pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset, circularQ->consumer_idx); - /* Update the producer index from SPC */ - producer_index = - pm8001_read_32(pi_virt); - circularQ->producer_index = - cpu_to_le32(producer_index); } - } else + } else { + circularQ->consumer_idx = + (circularQ->consumer_idx + + ((msgHeader_tmp >> 24) & 0x1f)) % 256; + msgHeader_tmp = 0; + pm8001_write_32(msgHeader, 0, 0); + /* update the CI of outbound queue */ + pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, + circularQ->ci_offset, + circularQ->consumer_idx); return MPI_IO_STATUS_FAIL; + } + } else { + u32 producer_index; + void *pi_virt = circularQ->pi_virt; + /* Update the producer index from SPC */ + producer_index = pm8001_read_32(pi_virt); + circularQ->producer_index = cpu_to_le32(producer_index); } } while (circularQ->producer_index != circularQ->consumer_idx); /* while we don't have any more not-yet-delivered message */ @@ -1441,7 +1468,7 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, * So we will tell the caller who maybe waiting the result to tell upper layer * that the task has been finished. 
*/ -static int +static void mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) { struct sas_task *t; @@ -1461,14 +1488,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_dev = ccb->device; param = le32_to_cpu(psspPayload->param); - PM8001_IO_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_COMP\n")); t = ccb->task; - if (status) + if (status && status != IO_UNDERFLOW) PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("sas IO status 0x%x\n", status)); if (unlikely(!t || !t->lldd_task || !t->dev)) - return -1; + return; ts = &t->task_status; switch (status) { case IO_SUCCESS: @@ -1541,7 +1567,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; - ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: PM8001_IO_DBG(pm8001_ha, @@ -1581,6 +1607,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; case IO_XFER_ERROR_ACK_NAK_TIMEOUT: PM8001_IO_DBG(pm8001_ha, @@ -1656,7 +1683,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) break; } PM8001_IO_DBG(pm8001_ha, - pm8001_printk("scsi_satus = %x \n ", + pm8001_printk("scsi_status = %x \n ", psspPayload->ssp_resp_iu.status)); spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; @@ -1675,11 +1702,10 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) mb();/* in order to force CPU ordering */ t->task_done(t); } - return 0; } /*See the comments for mpi_ssp_completion */ -static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) { struct sas_task *t; unsigned long flags; @@ -1700,7 +1726,7 @@ static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("sas IO status 0x%x\n", event)); if (unlikely(!t || !t->lldd_task || !t->dev)) - return -1; + return; ts = &t->task_status; PM8001_IO_DBG(pm8001_ha, pm8001_printk("port_id = %x,device_id = %x\n", @@ -1747,7 +1773,7 @@ static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; - ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: PM8001_IO_DBG(pm8001_ha, @@ -1787,6 +1813,7 @@ static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; case IO_XFER_ERROR_ACK_NAK_TIMEOUT: PM8001_IO_DBG(pm8001_ha, @@ -1840,7 +1867,7 @@ static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) case IO_XFER_CMD_FRAME_ISSUED: PM8001_IO_DBG(pm8001_ha, pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n")); - return 0; + return; default: PM8001_IO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", event)); @@ -1866,11 +1893,10 @@ static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) mb();/* in order to force CPU ordering */ t->task_done(t); } - return 0; } /*See the comments for mpi_ssp_completion */ -static int 
+static void mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct sas_task *t; @@ -1898,7 +1924,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("sata IO status 0x%x\n", status)); if (unlikely(!t || !t->lldd_task || !t->dev)) - return -1; + return; switch (status) { case IO_SUCCESS: @@ -2015,7 +2041,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*in order to force CPU ordering*/ t->task_done(t); - return 0; + return; } break; case IO_OPEN_CNX_ERROR_BAD_DESTINATION: @@ -2033,7 +2059,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*ditto*/ t->task_done(t); - return 0; + return; } break; case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: @@ -2059,7 +2085,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/* ditto*/ t->task_done(t); - return 0; + return; } break; case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: @@ -2124,7 +2150,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*ditto*/ t->task_done(t); - return 0; + return; } break; case IO_DS_IN_RECOVERY: @@ -2146,7 +2172,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*ditto*/ t->task_done(t); - return 0; + return; } break; case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: @@ -2180,11 +2206,10 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) mb();/* ditto */ t->task_done(t); } - return 0; } /*See the comments for mpi_ssp_completion */ -static int mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) { struct sas_task *t; unsigned long flags; @@ -2205,7 +2230,7 @@ static int mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("sata IO status 0x%x\n", event)); if (unlikely(!t || !t->lldd_task || !t->dev)) - return -1; + return; ts = &t->task_status; PM8001_IO_DBG(pm8001_ha, pm8001_printk("port_id = %x,device_id = %x\n", @@ -2268,7 +2293,7 @@ static int mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*ditto*/ t->task_done(t); - return 0; + return; } break; case IO_OPEN_CNX_ERROR_BAD_DESTINATION: @@ -2382,11 +2407,10 @@ static int mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) mb();/* in order to force CPU ordering */ t->task_done(t); } - return 0; } /*See the comments for mpi_ssp_completion */ -static int +static void mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 param; @@ -2412,7 +2436,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("smp IO status 0x%x\n", status)); if (unlikely(!t || !t->lldd_task || !t->dev)) - return -1; + return; switch (status) { case IO_SUCCESS: @@ -2585,7 +2609,6 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) mb();/* in order to force CPU ordering */ t->task_done(t); } - return 0; } static void @@ -2682,8 +2705,8 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n", (dlen_status & NVMD_LEN) >> 24)); } - memcpy((void *)(fw_control_context->usrAddr), - 
(void *)(pm8001_ha->memoryMap.region[NVMD].virt_ptr), + memcpy(fw_control_context->usrAddr, + pm8001_ha->memoryMap.region[NVMD].virt_ptr, fw_control_context->len); complete(pm8001_ha->nvmd_completion); ccb->task = NULL; @@ -3184,28 +3207,28 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) (struct task_abort_resp *)(piomb + 4); ccb = &pm8001_ha->ccb_info[pPayload->tag]; t = ccb->task; - ts = &t->task_status; - if (t == NULL) - return -1; status = le32_to_cpu(pPayload->status); tag = le32_to_cpu(pPayload->tag); scp = le32_to_cpu(pPayload->scp); PM8001_IO_DBG(pm8001_ha, pm8001_printk(" status = 0x%x\n", status)); + if (t == NULL) + return -1; + ts = &t->task_status; if (status != 0) PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("task abort failed tag = 0x%x," - " scp= 0x%x\n", tag, scp)); + pm8001_printk("task abort failed status 0x%x ," + "tag = 0x%x, scp= 0x%x\n", status, tag, scp)); switch (status) { case IO_SUCCESS: - PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_GOOD; break; case IO_NOT_VALID: - PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n")); + PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n")); ts->resp = TMF_RESP_FUNC_FAILED; break; } @@ -3443,7 +3466,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 pHeader = (u32)*(u32 *)piomb; u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF); - PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:\n")); + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:")); switch (opc) { case OPC_OUB_ECHO: @@ -3609,17 +3632,16 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha) struct outbound_queue_table *circularQ; void *pMsg1 = NULL; u8 bc = 0; - u32 ret = MPI_IO_STATUS_FAIL, processedMsgCount = 0; + u32 ret = MPI_IO_STATUS_FAIL; circularQ = &pm8001_ha->outbnd_q_tbl[0]; do { ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); if (MPI_IO_STATUS_SUCCESS == ret) { /* process the outbound message */ - process_one_iomb(pm8001_ha, (void *)((u8 *)pMsg1 - 4)); + process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); /* free the message from the outbound circular buffer */ - mpi_msg_free_set(pm8001_ha, circularQ, bc); - processedMsgCount++; + mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc); } if (MPI_IO_STATUS_BUSY == ret) { u32 producer_idx; @@ -3631,8 +3653,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha) /* OQ is empty */ break; } - } while (100 > processedMsgCount);/*end message processing if hit the - count*/ + } while (1); return ret; } @@ -3743,6 +3764,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev = dev->lldd_dev; struct ssp_ini_io_start_req ssp_cmd; u32 tag = ccb->ccb_tag; + int ret; __le64 phys_addr; struct inbound_queue_table *circularQ; u32 opc = OPC_INB_SSPINIIOSTART; @@ -3780,8 +3802,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; } - mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); - return 0; + ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); + return ret; } static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, @@ -3791,6 +3813,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, struct domain_device *dev = task->dev; struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; u32 tag = ccb->ccb_tag; + int ret; struct sata_start_req sata_cmd; u32 
hdr_tag, ncg_tag = 0; __le64 phys_addr; @@ -3849,8 +3872,8 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } - mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); - return 0; + ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); + return ret; } /** @@ -3864,6 +3887,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_start_req payload; struct inbound_queue_table *circularQ; + int ret; u32 tag = 0x01; u32 opcode = OPC_INB_PHYSTART; circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -3883,8 +3907,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) memcpy(payload.sas_identify.sas_addr, pm8001_ha->sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; - mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); - return 0; + ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + return ret; } /** @@ -3898,14 +3922,15 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, { struct phy_stop_req payload; struct inbound_queue_table *circularQ; + int ret; u32 tag = 0x01; u32 opcode = OPC_INB_PHYSTOP; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); - mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); - return 0; + ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + return ret; } /** @@ -3919,7 +3944,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 stp_sspsmp_sata = 0x4; struct inbound_queue_table *circularQ; u32 linkrate, phy_id; - u32 rc, tag = 0xdeadbeef; + int rc, tag = 0xdeadbeef; struct pm8001_ccb_info *ccb; u8 retryFlag = 0x1; u16 firstBurstSize = 0; @@ -3963,8 +3988,8 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr, SAS_ADDR_SIZE); - mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); - return 0; + rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return rc; } /** @@ -3975,16 +4000,17 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, { struct dereg_dev_req payload; u32 opc = OPC_INB_DEREG_DEV_HANDLE; + int ret; struct inbound_queue_table *circularQ; circularQ = &pm8001_ha->inbnd_q_tbl[0]; - memset((u8 *)&payload, 0, sizeof(payload)); + memset(&payload, 0, sizeof(payload)); payload.tag = 1; payload.device_id = cpu_to_le32(device_id); PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister device device_id = %d\n", device_id)); - mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); - return 0; + ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return ret; } /** @@ -3999,14 +4025,15 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, { struct local_phy_ctl_req payload; struct inbound_queue_table *circularQ; + int ret; u32 opc = OPC_INB_LOCAL_PHY_CONTROL; memset((u8 *)&payload, 0, sizeof(payload)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; payload.tag = 1; payload.phyop_phyid = cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); - mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); - return 0; + ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return ret; } static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) @@ -4028,12 +4055,16 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) * @irq: irq number. * @stat: stat. 
*/ -static void +static irqreturn_t pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha) { + unsigned long flags; + spin_lock_irqsave(&pm8001_ha->lock, flags); pm8001_chip_interrupt_disable(pm8001_ha); process_oq(pm8001_ha); pm8001_chip_interrupt_enable(pm8001_ha); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return IRQ_HANDLED; } static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, @@ -4041,7 +4072,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, { struct task_abort_req task_abort; struct inbound_queue_table *circularQ; - + int ret; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&task_abort, 0, sizeof(task_abort)); if (ABORT_SINGLE == (flag & ABORT_MASK)) { @@ -4054,8 +4085,8 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, task_abort.device_id = cpu_to_le32(dev_id); task_abort.tag = cpu_to_le32(cmd_tag); } - mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); - return 0; + ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); + return ret; } /** @@ -4068,7 +4099,8 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, { u32 opc, device_id; int rc = TMF_RESP_FUNC_FAILED; - PM8001_IO_DBG(pm8001_ha, pm8001_printk("Abort tag[%x]", task_tag)); + PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag" + " = %x", cmd_tag, task_tag)); if (pm8001_dev->dev_type == SAS_END_DEV) opc = OPC_INB_SSP_ABORT; else if (pm8001_dev->dev_type == SATA_DEV) @@ -4079,7 +4111,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, rc = send_task_abort(pm8001_ha, opc, device_id, flag, task_tag, cmd_tag); if (rc != TMF_RESP_FUNC_COMPLETE) - PM8001_IO_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc)); + PM8001_EH_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc)); return rc; } @@ -4098,17 +4130,17 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, u32 opc = OPC_INB_SSPINITMSTART; struct inbound_queue_table *circularQ; struct ssp_ini_tm_start_req sspTMCmd; + int ret; memset(&sspTMCmd, 0, sizeof(sspTMCmd)); sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id); sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed); sspTMCmd.tmf = cpu_to_le32(tmf->tmf); - sspTMCmd.ds_ads_m = cpu_to_le32(1 << 2); memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); circularQ = &pm8001_ha->inbnd_q_tbl[0]; - mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); - return 0; + ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); + return ret; } static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, @@ -4116,7 +4148,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, { u32 opc = OPC_INB_GET_NVMD_DATA; u32 nvmd_type; - u32 rc; + int rc; u32 tag; struct pm8001_ccb_info *ccb; struct inbound_queue_table *circularQ; @@ -4183,8 +4215,8 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); - return 0; + rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + return rc; } static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, @@ -4192,7 +4224,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, { u32 opc = OPC_INB_SET_NVMD_DATA; u32 nvmd_type; - u32 rc; + int rc; u32 tag; struct pm8001_ccb_info *ccb; struct inbound_queue_table *circularQ; @@ -4259,8 +4291,8 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - mpi_build_cmd(pm8001_ha, 
circularQ, opc, &nvmd_req); - return 0; + rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + return rc; } /** @@ -4275,9 +4307,10 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, struct fw_flash_Update_req payload; struct fw_flash_updata_info *info; struct inbound_queue_table *circularQ; + int ret; u32 opc = OPC_INB_FW_FLASH_UPDATE; - memset((u8 *)&payload, 0, sizeof(struct fw_flash_Update_req)); + memset(&payload, 0, sizeof(struct fw_flash_Update_req)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; info = fw_flash_updata_info; payload.tag = cpu_to_le32(tag); @@ -4287,8 +4320,8 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, payload.len = info->sgl.im_len.len ; payload.sgl_addr_lo = lower_32_bits(info->sgl.addr); payload.sgl_addr_hi = upper_32_bits(info->sgl.addr); - mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); - return 0; + ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return ret; } static int @@ -4298,7 +4331,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, struct fw_flash_updata_info flash_update_info; struct fw_control_info *fw_control; struct fw_control_ex *fw_control_context; - u32 rc; + int rc; u32 tag; struct pm8001_ccb_info *ccb; void *buffer = NULL; @@ -4321,8 +4354,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, return -ENOMEM; } } - memset((void *)buffer, 0, fw_control->len); - memcpy((void *)buffer, fw_control->buffer, fw_control->len); + memset(buffer, 0, fw_control->len); + memcpy(buffer, fw_control->buffer, fw_control->len); flash_update_info.sgl.addr = cpu_to_le64(phys_addr); flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); flash_update_info.sgl.im_len.e = 0; @@ -4338,8 +4371,9 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, ccb = &pm8001_ha->ccb_info[tag]; ccb->fw_control_context = fw_control_context; ccb->ccb_tag = tag; - pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, tag); - return 0; + rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, + tag); + return rc; } static int @@ -4349,10 +4383,10 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, struct set_dev_state_req payload; struct inbound_queue_table *circularQ; struct pm8001_ccb_info *ccb; - u32 rc; + int rc; u32 tag; u32 opc = OPC_INB_SET_DEVICE_STATE; - memset((u8 *)&payload, 0, sizeof(payload)); + memset(&payload, 0, sizeof(payload)); rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) return -1; @@ -4363,8 +4397,9 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.device_id = cpu_to_le32(pm8001_dev->device_id); payload.nds = cpu_to_le32(state); - mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); - return 0; + rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + return rc; + } static int diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index ed6dbd193aa1..30f2ede55a75 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -119,7 +119,7 @@ struct pm8001_dispatch { void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); - void (*isr)(struct pm8001_hba_info *pm8001_ha); + irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha); u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha); int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha); void (*interrupt_enable)(struct 
pm8001_hba_info *pm8001_ha); -- cgit v1.2.3-59-g8ed1b From 97ee20886cfd257a7818087c1638ca60b9ffd192 Mon Sep 17 00:00:00 2001 From: jack_wang Date: Thu, 5 Nov 2009 22:33:51 +0800 Subject: [SCSI] pm8001: Fixes for tag alloc, error goto and code cleanup Allocate right size for bitmap tag,fix error goto and cleanup print message and undocable commemts. patch attached. Signed-off-by: Lindar Liu Signed-off-by: Jack Wang Signed-off-by: James Bottomley --- drivers/scsi/pm8001/pm8001_init.c | 11 ++++--- drivers/scsi/pm8001/pm8001_sas.c | 60 +++++++++++++++++++-------------------- 2 files changed, 36 insertions(+), 35 deletions(-) diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 811b5d36d5f0..42ebe725d5a5 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -203,9 +203,9 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha) for (i = 0; i < pm8001_ha->chip->n_phy; i++) pm8001_phy_init(pm8001_ha, i); - pm8001_ha->tags = kmalloc(sizeof(*pm8001_ha->tags)*PM8001_MAX_DEVICES, - GFP_KERNEL); - + pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL); + if (!pm8001_ha->tags) + goto err_out; /* MPI Memory region 1 for AAP Event Log for fw */ pm8001_ha->memoryMap.region[AAP1].num_elements = 1; pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE; @@ -287,6 +287,9 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->ccb_info[i].ccb_dma_handle = pm8001_ha->memoryMap.region[CCB_MEM].phys_addr + i * sizeof(struct pm8001_ccb_info); + pm8001_ha->ccb_info[i].task = NULL; + pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff; + pm8001_ha->ccb_info[i].device = NULL; ++pm8001_ha->tags_num; } pm8001_ha->flags = PM8001F_INIT_TIME; @@ -578,7 +581,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) { struct pci_dev *pdev; irq_handler_t irq_handler = pm8001_interrupt; - u32 rc; + int rc; pdev = pm8001_ha->pdev; diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 1e840fd15160..1f767a0e727a 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -330,15 +330,12 @@ int pm8001_slave_configure(struct scsi_device *sdev) return 0; } /** - * pm8001_task_exec -execute the task which come from upper level, send the - * command or data to DMA area and then increase CI,for queuecommand(ssp), - * it is from upper layer and for smp command,it is from libsas, - * for ata command it is from libata. + * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware. * @task: the task to be execute. * @num: if can_queue great than 1, the task can be queued up. for SMP task, * we always execute one one time. * @gfp_flags: gfp_flags. - * @is tmf: if it is task management task. + * @is_tmf: if it is task management task. 
* @tmf: the task management IU */ #define DEV_IS_GONE(pm8001_dev) \ @@ -379,7 +376,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num, pm8001_printk("device %016llx not " "ready.\n", SAS_ADDR(dev->sas_addr))); } - rc = SAS_PHY_DOWN; + rc = SAS_PHY_DOWN; goto out_done; } rc = pm8001_tag_alloc(pm8001_ha, &tag); @@ -395,14 +392,14 @@ static int pm8001_task_exec(struct sas_task *task, const int num, t->data_dir); if (!n_elem) { rc = -ENOMEM; - goto err_out; + goto err_out_tag; } } } else { n_elem = t->num_scatter; } - t->lldd_task = NULL; + t->lldd_task = ccb; ccb->n_elem = n_elem; ccb->ccb_tag = tag; ccb->task = t; @@ -435,7 +432,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num, pm8001_printk("rc is %x\n", rc)); goto err_out_tag; } - t->lldd_task = ccb; /* TODO: select normal or high priority */ spin_lock(&t->task_state_lock); t->task_state_flags |= SAS_TASK_AT_INITIATOR; @@ -518,8 +514,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_alloc_dev - find the empty pm8001_device structure, allocate and - * return it for use. + * pm8001_alloc_dev - find a empty pm8001_device * @pm8001_ha: our hba card information */ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) @@ -550,14 +545,16 @@ static void pm8001_free_dev(struct pm8001_device *pm8001_dev) } /** - * pm8001_dev_found_notify - when libsas find a sas domain device, it should - * tell the LLDD that device is found, and then LLDD register this device to - * HBA FW by the command "OPC_INB_REG_DEV", after that the HBA will assign - * a device ID(according to device's sas address) and returned it to LLDD.from + * pm8001_dev_found_notify - libsas notify a device is found. + * @dev: the device structure which sas layer used. + * + * when libsas find a sas domain device, it should tell the LLDD that + * device is found, and then LLDD register this device to HBA firmware + * by the command "OPC_INB_REG_DEV", after that the HBA will assign a + * device ID(according to device's sas address) and returned it to LLDD. From * now on, we communicate with HBA FW with the device ID which HBA assigned * rather than sas address. it is the neccessary step for our HBA but it is * the optional for other HBA driver. - * @dev: the device structure which sas layer used. */ static int pm8001_dev_found_notify(struct domain_device *dev) { @@ -665,14 +662,15 @@ static void pm8001_tmf_timedout(unsigned long data) #define PM8001_TASK_TIMEOUT 20 /** - * pm8001_exec_internal_tmf_task - when errors or exception happened, we may - * want to do something, for example abort issued task which result in this - * execption, this is by calling this function, note it is also with the task - * execute interface. + * pm8001_exec_internal_tmf_task - execute some task management commands. * @dev: the wanted device. * @tmf: which task management wanted to be take. * @para_len: para_len. * @parameter: ssp task parameter. + * + * when errors or exception happened, we may want to do something, for example + * abort the issued task which result in this execption, it is done by calling + * this function, note it is also with the task execute interface. 
*/ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, void *parameter, u32 para_len, struct pm8001_tmf_task *tmf) @@ -737,9 +735,9 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, res = -EMSGSIZE; break; } else { - PM8001_IO_DBG(pm8001_ha, - pm8001_printk(" Task to dev %016llx response: 0x%x" - "status 0x%x\n", + PM8001_EH_DBG(pm8001_ha, + pm8001_printk(" Task to dev %016llx response:" + "0x%x status 0x%x\n", SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat)); @@ -760,7 +758,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, u32 task_tag) { int res, retry; - u32 rc, ccb_tag; + u32 ccb_tag; struct pm8001_ccb_info *ccb; struct sas_task *task = NULL; @@ -777,9 +775,9 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; add_timer(&task->timer); - rc = pm8001_tag_alloc(pm8001_ha, &ccb_tag); - if (rc) - return rc; + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return res; ccb = &pm8001_ha->ccb_info[ccb_tag]; ccb->device = pm8001_dev; ccb->ccb_tag = ccb_tag; @@ -812,7 +810,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, break; } else { - PM8001_IO_DBG(pm8001_ha, + PM8001_EH_DBG(pm8001_ha, pm8001_printk(" Task to dev %016llx response: " "0x%x status 0x%x\n", SAS_ADDR(dev->sas_addr), @@ -1027,11 +1025,11 @@ int pm8001_abort_task(struct sas_task *task) } device_id = pm8001_dev->device_id; PM8001_EH_DBG(pm8001_ha, - pm8001_printk("abort io to device_id = %d\n", device_id)); - tmf_task.tmf = TMF_ABORT_TASK; + pm8001_printk("abort io to deviceid= %d\n", device_id)); + tmf_task.tmf = TMF_ABORT_TASK; tmf_task.tag_of_task_to_be_managed = tag; rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); - rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, + pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, pm8001_dev->sas_device, 0, tag); } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { -- cgit v1.2.3-59-g8ed1b From 2bc1c59dbdefdb6f9767e06efb86bbdb2923a8be Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 5 Nov 2009 11:18:09 -0600 Subject: [SCSI] fc class: fail fast bsg requests If the port state is blocked and the fast io fail tmo has fired then this patch will fail bsg requests immediately. This is needed if userspace is sending IOs to test the transport like with fcping, so it will not have to wait for the dev loss tmo. With this patch he bsg req fast io fail code behaves like the normal and sg io/passthrough fast io fail. 
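For readers skimming the diff below, the only functional change is the wait condition in
fc_bsg_request_handler(). A minimal sketch of the new gating logic follows; the helper name is
purely illustrative, but the field and flag names are taken from the patch itself:

	/* Sketch only: requests are parked while the rport is blocked AND the
	 * fast I/O fail timer has not yet fired.  Once FC_RPORT_FAST_FAIL_TIMEDOUT
	 * is set, bsg requests are fetched and failed right away instead of
	 * waiting out dev_loss_tmo. */
	static bool fc_bsg_should_park(struct fc_rport *rport)
	{
		return rport &&
		       rport->port_state == FC_PORTSTATE_BLOCKED &&
		       !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT);
	}
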
Signed-off-by: Mike Christie Acked-By: James Smart Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_fc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 3ce56b3b2cd7..600502aa3b61 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3809,8 +3809,9 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost, return; while (!blk_queue_plugged(q)) { - if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED)) - break; + if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && + !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) + break; req = blk_fetch_request(q); if (!req) -- cgit v1.2.3-59-g8ed1b From 3f9daedfcb197d784c6e7ecd731e3aa9859bc951 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 5 Nov 2009 11:37:28 -0600 Subject: [SCSI] add scsi target reset support to scsi ioctl The scsi ioctl code path was missing scsi target reset support. This patch just adds it. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/scsi_ioctl.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index b98f763931c5..d9564fb04f62 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -308,6 +308,9 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, case SG_SCSI_RESET_DEVICE: val = SCSI_TRY_RESET_DEVICE; break; + case SG_SCSI_RESET_TARGET: + val = SCSI_TRY_RESET_TARGET; + break; case SG_SCSI_RESET_BUS: val = SCSI_TRY_RESET_BUS; break; -- cgit v1.2.3-59-g8ed1b From 627511e3e67553b04f6917c03e39b797df210e04 Mon Sep 17 00:00:00 2001 From: Takahiro Yasui Date: Tue, 10 Nov 2009 16:22:19 -0500 Subject: [SCSI] scsi_devinfo: update Hitachi entries (v2) Four models, OPEN-/DF400/DF500/DISK-SUBSYSTEM, can handle REPORT_LUN, and the BLIST_REPORTLUN2 flag needs to be set. And DF600 doesn't require any flags because it returns ANSI 03h (SPC). 
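(For context: BLIST_REPORTLUN2 essentially tells the midlayer to trust REPORT LUNS even on
devices that only claim SCSI-2 conformance, which is why the older SPARSELUN/LARGELUN/ATTACH_PQ3
workarounds can be dropped for these arrays. The DF600 entry is removed from the table entirely
because, as noted above, it reports SPC and needs no flags; OPEN-E becomes the broader OPEN-
prefix so all OPEN- models match.)
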
Signed-off-by: Takahiro Yasui Signed-off-by: James Bottomley --- drivers/scsi/scsi_devinfo.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 93c2622cb969..802e91c8892e 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -168,11 +168,10 @@ static struct { {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36}, - {"HITACHI", "DF400", "*", BLIST_SPARSELUN}, - {"HITACHI", "DF500", "*", BLIST_SPARSELUN}, - {"HITACHI", "DF600", "*", BLIST_SPARSELUN}, - {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, - {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, + {"HITACHI", "DF400", "*", BLIST_REPORTLUN2}, + {"HITACHI", "DF500", "*", BLIST_REPORTLUN2}, + {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, + {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2}, {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, -- cgit v1.2.3-59-g8ed1b From 24246de77503978cfcd7e76f06404e60e399992f Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 11 Nov 2009 16:34:30 -0600 Subject: [SCSI] bnx2i: use common iscsi suspend queue This just has bnx2i use the iscsi_suspend_queue helper. The suspend works as follows: When ep_poll has succeeed iscsid will call conn_bind, the LLD will then call iscsi_conn_bind which will clear the suspend bit. When ep_disconnect is called (or if there is a conn error) we set the suspend bit. For the ep_disconnect case I added a helper in the previous kernel that will take the session lock to make sure iscsi_queuecommand/xmit_task is not running and it will set the suspend bit. 
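As a hedged aside, the helper referred to above is not part of this diff; its body in libiscsi
of this vintage is roughly the following (bit and field names may differ slightly), which is why
bnx2i's private is_bound flag and open-coded session locking become redundant:

	void iscsi_suspend_queue(struct iscsi_conn *conn)
	{
		/* holding the session lock guarantees iscsi_queuecommand()
		 * / task xmit are not running while the suspend bit is set */
		spin_lock_bh(&conn->session->lock);
		set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
		spin_unlock_bh(&conn->session->lock);
	}
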
Signed-off-by: Mike Christie Acked-by: Anil Veerabhadrappa Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i.h | 1 - drivers/scsi/bnx2i/bnx2i_iscsi.c | 8 +------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 5edde1a8c04d..2b973f3c2eb2 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -232,7 +232,6 @@ struct bnx2i_conn { struct iscsi_cls_conn *cls_conn; struct bnx2i_hba *hba; struct completion cmd_cleanup_cmpl; - int is_bound; u32 iscsi_conn_cid; #define BNX2I_CID_RESERVED 0x5AFF diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index cafb888c2376..89e84c302aa0 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1161,9 +1161,6 @@ static int bnx2i_task_xmit(struct iscsi_task *task) struct bnx2i_cmd *cmd = task->dd_data; struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; - if (!bnx2i_conn->is_bound) - return -ENOTCONN; - /* * If there is no scsi_cmnd this must be a mgmt task */ @@ -1371,7 +1368,6 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, bnx2i_conn->ep = bnx2i_ep; bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; bnx2i_conn->fw_cid = bnx2i_ep->ep_cid; - bnx2i_conn->is_bound = 1; ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, bnx2i_ep->ep_iscsi_cid); @@ -1896,9 +1892,7 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) conn = bnx2i_conn->cls_conn->dd_data; session = conn->session; - spin_lock_bh(&session->lock); - bnx2i_conn->is_bound = 0; - spin_unlock_bh(&session->lock); + iscsi_suspend_queue(conn); } hba = bnx2i_ep->hba; -- cgit v1.2.3-59-g8ed1b From 4f704dc03297406ea5d53b85c4666c60f69000bf Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 11 Nov 2009 16:34:31 -0600 Subject: [SCSI] libiscsi: fix login/text checks in pdu injection code For some reason we used to check for the the immediate bit set and the opcocde in many places instead of just masking the opcode. In the passthrough code this is a problem because userspace may or may not have set the immediate bit and it does not have to. This fixes up the opcode checks in the passthrough code, so we mask off the opcode then check against the iscsi proto definition like is done in other places. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 67d0f3fc8ac0..8c29480fc02b 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -577,12 +577,12 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, struct iscsi_session *session = conn->session; struct iscsi_hdr *hdr = task->hdr; struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; + uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK; if (conn->session->state == ISCSI_STATE_LOGGING_OUT) return -ENOTCONN; - if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) && - hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE)) + if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT) nop->exp_statsn = cpu_to_be32(conn->exp_statsn); /* * pre-format CmdSN for outgoing PDU. @@ -590,9 +590,12 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, nop->cmdsn = cpu_to_be32(session->cmdsn); if (hdr->itt != RESERVED_ITT) { /* - * TODO: We always use immediate, so we never hit this. + * TODO: We always use immediate for normal session pdus. 
* If we start to send tmfs or nops as non-immediate then * we should start checking the cmdsn numbers for mgmt tasks. + * + * During discovery sessions iscsid sends TEXT as non immediate, + * but we always only send one PDU at a time. */ if (conn->c_stage == ISCSI_CONN_STARTED && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { @@ -620,22 +623,28 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, { struct iscsi_session *session = conn->session; struct iscsi_host *ihost = shost_priv(session->host); + uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK; struct iscsi_task *task; itt_t itt; if (session->state == ISCSI_STATE_TERMINATE) return NULL; - if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) || - hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE)) + if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) { /* * Login and Text are sent serially, in * request-followed-by-response sequence. * Same task can be used. Same ITT must be used. * Note that login_task is preallocated at conn_create(). */ + if (conn->login_task->state != ISCSI_TASK_FREE) { + iscsi_conn_printk(KERN_ERR, conn, "Login/Text in " + "progress. Cannot start new task.\n"); + return NULL; + } + task = conn->login_task; - else { + } else { if (session->state != ISCSI_STATE_LOGGED_IN) return NULL; -- cgit v1.2.3-59-g8ed1b From 5d12c05e29fc8715e3e32f57a8cced9290d87c55 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 11 Nov 2009 16:34:32 -0600 Subject: [SCSI] libiscsi: Check TMF state before sending PDU Patch and mail from both MikeC and HannesR: Before we're trying to send a PDU we have to check whether a TMF is active. If so and if the PDU will be affected by the TMF we should allow only Data-out PDUs to be sent. If fast_abort is set, no Data-out PDUs will be sent while a LUN reset is being processed for a affected LUN. fast_abort is now ingored during a ABORT TASK tmf. We will not send any Data-outs for a task if the task is being aborted. Signed-off-by: Mike Christie Signed-off-by: Hannes Reinecke Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 113 ++++++++++++++++++++++++++++++++++++++++----- include/scsi/iscsi_proto.h | 2 + 2 files changed, 103 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 8c29480fc02b..b6ffdc5512cd 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -265,6 +265,88 @@ static int iscsi_prep_bidi_ahs(struct iscsi_task *task) return 0; } +/** + * iscsi_check_tmf_restrictions - check if a task is affected by TMF + * @task: iscsi task + * @opcode: opcode to check for + * + * During TMF a task has to be checked if it's affected. + * All unrelated I/O can be passed through, but I/O to the + * affected LUN should be restricted. + * If 'fast_abort' is set we won't be sending any I/O to the + * affected LUN. + * Otherwise the target is waiting for all TTTs to be completed, + * so we have to send all outstanding Data-Out PDUs to the target. 
+ */ +static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_tm *tmf = &conn->tmhdr; + unsigned int hdr_lun; + + if (conn->tmf_state == TMF_INITIAL) + return 0; + + if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC) + return 0; + + switch (ISCSI_TM_FUNC_VALUE(tmf)) { + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + /* + * Allow PDUs for unrelated LUNs + */ + hdr_lun = scsilun_to_int((struct scsi_lun *)tmf->lun); + if (hdr_lun != task->sc->device->lun) + return 0; + + /* + * Fail all SCSI cmd PDUs + */ + if (opcode != ISCSI_OP_SCSI_DATA_OUT) { + iscsi_conn_printk(KERN_INFO, conn, + "task [op %x/%x itt " + "0x%x/0x%x lun %u] " + "rejected.\n", + task->hdr->opcode, opcode, + task->itt, task->hdr_itt, hdr_lun); + return -EACCES; + } + /* + * And also all data-out PDUs in response to R2T + * if fast_abort is set. + */ + if (conn->session->fast_abort) { + iscsi_conn_printk(KERN_INFO, conn, + "task [op %x/%x itt " + "0x%x/0x%x lun %u] " + "fast abort.\n", + task->hdr->opcode, opcode, + task->itt, task->hdr_itt, hdr_lun); + return -EACCES; + } + break; + case ISCSI_TM_FUNC_ABORT_TASK: + /* + * the caller has already checked if the task + * they want to abort was in the pending queue so if + * we are here the cmd pdu has gone out already, and + * we will only hit this for data-outs + */ + if (opcode == ISCSI_OP_SCSI_DATA_OUT && + task->hdr_itt == tmf->rtt) { + ISCSI_DBG_SESSION(conn->session, + "Preventing task %x/%x from sending " + "data-out due to abort task in " + "progress\n", task->itt, + task->hdr_itt); + return -EACCES; + } + break; + } + + return 0; +} + /** * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu * @task: iscsi task @@ -282,6 +364,10 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) itt_t itt; int rc; + rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD); + if (rc) + return rc; + if (conn->session->tt->alloc_pdu) { rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); if (rc) @@ -1366,6 +1452,7 @@ EXPORT_SYMBOL_GPL(iscsi_requeue_task); **/ static int iscsi_data_xmit(struct iscsi_conn *conn) { + struct iscsi_task *task; int rc = 0; spin_lock_bh(&conn->session->lock); @@ -1403,11 +1490,8 @@ check_mgmt: /* process pending command queue */ while (!list_empty(&conn->cmdqueue)) { - if (conn->tmf_state == TMF_QUEUED) - break; - - conn->task = list_entry(conn->cmdqueue.next, - struct iscsi_task, running); + conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, + running); list_del_init(&conn->task->running); if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { fail_scsi_task(conn->task, DID_IMM_RETRY); @@ -1415,7 +1499,7 @@ check_mgmt: } rc = iscsi_prep_scsi_cmd_pdu(conn->task); if (rc) { - if (rc == -ENOMEM) { + if (rc == -ENOMEM || rc == -EACCES) { list_add_tail(&conn->task->running, &conn->cmdqueue); conn->task = NULL; @@ -1437,17 +1521,18 @@ check_mgmt: } while (!list_empty(&conn->requeue)) { - if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL) - break; - /* * we always do fastlogout - conn stop code will clean up. 
*/ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) break; - conn->task = list_entry(conn->requeue.next, - struct iscsi_task, running); + task = list_entry(conn->requeue.next, struct iscsi_task, + running); + if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT)) + break; + + conn->task = task; list_del_init(&conn->task->running); conn->task->state = ISCSI_TASK_RUNNING; rc = iscsi_xmit_task(conn); @@ -1600,7 +1685,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) if (!ihost->workq) { reason = iscsi_prep_scsi_cmd_pdu(task); if (reason) { - if (reason == -ENOMEM) { + if (reason == -ENOMEM || reason == -EACCES) { reason = FAILURE_OOM; goto prepd_reject; } else { @@ -2120,6 +2205,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) spin_lock_bh(&session->lock); fail_scsi_task(task, DID_ABORT); conn->tmf_state = TMF_INITIAL; + memset(hdr, 0, sizeof(*hdr)); spin_unlock_bh(&session->lock); iscsi_start_tx(conn); goto success_unlocked; @@ -2130,6 +2216,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) case TMF_NOT_FOUND: if (!sc->SCp.ptr) { conn->tmf_state = TMF_INITIAL; + memset(hdr, 0, sizeof(*hdr)); /* task completed before tmf abort response */ ISCSI_DBG_EH(session, "sc completed while abort in " "progress\n"); @@ -2224,6 +2311,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) iscsi_suspend_tx(conn); spin_lock_bh(&session->lock); + memset(hdr, 0, sizeof(*hdr)); fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); conn->tmf_state = TMF_INITIAL; spin_unlock_bh(&session->lock); @@ -2868,6 +2956,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, spin_lock_bh(&session->lock); fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); fail_mgmt_tasks(session, conn); + memset(&conn->tmhdr, 0, sizeof(conn->tmhdr)); spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); } diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h index f2a2c1169486..dd0a52cea95a 100644 --- a/include/scsi/iscsi_proto.h +++ b/include/scsi/iscsi_proto.h @@ -279,6 +279,8 @@ struct iscsi_tm { #define ISCSI_TM_FUNC_TARGET_COLD_RESET 7 #define ISCSI_TM_FUNC_TASK_REASSIGN 8 +#define ISCSI_TM_FUNC_VALUE(hdr) ((hdr)->flags & ISCSI_FLAG_TM_FUNC_MASK) + /* SCSI Task Management Response Header */ struct iscsi_tm_rsp { uint8_t opcode; -- cgit v1.2.3-59-g8ed1b From 3fe5ae8b4c4d3a82c755074878da7ddb9dde381e Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 11 Nov 2009 16:34:33 -0600 Subject: [SCSI] libiscsi: add warm target reset tmf support This implements warm target reset tmf support for the scsi-ml target reset callback. Previously we would just drop the session in that callback. This patch will now try a target reset and if that fails drop the session. 
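In outline, the scsi-ml target reset callback now behaves as sketched below. The helper names
here are stand-ins for the TMF send/wait path; the real implementation is in the libiscsi.c
hunks that follow, with iscsi_eh_session_reset() being the renamed old drop-the-session path:

	static int eh_target_reset_outline(struct scsi_cmnd *sc)
	{
		/* first try a warm target reset TMF against the target */
		if (send_tmf(sc, ISCSI_TM_FUNC_TARGET_WARM_RESET) ==
		    TMF_RESP_FUNC_COMPLETE)
			return SUCCESS;
		/* otherwise fall back to dropping the session and waiting
		 * for relogin, as the previous code always did */
		return iscsi_eh_session_reset(sc);
	}
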
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_main.c | 2 +- drivers/scsi/bnx2i/bnx2i_iscsi.c | 2 +- drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 2 +- drivers/scsi/iscsi_tcp.c | 2 +- drivers/scsi/libiscsi.c | 251 +++++++++++++++++++++++++----------- drivers/scsi/scsi_transport_iscsi.c | 4 +- include/scsi/iscsi_if.h | 3 + include/scsi/libiscsi.h | 1 + 8 files changed, 190 insertions(+), 77 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index d15df07ba783..1a557fa77888 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -3859,7 +3859,7 @@ struct iscsi_transport beiscsi_iscsi_transport = { ISCSI_USERNAME | ISCSI_PASSWORD | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_LU_RESET_TMO | + ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | ISCSI_PING_TMO | ISCSI_RECV_TMO | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 89e84c302aa0..070118a8f184 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -2028,7 +2028,7 @@ struct iscsi_transport bnx2i_iscsi_transport = { ISCSI_USERNAME | ISCSI_PASSWORD | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_LU_RESET_TMO | + ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | ISCSI_PING_TMO | ISCSI_RECV_TMO | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME, diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 2631bddd255e..969c83162cc4 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c @@ -937,7 +937,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { ISCSI_USERNAME | ISCSI_PASSWORD | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_LU_RESET_TMO | + ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | ISCSI_PING_TMO | ISCSI_RECV_TMO | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index edc49ca49cea..517da3fd89d3 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -903,7 +903,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = { ISCSI_USERNAME | ISCSI_PASSWORD | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_LU_RESET_TMO | + ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | ISCSI_PING_TMO | ISCSI_RECV_TMO | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b6ffdc5512cd..07ec997c5d4f 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -298,17 +298,18 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) hdr_lun = scsilun_to_int((struct scsi_lun *)tmf->lun); if (hdr_lun != task->sc->device->lun) return 0; - + /* fall through */ + case ISCSI_TM_FUNC_TARGET_WARM_RESET: /* * Fail all SCSI cmd PDUs */ if (opcode != ISCSI_OP_SCSI_DATA_OUT) { iscsi_conn_printk(KERN_INFO, conn, "task [op %x/%x itt " - "0x%x/0x%x lun %u] " + "0x%x/0x%x] " "rejected.\n", task->hdr->opcode, opcode, - task->itt, task->hdr_itt, hdr_lun); + task->itt, task->hdr_itt); return -EACCES; } /* @@ -318,10 +319,9 @@ static int 
iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) if (conn->session->fast_abort) { iscsi_conn_printk(KERN_INFO, conn, "task [op %x/%x itt " - "0x%x/0x%x lun %u] " - "fast abort.\n", + "0x%x/0x%x] fast abort.\n", task->hdr->opcode, opcode, - task->itt, task->hdr_itt, hdr_lun); + task->itt, task->hdr_itt); return -EACCES; } break; @@ -1757,72 +1757,6 @@ int iscsi_target_alloc(struct scsi_target *starget) } EXPORT_SYMBOL_GPL(iscsi_target_alloc); -void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) -{ - struct iscsi_session *session = cls_session->dd_data; - - spin_lock_bh(&session->lock); - if (session->state != ISCSI_STATE_LOGGED_IN) { - session->state = ISCSI_STATE_RECOVERY_FAILED; - if (session->leadconn) - wake_up(&session->leadconn->ehwait); - } - spin_unlock_bh(&session->lock); -} -EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); - -int iscsi_eh_target_reset(struct scsi_cmnd *sc) -{ - struct iscsi_cls_session *cls_session; - struct iscsi_session *session; - struct iscsi_conn *conn; - - cls_session = starget_to_session(scsi_target(sc->device)); - session = cls_session->dd_data; - conn = session->leadconn; - - mutex_lock(&session->eh_mutex); - spin_lock_bh(&session->lock); - if (session->state == ISCSI_STATE_TERMINATE) { -failed: - ISCSI_DBG_EH(session, - "failing target reset: Could not log back into " - "target [age %d]\n", - session->age); - spin_unlock_bh(&session->lock); - mutex_unlock(&session->eh_mutex); - return FAILED; - } - - spin_unlock_bh(&session->lock); - mutex_unlock(&session->eh_mutex); - /* - * we drop the lock here but the leadconn cannot be destoyed while - * we are in the scsi eh - */ - iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); - - ISCSI_DBG_EH(session, "wait for relogin\n"); - wait_event_interruptible(conn->ehwait, - session->state == ISCSI_STATE_TERMINATE || - session->state == ISCSI_STATE_LOGGED_IN || - session->state == ISCSI_STATE_RECOVERY_FAILED); - if (signal_pending(current)) - flush_signals(current); - - mutex_lock(&session->eh_mutex); - spin_lock_bh(&session->lock); - if (session->state == ISCSI_STATE_LOGGED_IN) { - ISCSI_DBG_EH(session, - "target reset succeeded\n"); - } else - goto failed; - spin_unlock_bh(&session->lock); - mutex_unlock(&session->eh_mutex); - return SUCCESS; -} -EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); - static void iscsi_tmf_timedout(unsigned long data) { struct iscsi_conn *conn = (struct iscsi_conn *)data; @@ -2329,6 +2263,172 @@ done: } EXPORT_SYMBOL_GPL(iscsi_eh_device_reset); +void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + + spin_lock_bh(&session->lock); + if (session->state != ISCSI_STATE_LOGGED_IN) { + session->state = ISCSI_STATE_RECOVERY_FAILED; + if (session->leadconn) + wake_up(&session->leadconn->ehwait); + } + spin_unlock_bh(&session->lock); +} +EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); + +/** + * iscsi_eh_session_reset - drop session and attempt relogin + * @sc: scsi command + * + * This function will wait for a relogin, session termination from + * userspace, or a recovery/replacement timeout. 
+ */ +static int iscsi_eh_session_reset(struct scsi_cmnd *sc) +{ + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + struct iscsi_conn *conn; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + conn = session->leadconn; + + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->lock); + if (session->state == ISCSI_STATE_TERMINATE) { +failed: + ISCSI_DBG_EH(session, + "failing session reset: Could not log back into " + "%s, %s [age %d]\n", session->targetname, + conn->persistent_address, session->age); + spin_unlock_bh(&session->lock); + mutex_unlock(&session->eh_mutex); + return FAILED; + } + + spin_unlock_bh(&session->lock); + mutex_unlock(&session->eh_mutex); + /* + * we drop the lock here but the leadconn cannot be destoyed while + * we are in the scsi eh + */ + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + + ISCSI_DBG_EH(session, "wait for relogin\n"); + wait_event_interruptible(conn->ehwait, + session->state == ISCSI_STATE_TERMINATE || + session->state == ISCSI_STATE_LOGGED_IN || + session->state == ISCSI_STATE_RECOVERY_FAILED); + if (signal_pending(current)) + flush_signals(current); + + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->lock); + if (session->state == ISCSI_STATE_LOGGED_IN) { + ISCSI_DBG_EH(session, + "session reset succeeded for %s,%s\n", + session->targetname, conn->persistent_address); + } else + goto failed; + spin_unlock_bh(&session->lock); + mutex_unlock(&session->eh_mutex); + return SUCCESS; +} + +static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) +{ + memset(hdr, 0, sizeof(*hdr)); + hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; + hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + hdr->rtt = RESERVED_ITT; +} + +/** + * iscsi_eh_target_reset - reset target + * @sc: scsi command + * + * This will attempt to send a warm target reset. If that fails + * then we will drop the session and attempt ERL0 recovery. + */ +int iscsi_eh_target_reset(struct scsi_cmnd *sc) +{ + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + struct iscsi_conn *conn; + struct iscsi_tm *hdr; + int rc = FAILED; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + + ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc, + session->targetname); + + mutex_lock(&session->eh_mutex); + spin_lock_bh(&session->lock); + /* + * Just check if we are not logged in. We cannot check for + * the phase because the reset could come from a ioctl. 
+ */ + if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) + goto unlock; + conn = session->leadconn; + + /* only have one tmf outstanding at a time */ + if (conn->tmf_state != TMF_INITIAL) + goto unlock; + conn->tmf_state = TMF_QUEUED; + + hdr = &conn->tmhdr; + iscsi_prep_tgt_reset_pdu(sc, hdr); + + if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, + session->tgt_reset_timeout)) { + rc = FAILED; + goto unlock; + } + + switch (conn->tmf_state) { + case TMF_SUCCESS: + break; + case TMF_TIMEDOUT: + spin_unlock_bh(&session->lock); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + goto done; + default: + conn->tmf_state = TMF_INITIAL; + goto unlock; + } + + rc = SUCCESS; + spin_unlock_bh(&session->lock); + + iscsi_suspend_tx(conn); + + spin_lock_bh(&session->lock); + memset(hdr, 0, sizeof(*hdr)); + fail_scsi_tasks(conn, -1, DID_ERROR); + conn->tmf_state = TMF_INITIAL; + spin_unlock_bh(&session->lock); + + iscsi_start_tx(conn); + goto done; + +unlock: + spin_unlock_bh(&session->lock); +done: + ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, + rc == SUCCESS ? "SUCCESS" : "FAILED"); + mutex_unlock(&session->eh_mutex); + + if (rc == FAILED) + rc = iscsi_eh_session_reset(sc); + return rc; +} +EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); + /* * Pre-allocate a pool of @max items of @item_size. By default, the pool * should be accessed via kfifo_{get,put} on q->queue. @@ -2595,6 +2695,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, session->host = shost; session->state = ISCSI_STATE_FREE; session->fast_abort = 1; + session->tgt_reset_timeout = 30; session->lu_reset_timeout = 15; session->abort_timeout = 10; session->scsi_cmds_max = scsi_cmds; @@ -3033,6 +3134,9 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, case ISCSI_PARAM_LU_RESET_TMO: sscanf(buf, "%d", &session->lu_reset_timeout); break; + case ISCSI_PARAM_TGT_RESET_TMO: + sscanf(buf, "%d", &session->tgt_reset_timeout); + break; case ISCSI_PARAM_PING_TMO: sscanf(buf, "%d", &conn->ping_timeout); break; @@ -3132,6 +3236,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, case ISCSI_PARAM_LU_RESET_TMO: len = sprintf(buf, "%d\n", session->lu_reset_timeout); break; + case ISCSI_PARAM_TGT_RESET_TMO: + len = sprintf(buf, "%d\n", session->tgt_reset_timeout); + break; case ISCSI_PARAM_INITIAL_R2T_EN: len = sprintf(buf, "%d\n", session->initial_r2t_en); break; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index ad897df36615..dc04ca124a69 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -30,7 +30,7 @@ #include #include -#define ISCSI_SESSION_ATTRS 21 +#define ISCSI_SESSION_ATTRS 22 #define ISCSI_CONN_ATTRS 13 #define ISCSI_HOST_ATTRS 4 @@ -1759,6 +1759,7 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1); iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0); iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); +iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0); iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0) @@ -2000,6 +2001,7 @@ iscsi_register_transport(struct iscsi_transport *tt) SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT); SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO); SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO); + SETUP_SESSION_RD_ATTR(tgt_reset_tmo,ISCSI_TGT_RESET_TMO); 
SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME); SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME); SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index d67dda2b6aa0..66d377b9c72b 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h @@ -311,6 +311,8 @@ enum iscsi_param { ISCSI_PARAM_IFACE_NAME, ISCSI_PARAM_ISID, ISCSI_PARAM_INITIATOR_NAME, + + ISCSI_PARAM_TGT_RESET_TMO, /* must always be last */ ISCSI_PARAM_MAX, }; @@ -350,6 +352,7 @@ enum iscsi_param { #define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME) #define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID) #define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME) +#define ISCSI_TGT_RESET_TMO (1ULL << ISCSI_PARAM_TGT_RESET_TMO) /* iSCSI HBA params */ enum iscsi_host_param { diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 2db2bc26b1e9..7394e3bc8f4b 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -267,6 +267,7 @@ struct iscsi_session { /* configuration */ int abort_timeout; int lu_reset_timeout; + int tgt_reset_timeout; int initial_r2t_en; unsigned max_r2t; int imm_data_en; -- cgit v1.2.3-59-g8ed1b From fdd46dcbe4468a1f47a2cc9be442d11c3d21dd68 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 11 Nov 2009 16:34:34 -0600 Subject: [SCSI] iscsi class: modify handling of replacement timeout This patch modifies the replacement/recovery_timeout so it works more like the fc fast io fail tmo. If userspace tries to set the replacement/recovery_timeout to less than zero, we will turn off the forced recovery cleanup. If userspace sets the value to 0 then we will force the recovery cleanup immediately. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_iscsi.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index dc04ca124a69..ea3892e7e0f7 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -627,8 +627,10 @@ static void __iscsi_block_session(struct work_struct *work) spin_unlock_irqrestore(&session->lock, flags); scsi_target_block(&session->dev); ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); - queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, - session->recovery_tmo * HZ); + if (session->recovery_tmo >= 0) + queue_delayed_work(iscsi_eh_timer_workq, + &session->recovery_work, + session->recovery_tmo * HZ); } void iscsi_block_session(struct iscsi_cls_session *session) @@ -1348,8 +1350,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) switch (ev->u.set_param.param) { case ISCSI_PARAM_SESS_RECOVERY_TMO: sscanf(data, "%d", &value); - if (value != 0) - session->recovery_tmo = value; + session->recovery_tmo = value; break; default: err = transport->set_param(conn, ev->u.set_param.param, -- cgit v1.2.3-59-g8ed1b From b20d038dff877566694181578c49c31616d622cd Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 11 Nov 2009 16:34:35 -0600 Subject: [SCSI] iser: set tgt and lu reset timeout When iser enabled lu reset support it did not set the bit to allow userspace to get/set the timeout. This sets the tgt and lu reset timeout bits. 
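The mechanism being fixed here is the per-parameter visibility mask: each session parameter has a corresponding (1ULL << ISCSI_PARAM_*) bit, and the iscsi class only exposes the matching attribute to userspace when the transport sets that bit in its param_mask. The short standalone C sketch below illustrates that test; the enum values and bit names are simplified stand-ins for the real ISCSI_PARAM_* / ISCSI_*_TMO definitions shown elsewhere in this series, not code taken from the patch.

/*
 * Sketch only: shows how a parameter bit in a transport's param_mask
 * gates whether that parameter is visible.  Names are hypothetical.
 */
#include <stdio.h>

enum sketch_param { PARAM_LU_RESET_TMO, PARAM_TGT_RESET_TMO, PARAM_MAX };

#define LU_RESET_TMO_BIT  (1ULL << PARAM_LU_RESET_TMO)
#define TGT_RESET_TMO_BIT (1ULL << PARAM_TGT_RESET_TMO)

/* A transport advertises supported parameters through a bit mask. */
static unsigned long long transport_param_mask =
	LU_RESET_TMO_BIT | TGT_RESET_TMO_BIT;

static int param_is_visible(enum sketch_param p)
{
	/* Only parameters whose bit is set get a sysfs attribute. */
	return (transport_param_mask & (1ULL << p)) != 0;
}

int main(void)
{
	printf("lu_reset_tmo visible:  %d\n", param_is_visible(PARAM_LU_RESET_TMO));
	printf("tgt_reset_tmo visible: %d\n", param_is_visible(PARAM_TGT_RESET_TMO));
	return 0;
}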
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/infiniband/ulp/iser/iscsi_iser.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index add9188663ff..5f7a6fca0a4d 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -625,6 +625,7 @@ static struct iscsi_transport iscsi_iser_transport = { ISCSI_USERNAME | ISCSI_PASSWORD | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | + ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | ISCSI_PING_TMO | ISCSI_RECV_TMO | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, .host_param_mask = ISCSI_HOST_HWADDRESS | -- cgit v1.2.3-59-g8ed1b From 1796e72291b2b6aafaec5954e666d0b5a95da935 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 11 Nov 2009 16:34:36 -0600 Subject: [SCSI] libiscsi: hook into ramp up/down handling It is rare to get a queue full with iscsi, because targets seem to just reduce the iscsi cmd window. However, there is at least one iscsi target that will throw a queue full when overloaded. This hooks the iscsi code in to the ramp up/down code, so we can handle it. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 07ec997c5d4f..b7689f3d05f5 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1739,10 +1739,19 @@ EXPORT_SYMBOL_GPL(iscsi_queuecommand); int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) { - if (reason != SCSI_QDEPTH_DEFAULT) + switch (reason) { + case SCSI_QDEPTH_DEFAULT: + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); + break; + case SCSI_QDEPTH_QFULL: + scsi_track_queue_full(sdev, depth); + break; + case SCSI_QDEPTH_RAMP_UP: + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); + break; + default: return -EOPNOTSUPP; - - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); + } return sdev->queue_depth; } EXPORT_SYMBOL_GPL(iscsi_change_queue_depth); -- cgit v1.2.3-59-g8ed1b From 4d9ab994e214d35107017c342aca42477b137316 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 2 Oct 2009 15:16:39 -0400 Subject: [SCSI] lpfc 8.3.5: fix reset path, ELS ordering and discovery issues This patch includes the following fixes: - Fixed panic during HBA reset. - Fixed FCoE event tag passed in resume_rpi. - Fix out of order ELS commands - Fixed discovery issues found during VLAN testing. - Fix UNREG_VPI failure on extended link pull - Fixed crash while processing unsolicited FC frames. - Clear retry count in the delayed ELS handler - Fixed discovery failure during quick link bounce. 
Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 3 +- drivers/scsi/lpfc/lpfc_attr.c | 11 +- drivers/scsi/lpfc/lpfc_crtn.h | 2 +- drivers/scsi/lpfc/lpfc_els.c | 1 + drivers/scsi/lpfc/lpfc_hbadisc.c | 67 +++++++-- drivers/scsi/lpfc/lpfc_init.c | 51 ++----- drivers/scsi/lpfc/lpfc_sli.c | 293 ++++++++++++++++++++------------------- drivers/scsi/lpfc/lpfc_sli.h | 21 +-- drivers/scsi/lpfc/lpfc_sli4.h | 13 -- 9 files changed, 242 insertions(+), 220 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index aa10f7951634..c618eaf3c0c8 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -109,7 +109,7 @@ struct hbq_dmabuf { struct lpfc_dmabuf dbuf; uint32_t size; uint32_t tag; - struct lpfc_rcqe rcqe; + struct lpfc_cq_event cq_event; }; /* Priority bit. Set value to exceed low water mark in lpfc_mem. */ @@ -551,6 +551,7 @@ struct lpfc_hba { uint8_t fc_linkspeed; /* Link speed after last READ_LA */ uint32_t fc_eventTag; /* event tag for link attention */ + uint32_t link_events; /* These fields used to be binfo */ uint32_t fc_pref_DID; /* preferred D_ID */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index e1a30a16a9fa..07f0172674c9 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3815,7 +3815,11 @@ lpfc_get_stats(struct Scsi_Host *shost) hs->invalid_crc_count -= lso->invalid_crc_count; hs->error_frames -= lso->error_frames; - if (phba->fc_topology == TOPOLOGY_LOOP) { + if (phba->hba_flag & HBA_FCOE_SUPPORT) { + hs->lip_count = -1; + hs->nos_count = (phba->link_events >> 1); + hs->nos_count -= lso->link_events; + } else if (phba->fc_topology == TOPOLOGY_LOOP) { hs->lip_count = (phba->fc_eventTag >> 1); hs->lip_count -= lso->link_events; hs->nos_count = -1; @@ -3906,7 +3910,10 @@ lpfc_reset_stats(struct Scsi_Host *shost) lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; lso->error_frames = pmb->un.varRdLnk.crcCnt; - lso->link_events = (phba->fc_eventTag >> 1); + if (phba->hba_flag & HBA_FCOE_SUPPORT) + lso->link_events = (phba->link_events >> 1); + else + lso->link_events = (phba->fc_eventTag >> 1); psli->stats_start = get_seconds(); diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 0830f37409a3..4438f8665a4a 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -235,7 +235,7 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); int lpfc_sli_check_eratt(struct lpfc_hba *); void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); -int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); +void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *); void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 45337cd23feb..4ea863f50650 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2452,6 +2452,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) */ del_timer_sync(&ndlp->nlp_delayfunc); retry = ndlp->nlp_retry; + ndlp->nlp_retry = 0; switch (cmd) { case ELS_CMD_FLOGI: diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index e6a47e25b218..5073c127bfe1 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -525,8 +525,6 @@ 
lpfc_work_done(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); } - if (phba->hba_flag & HBA_RECEIVE_BUFFER) - lpfc_sli4_handle_received_buffer(phba); } vports = lpfc_create_vport_work_array(phba); @@ -568,8 +566,9 @@ lpfc_work_done(struct lpfc_hba *phba) pring = &phba->sli.ring[LPFC_ELS_RING]; status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); status >>= (4*LPFC_ELS_RING); - if ((status & HA_RXMASK) - || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { + if ((status & HA_RXMASK) || + (pring->flag & LPFC_DEFERRED_RING_EVENT) || + (phba->hba_flag & HBA_RECEIVE_BUFFER)) { if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Set the lpfc data pending flag */ @@ -688,7 +687,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) lpfc_unreg_rpi(vport, ndlp); /* Leave Fabric nodes alone on link down */ - if (!remove && ndlp->nlp_type & NLP_FABRIC) + if ((phba->sli_rev < LPFC_SLI_REV4) && + (!remove && ndlp->nlp_type & NLP_FABRIC)) continue; rc = lpfc_disc_state_machine(vport, ndlp, NULL, remove @@ -1015,10 +1015,10 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) mempool_free(mboxq, phba->mbox_mem_pool); return; } + phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); + phba->hba_flag &= ~FCF_DISC_INPROGRESS; if (vport->port_state != LPFC_FLOGI) { spin_lock_irqsave(&phba->hbalock, flags); - phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); - phba->hba_flag &= ~FCF_DISC_INPROGRESS; spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_initial_flogi(vport); } @@ -1199,6 +1199,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) /* If the FCF is not availabe do nothing. */ if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { + phba->hba_flag &= ~FCF_DISC_INPROGRESS; spin_unlock_irqrestore(&phba->hbalock, flags); return; } @@ -1216,15 +1217,23 @@ lpfc_register_fcf(struct lpfc_hba *phba) fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!fcf_mbxq) + if (!fcf_mbxq) { + spin_lock_irqsave(&phba->hbalock, flags); + phba->hba_flag &= ~FCF_DISC_INPROGRESS; + spin_unlock_irqrestore(&phba->hbalock, flags); return; + } lpfc_reg_fcfi(phba, fcf_mbxq); fcf_mbxq->vport = phba->pport; fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) + if (rc == MBX_NOT_FINISHED) { + spin_lock_irqsave(&phba->hbalock, flags); + phba->hba_flag &= ~FCF_DISC_INPROGRESS; + spin_unlock_irqrestore(&phba->hbalock, flags); mempool_free(fcf_mbxq, phba->mbox_mem_pool); + } return; } @@ -1253,6 +1262,20 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, uint16_t *vlan_id) { struct lpfc_fcf_conn_entry *conn_entry; + int i, j, fcf_vlan_id = 0; + + /* Find the lowest VLAN id in the FCF record */ + for (i = 0; i < 512; i++) { + if (new_fcf_record->vlan_bitmap[i]) { + fcf_vlan_id = i * 8; + j = 0; + while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) { + j++; + fcf_vlan_id++; + } + break; + } + } /* If FCF not available return 0 */ if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || @@ -1286,7 +1309,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, if (*addr_mode & LPFC_FCF_FPMA) *addr_mode = LPFC_FCF_FPMA; - *vlan_id = 0xFFFF; + /* If FCF record report a vlan id use that vlan id */ + if (fcf_vlan_id) + *vlan_id = fcf_vlan_id; + else + *vlan_id = 0xFFFF; return 1; } @@ -1384,8 +1411,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, (*addr_mode & LPFC_FCF_FPMA)) *addr_mode = LPFC_FCF_FPMA; + /* If matching connect list has a vlan id, use it */ 
if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) *vlan_id = conn_entry->conn_rec.vlan_tag; + /* + * If no vlan id is specified in connect list, use the vlan id + * in the FCF record + */ + else if (fcf_vlan_id) + *vlan_id = fcf_vlan_id; else *vlan_id = 0xFFFF; @@ -1423,6 +1457,12 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) if (phba->link_state >= LPFC_LINK_UP) lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + else + /* + * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS + * flag + */ + phba->hba_flag &= ~FCF_DISC_INPROGRESS; if (unreg_fcf) { spin_lock_irq(&phba->hbalock); @@ -2085,6 +2125,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) else phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; + phba->link_events++; if (la->attType == AT_LINK_UP && (!la->mm)) { phba->fc_stat.LinkUp++; if (phba->link_flag & LS_LOOPBACK_MODE) { @@ -4409,6 +4450,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) if (lpfc_fcf_inuse(phba)) return; + /* At this point, all discovery is aborted */ + phba->pport->port_state = LPFC_VPORT_UNKNOWN; /* Unregister VPIs */ vports = lpfc_create_vport_work_array(phba); @@ -4512,8 +4555,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, /* Free the current connect table */ list_for_each_entry_safe(conn_entry, next_conn_entry, - &phba->fcf_conn_rec_list, list) + &phba->fcf_conn_rec_list, list) { + list_del_init(&conn_entry->list); kfree(conn_entry); + } conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; record_count = conn_hdr->length * sizeof(uint32_t)/ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index f913f1e93635..d654c0e3db4d 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2919,6 +2919,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); int rc; + phba->fc_eventTag = acqe_fcoe->event_tag; phba->fcoe_eventtag = acqe_fcoe->event_tag; switch (event_type) { case LPFC_FCOE_EVENT_TYPE_NEW_FCF: @@ -2990,6 +2991,7 @@ static void lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, struct lpfc_acqe_dcbx *acqe_dcbx) { + phba->fc_eventTag = acqe_dcbx->event_tag; lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0290 The SLI4 DCBX asynchronous event is not " "handled yet\n"); @@ -3594,8 +3596,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) /* Free the current connect table */ list_for_each_entry_safe(conn_entry, next_conn_entry, - &phba->fcf_conn_rec_list, list) + &phba->fcf_conn_rec_list, list) { + list_del_init(&conn_entry->list); kfree(conn_entry); + } return; } @@ -5058,15 +5062,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } phba->sli4_hba.els_cq = qdesc; - /* Create slow-path Unsolicited Receive Complete Queue */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, - phba->sli4_hba.cq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0502 Failed allocate slow-path USOL RX CQ\n"); - goto out_free_els_cq; - } - phba->sli4_hba.rxq_cq = qdesc; /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * @@ -5075,7 +5070,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2577 Failed allocate memory for fast-path " "CQ record array\n"); - goto out_free_rxq_cq; + goto out_free_els_cq; } for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, @@ -5188,9 +5183,6 @@ 
out_free_fcp_cq: phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; } kfree(phba->sli4_hba.fcp_cq); -out_free_rxq_cq: - lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); - phba->sli4_hba.rxq_cq = NULL; out_free_els_cq: lpfc_sli4_queue_free(phba->sli4_hba.els_cq); phba->sli4_hba.els_cq = NULL; @@ -5247,10 +5239,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); phba->sli4_hba.dat_rq = NULL; - /* Release unsolicited receive complete queue */ - lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); - phba->sli4_hba.rxq_cq = NULL; - /* Release ELS complete queue */ lpfc_sli4_queue_free(phba->sli4_hba.els_cq); phba->sli4_hba.els_cq = NULL; @@ -5383,25 +5371,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.els_cq->queue_id, phba->sli4_hba.sp_eq->queue_id); - /* Set up slow-path Unsolicited Receive Complete Queue */ - if (!phba->sli4_hba.rxq_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0532 USOL RX CQ not allocated\n"); - goto out_destroy_els_cq; - } - rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq, - LPFC_RCQ, LPFC_USOL); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0533 Failed setup of slow-path USOL RX CQ: " - "rc = 0x%x\n", rc); - goto out_destroy_els_cq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n", - phba->sli4_hba.rxq_cq->queue_id, - phba->sli4_hba.sp_eq->queue_id); - /* Set up fast-path FCP Response Complete Queue */ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { @@ -5507,7 +5476,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) goto out_destroy_fcp_wq; } rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, - phba->sli4_hba.rxq_cq, LPFC_USOL); + phba->sli4_hba.els_cq, LPFC_USOL); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0541 Failed setup of Receive Queue: " @@ -5519,7 +5488,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) "parent cq-id=%d\n", phba->sli4_hba.hdr_rq->queue_id, phba->sli4_hba.dat_rq->queue_id, - phba->sli4_hba.rxq_cq->queue_id); + phba->sli4_hba.els_cq->queue_id); return 0; out_destroy_fcp_wq: @@ -5531,8 +5500,6 @@ out_destroy_mbx_wq: out_destroy_fcp_cq: for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); - lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); -out_destroy_els_cq: lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); out_destroy_mbx_cq: lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); @@ -5574,8 +5541,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); /* Unset ELS complete queue */ lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); - /* Unset unsolicited receive complete queue */ - lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); /* Unset FCP response complete queue */ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 43cbe336f1f8..8d884d8e18be 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -3018,16 +3018,31 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t mask) { struct lpfc_iocbq *irspiocbq; + struct hbq_dmabuf *dmabuf; + struct lpfc_cq_event *cq_event; unsigned long iflag; while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) { /* Get the response iocb from the head of work queue */ spin_lock_irqsave(&phba->hbalock, 
iflag); list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue, - irspiocbq, struct lpfc_iocbq, list); + cq_event, struct lpfc_cq_event, list); spin_unlock_irqrestore(&phba->hbalock, iflag); - /* Process the response iocb */ - lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); + + switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { + case CQE_CODE_COMPL_WQE: + irspiocbq = container_of(cq_event, struct lpfc_iocbq, + cq_event); + lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); + break; + case CQE_CODE_RECEIVE: + dmabuf = container_of(cq_event, struct hbq_dmabuf, + cq_event); + lpfc_sli4_handle_received_buffer(phba, dmabuf); + break; + default: + break; + } } } @@ -3416,6 +3431,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) /* perform board reset */ phba->fc_eventTag = 0; + phba->link_events = 0; phba->pport->fc_myDID = 0; phba->pport->fc_prevDID = 0; @@ -3476,6 +3492,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) /* perform board reset */ phba->fc_eventTag = 0; + phba->link_events = 0; phba->pport->fc_myDID = 0; phba->pport->fc_prevDID = 0; @@ -3495,7 +3512,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) list_del_init(&phba->sli4_hba.dat_rq->list); list_del_init(&phba->sli4_hba.mbx_cq->list); list_del_init(&phba->sli4_hba.els_cq->list); - list_del_init(&phba->sli4_hba.rxq_cq->list); for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) @@ -4243,7 +4259,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); - lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM); for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], LPFC_QUEUE_REARM); @@ -8351,8 +8366,7 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, sizeof(struct lpfc_iocbq) - offset); - memset(&pIocbIn->sli4_info, 0, - sizeof(struct lpfc_sli4_rspiocb_info)); + pIocbIn->cq_event.cqe.wcqe_cmpl = *wcqe; /* Map WCQE parameters into irspiocb parameters */ pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); if (pIocbOut->iocb_flag & LPFC_IO_FCP) @@ -8364,16 +8378,6 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; else pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; - /* Load in additional WCQE parameters */ - pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe); - pIocbIn->sli4_info.bfield = 0; - if (bf_get(lpfc_wcqe_c_xb, wcqe)) - pIocbIn->sli4_info.bfield |= LPFC_XB; - if (bf_get(lpfc_wcqe_c_pv, wcqe)) { - pIocbIn->sli4_info.bfield |= LPFC_PV; - pIocbIn->sli4_info.priority = - bf_get(lpfc_wcqe_c_priority, wcqe); - } } /** @@ -8598,7 +8602,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, /* Add the irspiocb to the response IOCB work list */ spin_lock_irqsave(&phba->hbalock, iflags); - list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue); + list_add_tail(&irspiocbq->cq_event.list, + &phba->sli4_hba.sp_rspiocb_work_queue); /* Indicate ELS ring attention */ phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING)); spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -8689,52 +8694,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, return workposted; } -/** - * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry - * @phba: Pointer to 
HBA context object. - * @cq: Pointer to the completion queue. - * @wcqe: Pointer to a completion queue entry. - * - * This routine process a slow-path work-queue completion queue entry. - * - * Return: true if work posted to worker thread, otherwise false. - **/ -static bool -lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, - struct lpfc_cqe *cqe) -{ - struct lpfc_wcqe_complete wcqe; - bool workposted = false; - - /* Copy the work queue CQE and convert endian order if needed */ - lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); - - /* Check and process for different type of WCQE and dispatch */ - switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { - case CQE_CODE_COMPL_WQE: - /* Process the WQ complete event */ - workposted = lpfc_sli4_sp_handle_els_wcqe(phba, - (struct lpfc_wcqe_complete *)&wcqe); - break; - case CQE_CODE_RELEASE_WQE: - /* Process the WQ release event */ - lpfc_sli4_sp_handle_rel_wcqe(phba, - (struct lpfc_wcqe_release *)&wcqe); - break; - case CQE_CODE_XRI_ABORTED: - /* Process the WQ XRI abort event */ - workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, - (struct sli4_wcqe_xri_aborted *)&wcqe); - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0388 Not a valid WCQE code: x%x\n", - bf_get(lpfc_wcqe_c_code, &wcqe)); - break; - } - return workposted; -} - /** * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry * @phba: Pointer to HBA context object. @@ -8745,9 +8704,8 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, * Return: true if work posted to worker thread, otherwise false. **/ static bool -lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) +lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) { - struct lpfc_rcqe rcqe; bool workposted = false; struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; struct lpfc_queue *drq = phba->sli4_hba.dat_rq; @@ -8755,15 +8713,13 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) uint32_t status; unsigned long iflags; - /* Copy the receive queue CQE and convert endian order if needed */ - lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe)); lpfc_sli4_rq_release(hrq, drq); - if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE) + if (bf_get(lpfc_rcqe_code, rcqe) != CQE_CODE_RECEIVE) goto out; - if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id) + if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) goto out; - status = bf_get(lpfc_rcqe_status, &rcqe); + status = bf_get(lpfc_rcqe_status, rcqe); switch (status) { case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -8775,9 +8731,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) spin_unlock_irqrestore(&phba->hbalock, iflags); goto out; } - memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe)); + memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); /* save off the frame for the word thread to process */ - list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list); + list_add_tail(&dma_buf->cq_event.list, + &phba->sli4_hba.sp_rspiocb_work_queue); /* Frame received */ phba->hba_flag |= HBA_RECEIVE_BUFFER; spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -8797,6 +8754,58 @@ out: } +/** + * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry + * @phba: Pointer to HBA context object. + * @cq: Pointer to the completion queue. + * @wcqe: Pointer to a completion queue entry. + * + * This routine process a slow-path work-queue or recieve queue completion queue + * entry. 
+ * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_cqe *cqe) +{ + struct lpfc_wcqe_complete wcqe; + bool workposted = false; + + /* Copy the work queue CQE and convert endian order if needed */ + lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); + + /* Check and process for different type of WCQE and dispatch */ + switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { + case CQE_CODE_COMPL_WQE: + /* Process the WQ complete event */ + workposted = lpfc_sli4_sp_handle_els_wcqe(phba, + (struct lpfc_wcqe_complete *)&wcqe); + break; + case CQE_CODE_RELEASE_WQE: + /* Process the WQ release event */ + lpfc_sli4_sp_handle_rel_wcqe(phba, + (struct lpfc_wcqe_release *)&wcqe); + break; + case CQE_CODE_XRI_ABORTED: + /* Process the WQ XRI abort event */ + workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, + (struct sli4_wcqe_xri_aborted *)&wcqe); + break; + case CQE_CODE_RECEIVE: + /* Process the RQ event */ + workposted = lpfc_sli4_sp_handle_rcqe(phba, + (struct lpfc_rcqe *)&wcqe); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0388 Not a valid WCQE code: x%x\n", + bf_get(lpfc_wcqe_c_code, &wcqe)); + break; + } + return workposted; +} + /** * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry * @phba: Pointer to HBA context object. @@ -8858,14 +8867,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) break; case LPFC_WCQ: while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe); - if (!(++ecount % LPFC_GET_QE_REL_INT)) - lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); - } - break; - case LPFC_RCQ: - while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe); + workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe); if (!(++ecount % LPFC_GET_QE_REL_INT)) lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); } @@ -10823,6 +10825,7 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) struct hbq_dmabuf *seq_dmabuf = NULL; struct hbq_dmabuf *temp_dmabuf = NULL; + INIT_LIST_HEAD(&dmabuf->dbuf.list); new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; /* Use the hdr_buf to find the sequence that this frame belongs to */ list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { @@ -10845,7 +10848,9 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) } temp_hdr = seq_dmabuf->hbuf.virt; if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { - list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list); + list_del_init(&seq_dmabuf->hbuf.list); + list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); + list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); return dmabuf; } /* find the correct place in the sequence to insert this frame */ @@ -10957,7 +10962,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) LPFC_DATA_BUF_SIZE; first_iocbq->iocb.un.rcvels.remoteID = sid; first_iocbq->iocb.unsli3.rcvsli3.acc_len += - bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); + bf_get(lpfc_rcqe_length, + &seq_dmabuf->cq_event.cqe.rcqe_cmpl); } iocbq = first_iocbq; /* @@ -10975,7 +10981,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = LPFC_DATA_BUF_SIZE; first_iocbq->iocb.unsli3.rcvsli3.acc_len += - bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); + bf_get(lpfc_rcqe_length, + &seq_dmabuf->cq_event.cqe.rcqe_cmpl); } else { iocbq = 
lpfc_sli_get_iocbq(vport->phba); if (!iocbq) { @@ -10994,7 +11001,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; first_iocbq->iocb.unsli3.rcvsli3.acc_len += - bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); + bf_get(lpfc_rcqe_length, + &seq_dmabuf->cq_event.cqe.rcqe_cmpl); iocbq->iocb.un.rcvels.remoteID = sid; list_add_tail(&iocbq->list, &first_iocbq->list); } @@ -11014,11 +11022,11 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the * appropriate receive function when the final frame in a sequence is received. **/ -int -lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) +void +lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, + struct hbq_dmabuf *dmabuf) { - LIST_HEAD(cmplq); - struct hbq_dmabuf *dmabuf, *seq_dmabuf; + struct hbq_dmabuf *seq_dmabuf; struct fc_frame_header *fc_hdr; struct lpfc_vport *vport; uint32_t fcfi; @@ -11027,54 +11035,50 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) /* Clear hba flag and get all received buffers into the cmplq */ spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~HBA_RECEIVE_BUFFER; - list_splice_init(&phba->rb_pend_list, &cmplq); spin_unlock_irq(&phba->hbalock); /* Process each received buffer */ - while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) { - fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; - /* check to see if this a valid type of frame */ - if (lpfc_fc_frame_check(phba, fc_hdr)) { - lpfc_in_buf_free(phba, &dmabuf->dbuf); - continue; - } - fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe); - vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); - if (!vport) { - /* throw out the frame */ - lpfc_in_buf_free(phba, &dmabuf->dbuf); - continue; - } - /* Link this frame */ - seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); - if (!seq_dmabuf) { - /* unable to add frame to vport - throw it out */ - lpfc_in_buf_free(phba, &dmabuf->dbuf); - continue; - } - /* If not last frame in sequence continue processing frames. */ - if (!lpfc_seq_complete(seq_dmabuf)) { - /* - * When saving off frames post a new one and mark this - * frame to be freed when it is finished. - **/ - lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); - dmabuf->tag = -1; - continue; - } - fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; - iocbq = lpfc_prep_seq(vport, seq_dmabuf); - if (!lpfc_complete_unsol_iocb(phba, - &phba->sli.ring[LPFC_ELS_RING], - iocbq, fc_hdr->fh_r_ctl, - fc_hdr->fh_type)) - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "2540 Ring %d handler: unexpected Rctl " - "x%x Type x%x received\n", - LPFC_ELS_RING, - fc_hdr->fh_r_ctl, fc_hdr->fh_type); - }; - return 0; + fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + /* check to see if this a valid type of frame */ + if (lpfc_fc_frame_check(phba, fc_hdr)) { + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + } + fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); + vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); + if (!vport) { + /* throw out the frame */ + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + } + /* Link this frame */ + seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); + if (!seq_dmabuf) { + /* unable to add frame to vport - throw it out */ + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + } + /* If not last frame in sequence continue processing frames. 
*/ + if (!lpfc_seq_complete(seq_dmabuf)) { + /* + * When saving off frames post a new one and mark this + * frame to be freed when it is finished. + **/ + lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); + dmabuf->tag = -1; + return; + } + fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; + iocbq = lpfc_prep_seq(vport, seq_dmabuf); + if (!lpfc_complete_unsol_iocb(phba, + &phba->sli.ring[LPFC_ELS_RING], + iocbq, fc_hdr->fh_r_ctl, + fc_hdr->fh_type)) + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2540 Ring %d handler: unexpected Rctl " + "x%x Type x%x received\n", + LPFC_ELS_RING, + fc_hdr->fh_r_ctl, fc_hdr->fh_type); } /** @@ -11542,7 +11546,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2000 Failed to allocate mbox for " "READ_FCF cmd\n"); - return -ENOMEM; + error = -ENOMEM; + goto fail_fcfscan; } req_len = sizeof(struct fcf_record) + @@ -11558,8 +11563,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) "0291 Allocated DMA memory size (x%x) is " "less than the requested DMA memory " "size (x%x)\n", alloc_len, req_len); - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return -ENOMEM; + error = -ENOMEM; + goto fail_fcfscan; } /* Get the first SGE entry from the non-embedded DMA memory. This @@ -11571,8 +11576,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2527 Failed to get the non-embedded SGE " "virtual address\n"); - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return -ENOMEM; + error = -ENOMEM; + goto fail_fcfscan; } virt_addr = mboxq->sge_array->addr[0]; read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; @@ -11586,7 +11591,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - lpfc_sli4_mbox_cmd_free(phba, mboxq); error = -EIO; } else { spin_lock_irq(&phba->hbalock); @@ -11594,6 +11598,15 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) spin_unlock_irq(&phba->hbalock); error = 0; } +fail_fcfscan: + if (error) { + if (mboxq) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~FCF_DISC_INPROGRESS; + spin_unlock_irq(&phba->hbalock); + } return error; } diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 3c53316cf6d0..ad8094966ff3 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -29,14 +29,17 @@ typedef enum _lpfc_ctx_cmd { LPFC_CTX_HOST } lpfc_ctx_cmd; -/* This structure is used to carry the needed response IOCB states */ -struct lpfc_sli4_rspiocb_info { - uint8_t hw_status; - uint8_t bfield; -#define LPFC_XB 0x1 -#define LPFC_PV 0x2 - uint8_t priority; - uint8_t reserved; +struct lpfc_cq_event { + struct list_head list; + union { + struct lpfc_mcqe mcqe_cmpl; + struct lpfc_acqe_link acqe_link; + struct lpfc_acqe_fcoe acqe_fcoe; + struct lpfc_acqe_dcbx acqe_dcbx; + struct lpfc_rcqe rcqe_cmpl; + struct sli4_wcqe_xri_aborted wcqe_axri; + struct lpfc_wcqe_complete wcqe_cmpl; + } cqe; }; /* This structure is used to handle IOCB requests / responses */ @@ -76,7 +79,7 @@ struct lpfc_iocbq { struct lpfc_iocbq *); void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); - struct lpfc_sli4_rspiocb_info sli4_info; + struct lpfc_cq_event cq_event; }; #define SLI_IOCB_RET_IOCB 1 /* Return 
IOCB if cmd ring full */ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index b5f4ba1a5c27..97da7589e038 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -110,18 +110,6 @@ struct lpfc_queue { union sli4_qe qe[1]; /* array to index entries (must be last) */ }; -struct lpfc_cq_event { - struct list_head list; - union { - struct lpfc_mcqe mcqe_cmpl; - struct lpfc_acqe_link acqe_link; - struct lpfc_acqe_fcoe acqe_fcoe; - struct lpfc_acqe_dcbx acqe_dcbx; - struct lpfc_rcqe rcqe_cmpl; - struct sli4_wcqe_xri_aborted wcqe_axri; - } cqe; -}; - struct lpfc_sli4_link { uint8_t speed; uint8_t duplex; @@ -325,7 +313,6 @@ struct lpfc_sli4_hba { struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ - struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */ /* Setup information for various queue parameters */ int eq_esize; -- cgit v1.2.3-59-g8ed1b From 6669f9bb902b8c3f5e33cb8c32c8c0eec6ed68ed Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 2 Oct 2009 15:16:45 -0400 Subject: [SCSI] lpfc 8.3.5: fix VPI registration, error clean up and add support for vlink events This patch includes the following fixes and new features: - Fix mask size for CT field in WQE - Fix VPI base not used when unregistering VPI on port 1. - Fix UNREG_VPI mailbox command to unreg the correct VPI - Fixed Check for aborted els command - Fix error when trying to load driver with wrong firmware on FCoE HBA. - Fix bug with probe_one routines not putting the Scsi_Host back upon error - Add support for Clear Virtual Link Async Events - Add support for unsolicited CT exchange sequence abort - Add 0x0714 OCeXXXXX PCI ID Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_crtn.h | 5 +- drivers/scsi/lpfc/lpfc_ct.c | 34 ++++- drivers/scsi/lpfc/lpfc_els.c | 12 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 2 +- drivers/scsi/lpfc/lpfc_hw.h | 10 +- drivers/scsi/lpfc/lpfc_hw4.h | 89 +++++++++++-- drivers/scsi/lpfc/lpfc_init.c | 107 +++++++++++++++- drivers/scsi/lpfc/lpfc_mbox.c | 12 +- drivers/scsi/lpfc/lpfc_sli.c | 267 ++++++++++++++++++++++++++++++++++++--- drivers/scsi/lpfc/lpfc_sli.h | 2 +- drivers/scsi/lpfc/lpfc_sli4.h | 5 + 11 files changed, 498 insertions(+), 47 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 4438f8665a4a..0d450ae3a2d4 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -144,6 +144,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *); void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); +void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, + struct lpfc_iocbq *); int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); void lpfc_fdmi_tmo(unsigned long); @@ -188,7 +190,7 @@ int lpfc_mbox_tmo_val(struct lpfc_hba *, int); void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); -void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); +void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *); void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); void lpfc_resume_rpi(struct lpfcMboxq 
*, struct lpfc_nodelist *); @@ -361,6 +363,7 @@ void lpfc_stop_port(struct lpfc_hba *); void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); void lpfc_start_fdiscs(struct lpfc_hba *phba); +struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) #define HBA_EVENT_RSCN 5 diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 9a1bd9534d74..e724048bf390 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -87,7 +87,6 @@ void lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocbq) { - struct lpfc_dmabuf *mp = NULL; IOCB_t *icmd = &piocbq->iocb; int i; @@ -160,6 +159,39 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } +/** + * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort + * @phba: Pointer to HBA context object. + * @pring: Pointer to the driver internal I/O ring. + * @piocbq: Pointer to the IOCBQ. + * + * This function serves as the default handler for the sli4 unsolicited + * abort event. It shall be invoked when there is no application interface + * registered unsolicited abort handler. This handler does nothing but + * just simply releases the dma buffer used by the unsol abort event. + **/ +void +lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocbq) +{ + IOCB_t *icmd = &piocbq->iocb; + struct lpfc_dmabuf *bdeBuf; + uint32_t size; + + /* Forward abort event to any process registered to receive ct event */ + lpfc_bsg_ct_unsol_event(phba, pring, piocbq); + + /* If there is no BDE associated with IOCB, there is nothing to do */ + if (icmd->ulpBdeCount == 0) + return; + bdeBuf = piocbq->context2; + piocbq->context2 = NULL; + size = icmd->un.cont64[0].tus.f.bdeSize; + lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size); + lpfc_in_buf_free(phba, bdeBuf); +} + static void lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) { diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4ea863f50650..489ddcd4c584 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2712,12 +2712,16 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, !lpfc_error_lost_link(irsp)) { /* FLOGI retry policy */ retry = 1; - maxretry = 48; - if (cmdiocb->retry >= 32) + /* retry forever */ + maxretry = 0; + if (cmdiocb->retry >= 100) + delay = 5000; + else if (cmdiocb->retry >= 32) delay = 1000; } - if ((++cmdiocb->retry) >= maxretry) { + cmdiocb->retry++; + if (maxretry && (cmdiocb->retry >= maxretry)) { phba->fc_stat.elsRetryExceeded++; retry = 0; } @@ -5671,7 +5675,7 @@ dropit: * NULL - No vport with the matching @vpi found * Otherwise - Address to the vport with the matching @vpi. 
**/ -static struct lpfc_vport * +struct lpfc_vport * lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) { struct lpfc_vport *vport; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 5073c127bfe1..1b2771ac15f2 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -4474,7 +4474,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) return; } - lpfc_unreg_vfi(mbox, phba->pport->vfi); + lpfc_unreg_vfi(mbox, phba->pport); mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index ccb26724dc53..74f9f028b45f 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1183,6 +1183,7 @@ typedef struct { #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 #define PCI_VENDOR_ID_SERVERENGINE 0x19a2 #define PCI_DEVICE_ID_TIGERSHARK 0x0704 +#define PCI_DEVICE_ID_TS_BE3 0x0714 #define JEDEC_ID_ADDRESS 0x0080001c #define FIREFLY_JEDEC_ID 0x1ACC @@ -1444,6 +1445,7 @@ typedef struct { /* FireFly BIU registers */ #define CMD_ABORT_MXRI64_CN 0x8C #define CMD_RCV_ELS_REQ64_CX 0x8D #define CMD_XMIT_ELS_RSP64_CX 0x95 +#define CMD_XMIT_BLS_RSP64_CX 0x97 #define CMD_FCP_IWRITE64_CR 0x98 #define CMD_FCP_IWRITE64_CX 0x99 #define CMD_FCP_IREAD64_CR 0x9A @@ -2326,7 +2328,13 @@ typedef struct { /* Structure for MB Command UNREG_VPI (0x97) */ typedef struct { uint32_t rsvd1; - uint32_t rsvd2; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd2; + uint16_t sli4_vpi; +#else /* __LITTLE_ENDIAN */ + uint16_t sli4_vpi; + uint16_t rsvd2; +#endif uint32_t rsvd3; uint32_t rsvd4; uint32_t rsvd5; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 3689eee04535..0c65091110cc 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -425,7 +425,7 @@ struct lpfc_wqe_generic{ #define lpfc_wqe_gen_status_MASK 0x0000000F #define lpfc_wqe_gen_status_WORD word7 #define lpfc_wqe_gen_ct_SHIFT 2 -#define lpfc_wqe_gen_ct_MASK 0x00000007 +#define lpfc_wqe_gen_ct_MASK 0x00000003 #define lpfc_wqe_gen_ct_WORD word7 uint32_t abort_tag; uint32_t word9; @@ -760,6 +760,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 #define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 #define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 +#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A #define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D /* FCoE Opcodes */ @@ -1273,6 +1274,51 @@ struct lpfc_mbx_del_fcf_tbl_entry { #define lpfc_mbx_del_fcf_tbl_index_WORD word10 }; +struct lpfc_mbx_query_fw_cfg { + struct mbox_header header; + uint32_t config_number; + uint32_t asic_rev; + uint32_t phys_port; + uint32_t function_mode; +/* firmware Function Mode */ +#define lpfc_function_mode_toe_SHIFT 0 +#define lpfc_function_mode_toe_MASK 0x00000001 +#define lpfc_function_mode_toe_WORD function_mode +#define lpfc_function_mode_nic_SHIFT 1 +#define lpfc_function_mode_nic_MASK 0x00000001 +#define lpfc_function_mode_nic_WORD function_mode +#define lpfc_function_mode_rdma_SHIFT 2 +#define lpfc_function_mode_rdma_MASK 0x00000001 +#define lpfc_function_mode_rdma_WORD function_mode +#define lpfc_function_mode_vm_SHIFT 3 +#define lpfc_function_mode_vm_MASK 0x00000001 +#define lpfc_function_mode_vm_WORD function_mode +#define lpfc_function_mode_iscsi_i_SHIFT 4 +#define lpfc_function_mode_iscsi_i_MASK 0x00000001 +#define lpfc_function_mode_iscsi_i_WORD function_mode +#define lpfc_function_mode_iscsi_t_SHIFT 5 +#define lpfc_function_mode_iscsi_t_MASK 0x00000001 +#define 
lpfc_function_mode_iscsi_t_WORD function_mode +#define lpfc_function_mode_fcoe_i_SHIFT 6 +#define lpfc_function_mode_fcoe_i_MASK 0x00000001 +#define lpfc_function_mode_fcoe_i_WORD function_mode +#define lpfc_function_mode_fcoe_t_SHIFT 7 +#define lpfc_function_mode_fcoe_t_MASK 0x00000001 +#define lpfc_function_mode_fcoe_t_WORD function_mode +#define lpfc_function_mode_dal_SHIFT 8 +#define lpfc_function_mode_dal_MASK 0x00000001 +#define lpfc_function_mode_dal_WORD function_mode +#define lpfc_function_mode_lro_SHIFT 9 +#define lpfc_function_mode_lro_MASK 0x00000001 +#define lpfc_function_mode_lro_WORD function_mode9 +#define lpfc_function_mode_flex10_SHIFT 10 +#define lpfc_function_mode_flex10_MASK 0x00000001 +#define lpfc_function_mode_flex10_WORD function_mode +#define lpfc_function_mode_ncsi_SHIFT 11 +#define lpfc_function_mode_ncsi_MASK 0x00000001 +#define lpfc_function_mode_ncsi_WORD function_mode +}; + /* Status field for embedded SLI_CONFIG mailbox command */ #define STATUS_SUCCESS 0x0 #define STATUS_FAILED 0x1 @@ -1804,6 +1850,7 @@ struct lpfc_mqe { struct lpfc_mbx_read_config rd_config; struct lpfc_mbx_request_features req_ftrs; struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; + struct lpfc_mbx_query_fw_cfg query_fw_cfg; struct lpfc_mbx_nop nop; } un; }; @@ -1885,7 +1932,7 @@ struct lpfc_acqe_link { }; struct lpfc_acqe_fcoe { - uint32_t fcf_index; + uint32_t index; uint32_t word1; #define lpfc_acqe_fcoe_fcf_count_SHIFT 0 #define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF @@ -1896,6 +1943,7 @@ struct lpfc_acqe_fcoe { #define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 #define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 #define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 +#define LPFC_FCOE_EVENT_TYPE_CVL 0x4 uint32_t event_tag; uint32_t trailer; }; @@ -1924,9 +1972,9 @@ struct lpfc_bmbx_create { #define NO_XRI ((uint16_t)-1) struct wqe_common { uint32_t word6; -#define wqe_xri_SHIFT 0 -#define wqe_xri_MASK 0x0000FFFF -#define wqe_xri_WORD word6 +#define wqe_xri_tag_SHIFT 0 +#define wqe_xri_tag_MASK 0x0000FFFF +#define wqe_xri_tag_WORD word6 #define wqe_ctxt_tag_SHIFT 16 #define wqe_ctxt_tag_MASK 0x0000FFFF #define wqe_ctxt_tag_WORD word6 @@ -1987,7 +2035,7 @@ struct wqe_common { #define wqe_wqec_MASK 0x00000001 #define wqe_wqec_WORD word11 #define wqe_cqid_SHIFT 16 -#define wqe_cqid_MASK 0x000003ff +#define wqe_cqid_MASK 0x0000ffff #define wqe_cqid_WORD word11 }; @@ -1996,6 +2044,9 @@ struct wqe_did { #define wqe_els_did_SHIFT 0 #define wqe_els_did_MASK 0x00FFFFFF #define wqe_els_did_WORD word5 +#define wqe_xmit_bls_pt_SHIFT 28 +#define wqe_xmit_bls_pt_MASK 0x00000003 +#define wqe_xmit_bls_pt_WORD word5 #define wqe_xmit_bls_ar_SHIFT 30 #define wqe_xmit_bls_ar_MASK 0x00000001 #define wqe_xmit_bls_ar_WORD word5 @@ -2044,6 +2095,23 @@ struct xmit_els_rsp64_wqe { struct xmit_bls_rsp64_wqe { uint32_t payload0; +/* Payload0 for BA_ACC */ +#define xmit_bls_rsp64_acc_seq_id_SHIFT 16 +#define xmit_bls_rsp64_acc_seq_id_MASK 0x000000ff +#define xmit_bls_rsp64_acc_seq_id_WORD payload0 +#define xmit_bls_rsp64_acc_seq_id_vald_SHIFT 24 +#define xmit_bls_rsp64_acc_seq_id_vald_MASK 0x000000ff +#define xmit_bls_rsp64_acc_seq_id_vald_WORD payload0 +/* Payload0 for BA_RJT */ +#define xmit_bls_rsp64_rjt_vspec_SHIFT 0 +#define xmit_bls_rsp64_rjt_vspec_MASK 0x000000ff +#define xmit_bls_rsp64_rjt_vspec_WORD payload0 +#define xmit_bls_rsp64_rjt_expc_SHIFT 8 +#define xmit_bls_rsp64_rjt_expc_MASK 0x000000ff +#define xmit_bls_rsp64_rjt_expc_WORD payload0 +#define xmit_bls_rsp64_rjt_rsnc_SHIFT 16 +#define xmit_bls_rsp64_rjt_rsnc_MASK 0x000000ff 
+#define xmit_bls_rsp64_rjt_rsnc_WORD payload0 uint32_t word1; #define xmit_bls_rsp64_rxid_SHIFT 0 #define xmit_bls_rsp64_rxid_MASK 0x0000ffff @@ -2052,18 +2120,19 @@ struct xmit_bls_rsp64_wqe { #define xmit_bls_rsp64_oxid_MASK 0x0000ffff #define xmit_bls_rsp64_oxid_WORD word1 uint32_t word2; -#define xmit_bls_rsp64_seqcntlo_SHIFT 0 -#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff -#define xmit_bls_rsp64_seqcntlo_WORD word2 -#define xmit_bls_rsp64_seqcnthi_SHIFT 16 +#define xmit_bls_rsp64_seqcnthi_SHIFT 0 #define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff #define xmit_bls_rsp64_seqcnthi_WORD word2 +#define xmit_bls_rsp64_seqcntlo_SHIFT 16 +#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff +#define xmit_bls_rsp64_seqcntlo_WORD word2 uint32_t rsrvd3; uint32_t rsrvd4; struct wqe_did wqe_dest; struct wqe_common wqe_com; /* words 6-11 */ uint32_t rsvd_12_15[4]; }; + struct wqe_rctl_dfctl { uint32_t word5; #define wqe_si_SHIFT 2 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index d654c0e3db4d..a7b5566ea0b5 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1669,6 +1669,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) oneConnect = 1; m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; break; + case PCI_DEVICE_ID_TS_BE3: + oneConnect = 1; + m = (typeof(m)) {"OCeXXXXX-F", max_speed, "PCIe"}; + break; default: m = (typeof(m)){ NULL }; break; @@ -2698,6 +2702,63 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) mempool_free(mboxq, phba->mbox_mem_pool); } +/** + * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support + * @phba: pointer to lpfc hba data structure. + * + * This function uses the QUERY_FW_CFG mailbox command to determine if the + * firmware loaded supports FCoE. A return of zero indicates that the mailbox + * was successful and the firmware supports FCoE. Any other return indicates + * a error. It is assumed that this function will be called before interrupts + * are enabled. + **/ +static int +lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba) +{ + int rc = 0; + LPFC_MBOXQ_t *mboxq; + struct lpfc_mbx_query_fw_cfg *query_fw_cfg; + uint32_t length; + uint32_t shdr_status, shdr_add_status; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2621 Failed to allocate mbox for " + "query firmware config cmd\n"); + return -ENOMEM; + } + query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg; + length = (sizeof(struct lpfc_mbx_query_fw_cfg) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_QUERY_FW_CFG, + length, LPFC_SLI4_MBX_EMBED); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr_status = bf_get(lpfc_mbox_hdr_status, + &query_fw_cfg->header.cfg_shdr.response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &query_fw_cfg->header.cfg_shdr.response); + if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2622 Query Firmware Config failed " + "mbx status x%x, status x%x add_status x%x\n", + rc, shdr_status, shdr_add_status); + return -EINVAL; + } + if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2623 FCoE Function not supported by firmware. 
" + "Function mode = %08x\n", + query_fw_cfg->function_mode); + return -EINVAL; + } + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + return 0; +} + /** * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code * @phba: pointer to lpfc hba data structure. @@ -2918,6 +2979,9 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, { uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); int rc; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; phba->fc_eventTag = acqe_fcoe->event_tag; phba->fcoe_eventtag = acqe_fcoe->event_tag; @@ -2925,7 +2989,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, case LPFC_FCOE_EVENT_TYPE_NEW_FCF: lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, "2546 New FCF found index 0x%x tag 0x%x\n", - acqe_fcoe->fcf_index, + acqe_fcoe->index, acqe_fcoe->event_tag); /* * If the current FCF is in discovered state, or @@ -2958,10 +3022,10 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, "2549 FCF disconnected fron network index 0x%x" - " tag 0x%x\n", acqe_fcoe->fcf_index, + " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); /* If the event is not for currently used fcf do nothing */ - if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) + if (phba->fcf.fcf_indx != acqe_fcoe->index) break; /* * Currently, driver support only one FCF - so treat this as @@ -2971,7 +3035,28 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, /* Unregister FCF if no devices connected to it */ lpfc_unregister_unused_fcf(phba); break; - + case LPFC_FCOE_EVENT_TYPE_CVL: + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "2718 Clear Virtual Link Received for VPI 0x%x" + " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); + vport = lpfc_find_vport_by_vpid(phba, + acqe_fcoe->index /*- phba->vpi_base*/); + if (!vport) + break; + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) + break; + shost = lpfc_shost_from_vport(vport); + lpfc_linkdown_port(vport); + if (vport->port_type != LPFC_NPIV_PORT) { + mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(shost->host_lock); + ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; + vport->port_state = LPFC_FLOGI; + } + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0288 Unknown FCoE event type 0x%x event tag " @@ -3463,6 +3548,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) if (unlikely(rc)) goto out_free_bsmbx; + rc = lpfc_sli4_fw_cfg_check(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + /* Set up the hba's configuration parameters. 
*/ rc = lpfc_sli4_read_config(phba); if (unlikely(rc)) @@ -6687,6 +6776,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) { struct lpfc_hba *phba; struct lpfc_vport *vport = NULL; + struct Scsi_Host *shost = NULL; int error; uint32_t cfg_mode, intr_mode; @@ -6765,6 +6855,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_destroy_shost; } + shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ /* Now, trying to enable interrupt and bring up the device */ cfg_mode = phba->cfg_use_msi; while (true) { @@ -6831,6 +6922,8 @@ out_unset_pci_mem_s3: lpfc_sli_pci_mem_unset(phba); out_disable_pci_dev: lpfc_disable_pci_dev(phba); + if (shost) + scsi_host_put(shost); out_free_phba: lpfc_hba_free(phba); return error; @@ -7214,6 +7307,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) { struct lpfc_hba *phba; struct lpfc_vport *vport = NULL; + struct Scsi_Host *shost = NULL; int error; uint32_t cfg_mode, intr_mode; int mcnt; @@ -7294,6 +7388,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_destroy_shost; } + shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ /* Now, trying to enable interrupt and bring up the device */ cfg_mode = phba->cfg_use_msi; while (true) { @@ -7362,6 +7457,8 @@ out_unset_pci_mem_s4: lpfc_sli4_pci_mem_unset(phba); out_disable_pci_dev: lpfc_disable_pci_dev(phba); + if (shost) + scsi_host_put(shost); out_free_phba: lpfc_hba_free(phba); return error; @@ -7936,6 +8033,8 @@ static struct pci_device_id lpfc_id_table[] = { PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TS_BE3, + PCI_ANY_ID, PCI_ANY_ID, }, { 0 } }; diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 1ab405902a18..2a38d94654bc 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -849,7 +849,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; + if (phba->sli_rev < LPFC_SLI_REV4) + mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; + else + mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base; mb->mbxCommand = MBX_UNREG_VPI; mb->mbxOwner = OWN_HOST; @@ -1850,7 +1853,7 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi) /** * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. - * @vfi: VFI to be unregistered. + * @vport: vport associated with the VF. * * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric * (logical NPort) into the inactive state. The SLI Host must have logged out @@ -1859,11 +1862,12 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi) * fabric inactive. 
**/ void -lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi) +lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) { memset(mbox, 0, sizeof(*mbox)); bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); - bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi); + bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, + vport->vfi + vport->phba->vfi_base); } /** diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8d884d8e18be..e8d3e4732a84 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -59,7 +59,8 @@ static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, uint8_t *, uint32_t *); - +static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, + struct hbq_dmabuf *); static IOCB_t * lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) { @@ -572,9 +573,9 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); if (sglq) { if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED - || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) + && ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) && (iocbq->iocb.un.ulpWord[4] - == IOERR_SLI_ABORTED))) { + == IOERR_ABORT_REQUESTED))) { spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); list_add(&sglq->list, @@ -767,6 +768,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) case CMD_CLOSE_XRI_CX: case CMD_XRI_ABORTED_CX: case CMD_ABORT_MXRI64_CN: + case CMD_XMIT_BLS_RSP64_CX: type = LPFC_ABORT_IOCB; break; case CMD_RCV_SEQUENCE_CX: @@ -6081,6 +6083,23 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, command_type = OTHER_COMMAND; xritag = 0; break; + case CMD_XMIT_BLS_RSP64_CX: + /* As BLS ABTS-ACC WQE is very different from other WQEs, + * we re-construct this WQE here based on information in + * iocbq from scratch. + */ + memset(wqe, 0, sizeof(union lpfc_wqe)); + bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, + iocbq->iocb.un.ulpWord[3]); + bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, + iocbq->sli4_xritag); + bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); + bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); + bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, + iocbq->iocb.ulpContext); + /* Overwrite the pre-set comnd type with OTHER_COMMAND */ + command_type = OTHER_COMMAND; + break; case CMD_XRI_ABORTED_CX: case CMD_CREATE_XRI_CR: /* Do we expect to use this? 
*/ /* words0-2 are all 0's no bde */ @@ -6139,7 +6158,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if (piocb->sli4_xritag == NO_XRI) { if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || - piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) + piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) sglq = NULL; else { sglq = __lpfc_sli_get_sglq(phba); @@ -6464,7 +6483,7 @@ lpfc_sli_setup(struct lpfc_hba *phba) pring->iotag_max = 4096; pring->lpfc_sli_rcv_async_status = lpfc_sli_async_event_handler; - pring->num_mask = 4; + pring->num_mask = LPFC_MAX_RING_MASK; pring->prt[0].profile = 0; /* Mask 0 */ pring->prt[0].rctl = FC_ELS_REQ; pring->prt[0].type = FC_ELS_DATA; @@ -6489,6 +6508,12 @@ lpfc_sli_setup(struct lpfc_hba *phba) pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; pring->prt[3].lpfc_sli_rcv_unsol_event = lpfc_ct_unsol_event; + /* abort unsolicited sequence */ + pring->prt[4].profile = 0; /* Mask 4 */ + pring->prt[4].rctl = FC_RCTL_BA_ABTS; + pring->prt[4].type = FC_TYPE_BLS; + pring->prt[4].lpfc_sli_rcv_unsol_event = + lpfc_sli4_ct_abort_unsol_event; break; } totiocbsize += (pring->numCiocb * pring->sizeCiocb) + @@ -10869,6 +10894,177 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) return NULL; } +/** + * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence + * @vport: pointer to a vitural port + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function tries to abort from the partially assembed sequence, described + * by the information from basic abbort @dmabuf. It checks to see whether such + * partially assembled sequence held by the driver. If so, it shall free up all + * the frames from the partially assembled sequence. + * + * Return + * true -- if there is matching partially assembled sequence present and all + * the frames freed with the sequence; + * false -- if there is no matching partially assembled sequence present so + * nothing got aborted in the lower layer driver + **/ +static bool +lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, + struct hbq_dmabuf *dmabuf) +{ + struct fc_frame_header *new_hdr; + struct fc_frame_header *temp_hdr; + struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; + struct hbq_dmabuf *seq_dmabuf = NULL; + + /* Use the hdr_buf to find the sequence that matches this frame */ + INIT_LIST_HEAD(&dmabuf->dbuf.list); + INIT_LIST_HEAD(&dmabuf->hbuf.list); + new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { + temp_hdr = (struct fc_frame_header *)h_buf->virt; + if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || + (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || + (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) + continue; + /* found a pending sequence that matches this frame */ + seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); + break; + } + + /* Free up all the frames from the partially assembled sequence */ + if (seq_dmabuf) { + list_for_each_entry_safe(d_buf, n_buf, + &seq_dmabuf->dbuf.list, list) { + list_del_init(&d_buf->list); + lpfc_in_buf_free(vport->phba, d_buf); + } + return true; + } + return false; +} + +/** + * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler + * @phba: Pointer to HBA context object. + * @cmd_iocbq: pointer to the command iocbq structure. + * @rsp_iocbq: pointer to the response iocbq structure. + * + * This function handles the sequence abort accept iocb command complete + * event. 
It properly releases the memory allocated to the sequence abort + * accept iocb. + **/ +static void +lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, + struct lpfc_iocbq *cmd_iocbq, + struct lpfc_iocbq *rsp_iocbq) +{ + if (cmd_iocbq) + lpfc_sli_release_iocbq(phba, cmd_iocbq); +} + +/** + * lpfc_sli4_seq_abort_acc - Accept sequence abort + * @phba: Pointer to HBA context object. + * @fc_hdr: pointer to a FC frame header. + * + * This function sends a basic accept to a previous unsol sequence abort + * event after aborting the sequence handling. + **/ +static void +lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, + struct fc_frame_header *fc_hdr) +{ + struct lpfc_iocbq *ctiocb = NULL; + struct lpfc_nodelist *ndlp; + uint16_t oxid; + uint32_t sid; + IOCB_t *icmd; + + if (!lpfc_is_link_up(phba)) + return; + + sid = sli4_sid_from_fc_hdr(fc_hdr); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + + ndlp = lpfc_findnode_did(phba->pport, sid); + if (!ndlp) { + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, + "1268 Find ndlp returned NULL for oxid:x%x " + "SID:x%x\n", oxid, sid); + return; + } + + /* Allocate buffer for acc iocb */ + ctiocb = lpfc_sli_get_iocbq(phba); + if (!ctiocb) + return; + + icmd = &ctiocb->iocb; + icmd->un.xseq64.bdl.ulpIoTag32 = 0; + icmd->un.xseq64.bdl.bdeSize = 0; + icmd->un.xseq64.w5.hcsw.Dfctl = 0; + icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; + icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; + + /* Fill in the rest of iocb fields */ + icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; + icmd->ulpBdeCount = 0; + icmd->ulpLe = 1; + icmd->ulpClass = CLASS3; + icmd->ulpContext = ndlp->nlp_rpi; + icmd->un.ulpWord[3] = oxid; + + ctiocb->sli4_xritag = NO_XRI; + ctiocb->iocb_cmpl = NULL; + ctiocb->vport = phba->pport; + ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; + + /* Xmit CT abts accept on exchange */ + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", + CMD_XMIT_BLS_RSP64_CX, phba->link_state); + lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); +} + +/** + * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event + * @vport: Pointer to the vport on which this sequence was received + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function handles an SLI-4 unsolicited abort event. If the unsolicited + * receive sequence is only partially assembed by the driver, it shall abort + * the partially assembled frames for the sequence. Otherwise, if the + * unsolicited receive sequence has been completely assembled and passed to + * the Upper Layer Protocol (UPL), it then mark the per oxid status for the + * unsolicited sequence has been aborted. After that, it will issue a basic + * accept to accept the abort. 
+ **/ +void +lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, + struct hbq_dmabuf *dmabuf) +{ + struct lpfc_hba *phba = vport->phba; + struct fc_frame_header fc_hdr; + bool abts_par; + + /* Try to abort partially assembled seq */ + abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); + + /* Make a copy of fc_hdr before the dmabuf being released */ + memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); + + /* Send abort to ULP if partially seq abort failed */ + if (abts_par == false) + lpfc_sli4_send_seq_to_ulp(vport, dmabuf); + else + lpfc_in_buf_free(phba, &dmabuf->dbuf); + /* Send basic accept (BA_ACC) to the abort requester */ + lpfc_sli4_seq_abort_acc(phba, &fc_hdr); +} + /** * lpfc_seq_complete - Indicates if a sequence is complete * @dmabuf: pointer to a dmabuf that describes the FC sequence @@ -10941,9 +11137,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) /* remove from receive buffer list */ list_del_init(&seq_dmabuf->hbuf.list); /* get the Remote Port's SID */ - sid = (fc_hdr->fh_s_id[0] << 16 | - fc_hdr->fh_s_id[1] << 8 | - fc_hdr->fh_s_id[2]); + sid = sli4_sid_from_fc_hdr(fc_hdr); /* Get an iocbq struct to fill in. */ first_iocbq = lpfc_sli_get_iocbq(vport->phba); if (first_iocbq) { @@ -11010,6 +11204,43 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) return first_iocbq; } +static void +lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, + struct hbq_dmabuf *seq_dmabuf) +{ + struct fc_frame_header *fc_hdr; + struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; + struct lpfc_hba *phba = vport->phba; + + fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; + iocbq = lpfc_prep_seq(vport, seq_dmabuf); + if (!iocbq) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2707 Ring %d handler: Failed to allocate " + "iocb Rctl x%x Type x%x received\n", + LPFC_ELS_RING, + fc_hdr->fh_r_ctl, fc_hdr->fh_type); + return; + } + if (!lpfc_complete_unsol_iocb(phba, + &phba->sli.ring[LPFC_ELS_RING], + iocbq, fc_hdr->fh_r_ctl, + fc_hdr->fh_type)) + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2540 Ring %d handler: unexpected Rctl " + "x%x Type x%x received\n", + LPFC_ELS_RING, + fc_hdr->fh_r_ctl, fc_hdr->fh_type); + + /* Free iocb created in lpfc_prep_seq */ + list_for_each_entry_safe(curr_iocb, next_iocb, + &iocbq->list, list) { + list_del_init(&curr_iocb->list); + lpfc_sli_release_iocbq(phba, curr_iocb); + } + lpfc_sli_release_iocbq(phba, iocbq); +} + /** * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware * @phba: Pointer to HBA context object. 
@@ -11030,7 +11261,6 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr; struct lpfc_vport *vport; uint32_t fcfi; - struct lpfc_iocbq *iocbq; /* Clear hba flag and get all received buffers into the cmplq */ spin_lock_irq(&phba->hbalock); @@ -11051,6 +11281,12 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } + /* Handle the basic abort sequence (BA_ABTS) event */ + if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { + lpfc_sli4_handle_unsol_abort(vport, dmabuf); + return; + } + /* Link this frame */ seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); if (!seq_dmabuf) { @@ -11068,17 +11304,8 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, dmabuf->tag = -1; return; } - fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; - iocbq = lpfc_prep_seq(vport, seq_dmabuf); - if (!lpfc_complete_unsol_iocb(phba, - &phba->sli.ring[LPFC_ELS_RING], - iocbq, fc_hdr->fh_r_ctl, - fc_hdr->fh_type)) - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "2540 Ring %d handler: unexpected Rctl " - "x%x Type x%x received\n", - LPFC_ELS_RING, - fc_hdr->fh_r_ctl, fc_hdr->fh_type); + /* Send the complete sequence to the upper layer protocol */ + lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); } /** diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index ad8094966ff3..0e518b12f414 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -113,7 +113,7 @@ typedef struct lpfcMboxq { return */ #define MBX_NOWAIT 2 /* issue command then return immediately */ -#define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per +#define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per ring */ #define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 97da7589e038..fc3de6fdd709 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -58,6 +58,11 @@ #define LPFC_FCOE_FKA_ADV_PER 0 #define LPFC_FCOE_FIP_PRIORITY 0x80 +#define sli4_sid_from_fc_hdr(fc_hdr) \ + ((fc_hdr)->fh_s_id[0] << 16 | \ + (fc_hdr)->fh_s_id[1] << 8 | \ + (fc_hdr)->fh_s_id[2]) + enum lpfc_sli4_queue_type { LPFC_EQ, LPFC_GCQ, -- cgit v1.2.3-59-g8ed1b From 6a9c52cf22e4ca13816bb2bd9899129cd4445de7 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 2 Oct 2009 15:16:51 -0400 Subject: [SCSI] lpfc 8.3.5: fix sysfs parameters, vport creation and other bugs and update logging This patch include the following fixes and changes: - Fix crash when "error" is echoed to board_mode sysfs parameter - Fix FCoE Parameter parsing in regions 23 - Fix driver crash when creating vport with large number of targets on SLI4 - Fix bug with npiv message being logged when it is not supported by the adapter - Fix a potential dereferencing mailbox structure after free bug - Fix firmware crash after vport create with high target count - Error out requests to set board_mode to warm restart via sysfs on SLI4 HBAs - Fix Block guard logging - Fix a memory corruption issue during GID_FT IO prep - Fix crash while processing unsolicited FC frames - Fix failed to allocate XRI message is not a critical failure - Update and fix formatting in some log messages - Fix missing new line characters in log messages - Removed the use of the locally defined FC transport layer related macros - Check the rsplen in lpfc_handle_fcp_err function before using rsplen Signed-off-by: James Smart Signed-off-by: James Bottomley --- 
drivers/scsi/lpfc/lpfc_attr.c | 15 +++- drivers/scsi/lpfc/lpfc_bsg.c | 5 +- drivers/scsi/lpfc/lpfc_ct.c | 19 +++-- drivers/scsi/lpfc/lpfc_debugfs.c | 10 +-- drivers/scsi/lpfc/lpfc_disc.h | 2 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 13 +-- drivers/scsi/lpfc/lpfc_hw.h | 15 ---- drivers/scsi/lpfc/lpfc_init.c | 54 ++++++------ drivers/scsi/lpfc/lpfc_mbox.c | 11 ++- drivers/scsi/lpfc/lpfc_scsi.c | 180 +++++++++++++++++++++++---------------- drivers/scsi/lpfc/lpfc_sli.c | 85 +++++++----------- drivers/scsi/lpfc/lpfc_sli4.h | 2 +- 12 files changed, 214 insertions(+), 197 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 07f0172674c9..e058f1018ff2 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -762,9 +763,15 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) - status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); + if (phba->sli_rev == LPFC_SLI_REV4) + return -EINVAL; + else + status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); else if (strncmp(buf, "error", sizeof("error") - 1) == 0) - status = lpfc_do_offline(phba, LPFC_EVT_KILL); + if (phba->sli_rev == LPFC_SLI_REV4) + return -EINVAL; + else + status = lpfc_do_offline(phba, LPFC_EVT_KILL); else return -EINVAL; @@ -2846,7 +2853,7 @@ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary " # identifies what rctl value to configure the additional ring for. # Value range is [1,0xff]. Default value is 4 (Unsolicated Data). */ -LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1, +LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1, 255, "Identifies RCTL for additional ring configuration"); /* @@ -2854,7 +2861,7 @@ LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1, # identifies what type value to configure the additional ring for. # Value range is [1,0xff]. Default value is 5 (LLC/SNAP). 
*/ -LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1, +LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1, 255, "Identifies TYPE for additional ring configuration"); /* diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index da6bf5aac9dd..a5d9048235d9 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -148,8 +149,8 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job) cmd->ulpCommand = CMD_GEN_REQUEST64_CR; cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); cmd->un.genreq64.w5.hcsw.Dfctl = 0; - cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; - cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; + cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; + cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; cmd->ulpBdeCount = 1; cmd->ulpLe = 1; cmd->ulpClass = CLASS3; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index e724048bf390..0ebcd9baca79 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -31,6 +31,7 @@ #include #include #include +#include #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -336,8 +337,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, /* Fill in rest of iocb */ icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); icmd->un.genreq64.w5.hcsw.Dfctl = 0; - icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; - icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; + icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; + icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; if (!tmo) { /* FC spec states we need 3 * ratov for CT requests */ @@ -395,9 +396,14 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp, outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt); if (!outmp) return -ENOMEM; - + /* + * Form the CT IOCB. The total number of BDEs in this IOCB + * is the single command plus response count from + * lpfc_alloc_ct_rsp. 
+ */ + cnt += 1; status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0, - cnt+1, 0, retry); + cnt, 0, retry); if (status) { lpfc_free_ct_rsp(phba, outmp); return -ENOMEM; @@ -533,6 +539,9 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) SLI_CTNS_GFF_ID, 0, Did) == 0) vport->num_disc_nodes++; + else + lpfc_setup_disc_node + (vport, Did); } else { lpfc_debugfs_disc_trc(vport, @@ -1241,7 +1250,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, be16_to_cpu(SLI_CTNS_RFF_ID); CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); CtReq->un.rff.fbits = FC4_FEATURE_INIT; - CtReq->un.rff.type_code = FC_FCP_DATA; + CtReq->un.rff.type_code = FC_TYPE_FCP; cmpl = lpfc_cmpl_ct_cmd_rff_id; break; } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 8d0f0de76b63..391584183d81 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -926,7 +926,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file) goto out; /* Round to page boundry */ - printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n", + printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n", __func__, _dump_buf_data); debug->buffer = _dump_buf_data; if (!debug->buffer) { @@ -956,8 +956,8 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file) goto out; /* Round to page boundry */ - printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__, - _dump_buf_dif, file->f_dentry->d_name.name); + printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n", + __func__, _dump_buf_dif, file->f_dentry->d_name.name); debug->buffer = _dump_buf_dif; if (!debug->buffer) { kfree(debug); @@ -1377,7 +1377,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_dir(name, phba->hba_debugfs_root); if (!vport->vport_debugfs_root) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0417 Cant create debugfs"); + "0417 Cant create debugfs\n"); goto debug_failed; } atomic_inc(&phba->debugfs_vport_count); @@ -1430,7 +1430,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) vport, &lpfc_debugfs_op_nodelist); if (!vport->debug_nodelist) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0409 Cant create debugfs nodelist"); + "0409 Cant create debugfs nodelist\n"); goto debug_failed; } debug_failed: diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 1142070e9484..f26f6e160a2a 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -19,7 +19,7 @@ *******************************************************************/ #define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */ -#define FC_MAX_NS_RSP 65536 /* max size NameServer rsp */ +#define FC_MAX_NS_RSP 64512 /* max size NameServer rsp */ #define FC_MAXLOOP 126 /* max devices supported on a fc loop */ #define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */ diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 1b2771ac15f2..e8689cabe5f7 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1699,9 +1699,8 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_initial_fdisc(vport); else { lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); - lpfc_printf_vlog(vport, KERN_ERR, - LOG_ELS, - "2606 No NPIV Fabric support\n"); + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2606 No NPIV Fabric support\n"); } return; } @@ -1901,7 +1900,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) if (phba->fc_topology == 
TOPOLOGY_LOOP) { phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; - if (phba->cfg_enable_npiv) + /* if npiv is enabled and this adapter supports npiv log + * a message that npiv is not supported in this topology + */ + if (phba->cfg_enable_npiv && phba->max_vpi) lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1309 Link Up Event npiv not supported in loop " "topology\n"); @@ -3118,7 +3120,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) struct lpfc_sli *psli; struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; - uint32_t rpi, i; + uint32_t i; lpfc_fabric_abort_nport(ndlp); @@ -3127,7 +3129,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * by firmware with a no rpi error. */ psli = &phba->sli; - rpi = ndlp->nlp_rpi; if (ndlp->nlp_flag & NLP_RPI_VALID) { /* Now process each ring */ for (i = 0; i < psli->num_rings; i++) { diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 74f9f028b45f..8274f998ef2f 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1124,21 +1124,6 @@ typedef struct { /* Number of 4-byte words in an IOCB. */ #define IOCB_WORD_SZ 8 -/* defines for type field in fc header */ -#define FC_ELS_DATA 0x1 -#define FC_LLC_SNAP 0x5 -#define FC_FCP_DATA 0x8 -#define FC_COMMON_TRANSPORT_ULP 0x20 - -/* defines for rctl field in fc header */ -#define FC_DEV_DATA 0x0 -#define FC_UNSOL_CTL 0x2 -#define FC_SOL_CTL 0x3 -#define FC_UNSOL_DATA 0x4 -#define FC_FCP_CMND 0x6 -#define FC_ELS_REQ 0x22 -#define FC_ELS_RSP 0x23 - /* network headers for Dfctl field */ #define FC_NET_HDR 0x20 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a7b5566ea0b5..12ab1eae47f9 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -3004,12 +3004,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, spin_unlock_irq(&phba->hbalock); /* Read the FCF table and re-discover SAN. 
*/ - rc = lpfc_sli4_read_fcf_record(phba, - LPFC_FCOE_FCF_GET_FIRST); + rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2547 Read FCF record failed 0x%x\n", - rc); + "2547 Read FCF record failed 0x%x\n", + rc); break; case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: @@ -3021,7 +3020,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2549 FCF disconnected fron network index 0x%x" + "2549 FCF disconnected from network index 0x%x" " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); /* If the event is not for currently used fcf do nothing */ @@ -3917,7 +3916,7 @@ lpfc_free_sgl_list(struct lpfc_hba *phba) rc = lpfc_sli4_remove_all_sgl_pages(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2005 Unable to deregister pages from HBA: %x", rc); + "2005 Unable to deregister pages from HBA: %x\n", rc); } kfree(phba->sli4_hba.lpfc_els_sgl_array); } @@ -4366,7 +4365,8 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) _dump_buf_data = (char *) __get_free_pages(GFP_KERNEL, pagecnt); if (_dump_buf_data) { - printk(KERN_ERR "BLKGRD allocated %d pages for " + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9043 BLKGRD: allocated %d pages for " "_dump_buf_data at 0x%p\n", (1 << pagecnt), _dump_buf_data); _dump_buf_data_order = pagecnt; @@ -4377,17 +4377,20 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) --pagecnt; } if (!_dump_buf_data_order) - printk(KERN_ERR "BLKGRD ERROR unable to allocate " + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9044 BLKGRD: ERROR unable to allocate " "memory for hexdump\n"); } else - printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9045 BLKGRD: already allocated _dump_buf_data=0x%p" "\n", _dump_buf_data); if (!_dump_buf_dif) { while (pagecnt) { _dump_buf_dif = (char *) __get_free_pages(GFP_KERNEL, pagecnt); if (_dump_buf_dif) { - printk(KERN_ERR "BLKGRD allocated %d pages for " + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9046 BLKGRD: allocated %d pages for " "_dump_buf_dif at 0x%p\n", (1 << pagecnt), _dump_buf_dif); _dump_buf_dif_order = pagecnt; @@ -4398,10 +4401,12 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) --pagecnt; } if (!_dump_buf_dif_order) - printk(KERN_ERR "BLKGRD ERROR unable to allocate " + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9047 BLKGRD: ERROR unable to allocate " "memory for hexdump\n"); } else - printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", _dump_buf_dif); } @@ -5072,10 +5077,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) /* It does not make sense to have more EQs than WQs */ if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2593 The number of FCP EQs (%d) is more " - "than the number of FCP WQs (%d), take " - "the number of FCP EQs same as than of " - "WQs (%d)\n", cfg_fcp_eq_count, + "2593 The FCP EQ count(%d) cannot be greater " + "than the FCP WQ count(%d), limiting the " + "FCP EQ count to %d\n", cfg_fcp_eq_count, phba->cfg_fcp_wq_count, phba->cfg_fcp_wq_count); cfg_fcp_eq_count = phba->cfg_fcp_wq_count; @@ -7271,15 +7275,15 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) if (phba->sli_rev == LPFC_SLI_REV4) { if (max_xri <= 100) - return 4; + return 10; else if (max_xri <= 256) - return 8; + return 25; else if 
(max_xri <= 512) - return 16; + return 50; else if (max_xri <= 1024) - return 32; + return 100; else - return 48; + return 150; } else return 0; } @@ -8117,15 +8121,15 @@ lpfc_exit(void) if (lpfc_enable_npiv) fc_release_transport(lpfc_vport_transport_template); if (_dump_buf_data) { - printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " - "at 0x%p\n", + printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " + "_dump_buf_data at 0x%p\n", (1L << _dump_buf_data_order), _dump_buf_data); free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); } if (_dump_buf_dif) { - printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " - "at 0x%p\n", + printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " + "_dump_buf_dif at 0x%p\n", (1L << _dump_buf_dif_order), _dump_buf_dif); free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); } diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 2a38d94654bc..500a6b6e778e 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -25,8 +25,8 @@ #include #include - #include +#include #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -1135,7 +1135,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) /* Otherwise we setup specific rctl / type masks for this ring */ for (i = 0; i < pring->num_mask; i++) { mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; - if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ) + if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ) mb->un.varCfgRing.rrRegs[i].rmask = 0xff; else mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; @@ -1657,9 +1657,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, /* Allocate record for keeping SGE virtual addresses */ mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), GFP_KERNEL); - if (!mbox->sge_array) + if (!mbox->sge_array) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2527 Failed to allocate non-embedded SGE " + "array.\n"); return 0; - + } for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { /* The DMA memory is always allocated in the length of a * page even though the last SGE might not fill up to a diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index e25179193a82..bcddb6c1a148 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -61,20 +61,22 @@ static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static void -lpfc_debug_save_data(struct scsi_cmnd *cmnd) +lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) { void *src, *dst; struct scatterlist *sgde = scsi_sglist(cmnd); if (!_dump_buf_data) { - printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n", __func__); return; } if (!sgde) { - printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9051 BLKGRD: ERROR: data scatterlist is null\n"); return; } @@ -88,19 +90,21 @@ lpfc_debug_save_data(struct scsi_cmnd *cmnd) } static void -lpfc_debug_save_dif(struct scsi_cmnd *cmnd) +lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) { void *src, *dst; struct scatterlist *sgde = scsi_prot_sglist(cmnd); if (!_dump_buf_dif) { - printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n", __func__); return; } if (!sgde) { - printk(KERN_ERR "BLKGRD ERROR: prot 
scatterlist is null\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9053 BLKGRD: ERROR: prot scatterlist is null\n"); return; } @@ -1024,7 +1028,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) lpfc_cmd->seg_cnt = nseg; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - printk(KERN_ERR "%s: Too many sg segments from " + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9064 BLKGRD: %s: Too many sg segments from " "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); @@ -1112,7 +1117,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * with the cmd */ static int -lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) +lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc) { uint8_t guard_type = scsi_host_get_guard(sc->device->host); uint8_t ret_prof = LPFC_PROF_INVALID; @@ -1136,7 +1141,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) case SCSI_PROT_NORMAL: default: - printk(KERN_ERR "Bad op/guard:%d/%d combination\n", + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9063 BLKGRD:Bad op/guard:%d/%d combination\n", scsi_get_prot_op(sc), guard_type); break; @@ -1157,7 +1163,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) case SCSI_PROT_WRITE_STRIP: case SCSI_PROT_NORMAL: default: - printk(KERN_ERR "Bad op/guard:%d/%d combination\n", + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9075 BLKGRD: Bad op/guard:%d/%d combination\n", scsi_get_prot_op(sc), guard_type); break; } @@ -1259,7 +1266,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint16_t apptagmask, apptagval; pde1 = (struct lpfc_pde *) bpl; - prof = lpfc_sc_to_sli_prof(sc); + prof = lpfc_sc_to_sli_prof(phba, sc); if (prof == LPFC_PROF_INVALID) goto out; @@ -1359,7 +1366,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, return 0; } - prof = lpfc_sc_to_sli_prof(sc); + prof = lpfc_sc_to_sli_prof(phba, sc); if (prof == LPFC_PROF_INVALID) goto out; @@ -1408,7 +1415,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { if (!sgde) { - printk(KERN_ERR "%s Invalid data segment\n", + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9065 BLKGRD:%s Invalid data segment\n", __func__); return 0; } @@ -1462,7 +1470,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, reftag += protgrp_blks; } else { /* if we're here, we have a bug */ - printk(KERN_ERR "BLKGRD: bug in %s\n", __func__); + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9054 BLKGRD: bug in %s\n", __func__); } } while (!alldone); @@ -1544,8 +1553,10 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, lpfc_cmd->seg_cnt = datasegcnt; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - printk(KERN_ERR "%s: Too many sg segments from " - "dma_map_sg. Config %d, seg_cnt %d\n", + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9067 BLKGRD: %s: Too many sg segments" + " from dma_map_sg. Config %d, seg_cnt" + " %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); scsi_dma_unmap(scsi_cmnd); @@ -1579,8 +1590,9 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, lpfc_cmd->prot_seg_cnt = protsegcnt; if (lpfc_cmd->prot_seg_cnt > phba->cfg_prot_sg_seg_cnt) { - printk(KERN_ERR "%s: Too many prot sg segments " - "from dma_map_sg. Config %d," + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9068 BLKGRD: %s: Too many prot sg " + "segments from dma_map_sg. 
Config %d," "prot_seg_cnt %d\n", __func__, phba->cfg_prot_sg_seg_cnt, lpfc_cmd->prot_seg_cnt); @@ -1671,23 +1683,26 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, uint32_t bgstat = bgf->bgstat; uint64_t failing_sector = 0; - printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x " + lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " "bgstat=0x%x bghm=0x%x\n", cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), blk_rq_sectors(cmd->request), bgstat, bghm); spin_lock(&_dump_buf_lock); if (!_dump_buf_done) { - printk(KERN_ERR "Saving Data for %u blocks to debugfs\n", + lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" + " Data for %u blocks to debugfs\n", (cmd->cmnd[7] << 8 | cmd->cmnd[8])); - lpfc_debug_save_data(cmd); + lpfc_debug_save_data(phba, cmd); /* If we have a prot sgl, save the DIF buffer */ if (lpfc_prot_group_type(phba, cmd) == LPFC_PG_TYPE_DIF_BUF) { - printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n", - (cmd->cmnd[7] << 8 | cmd->cmnd[8])); - lpfc_debug_save_dif(cmd); + lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: " + "Saving DIF for %u blocks to debugfs\n", + (cmd->cmnd[7] << 8 | cmd->cmnd[8])); + lpfc_debug_save_dif(phba, cmd); } _dump_buf_done = 1; @@ -1696,15 +1711,17 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (lpfc_bgs_get_invalid_prof(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" + " BlockGuard profile. bgstat:0x%x\n", + bgstat); ret = (-1); goto out; } if (lpfc_bgs_get_uninit_dif_block(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n", + lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " + "Invalid BlockGuard DIF Block. bgstat:0x%x\n", bgstat); ret = (-1); goto out; @@ -1718,7 +1735,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, cmd->result = DRIVER_SENSE << 24 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_guard_err_cnt++; - printk(KERN_ERR "BLKGRD: guard_tag error\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9055 BLKGRD: guard_tag error\n"); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -1730,7 +1748,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_reftag_err_cnt++; - printk(KERN_ERR "BLKGRD: ref_tag error\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9056 BLKGRD: ref_tag error\n"); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -1742,7 +1761,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_apptag_err_cnt++; - printk(KERN_ERR "BLKGRD: app_tag error\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9061 BLKGRD: app_tag error\n"); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -1763,7 +1783,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (!ret) { /* No error was reported - problem in FW? 
*/ cmd->result = ScsiResult(DID_ERROR, 0); - printk(KERN_ERR "BLKGRD: no errors reported!\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9057 BLKGRD: no errors reported!\n"); } out: @@ -1822,9 +1843,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) lpfc_cmd->seg_cnt = nseg; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - printk(KERN_ERR "%s: Too many sg segments from " - "dma_map_sg. Config %d, seg_cnt %d\n", - __func__, phba->cfg_sg_seg_cnt, + lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" + " %s: Too many sg segments from " + "dma_map_sg. Config %d, seg_cnt %d\n", + __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); scsi_dma_unmap(scsi_cmnd); return 1; @@ -2050,6 +2072,21 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, goto out; } + if (resp_info & RSP_LEN_VALID) { + rsplen = be32_to_cpu(fcprsp->rspRspLen); + if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || + (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "2719 Invalid response length: " + "tgt x%x lun x%x cmnd x%x rsplen x%x\n", + cmnd->device->id, + cmnd->device->lun, cmnd->cmnd[0], + rsplen); + host_status = DID_ERROR; + goto out; + } + } + if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); if (snslen > SCSI_SENSE_BUFFERSIZE) @@ -2074,15 +2111,6 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, be32_to_cpu(fcprsp->rspRspLen), fcprsp->rspInfo3); - if (resp_info & RSP_LEN_VALID) { - rsplen = be32_to_cpu(fcprsp->rspRspLen); - if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || - (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { - host_status = DID_ERROR; - goto out; - } - } - scsi_set_resid(cmnd, 0); if (resp_info & RESID_UNDER) { scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); @@ -2264,7 +2292,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9031 non-zero BGSTAT " - "on unprotected cmd"); + "on unprotected cmd\n"); } } @@ -2785,9 +2813,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { - printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x " - "str=%s without registering for BlockGuard - " - "Rejecting command\n", + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" + " op:%02x str=%s without registering for" + " BlockGuard - Rejecting command\n", cmnd->cmnd[0], scsi_get_prot_op(cmnd), dif_op_str[scsi_get_prot_op(cmnd)]); goto out_fail_command; @@ -2827,61 +2856,66 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) cmnd->scsi_done = done; if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + if (vport->phba->cfg_enable_bg) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " "str=%s\n", cmnd->cmnd[0], scsi_get_prot_op(cmnd), dif_op_str[scsi_get_prot_op(cmnd)]); - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x\n", cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], cmnd->cmnd[9]); - if (cmnd->cmnd[0] == READ_10) - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + if 
(cmnd->cmnd[0] == READ_10) + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9035 BLKGRD: READ @ sector %llu, " "count %u\n", (unsigned long long)scsi_get_lba(cmnd), blk_rq_sectors(cmnd->request)); - else if (cmnd->cmnd[0] == WRITE_10) - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + else if (cmnd->cmnd[0] == WRITE_10) + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9036 BLKGRD: WRITE @ sector %llu, " "count %u cmd=%p\n", (unsigned long long)scsi_get_lba(cmnd), blk_rq_sectors(cmnd->request), cmnd); + } err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x" - " str=%s\n", - cmnd->cmnd[0], scsi_get_prot_op(cmnd), - dif_op_str[scsi_get_prot_op(cmnd)]); - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x " - "%02x %02x %02x %02x %02x\n", - cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], - cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], - cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], - cmnd->cmnd[9]); - if (cmnd->cmnd[0] == READ_10) + if (vport->phba->cfg_enable_bg) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9040 dbg: READ @ sector %llu, " - "count %u\n", - (unsigned long long)scsi_get_lba(cmnd), + "9038 BLKGRD: rcvd unprotected cmd:" + "%02x op:%02x str=%s\n", + cmnd->cmnd[0], scsi_get_prot_op(cmnd), + dif_op_str[scsi_get_prot_op(cmnd)]); + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "9039 BLKGRD: CDB: %02x %02x %02x " + "%02x %02x %02x %02x %02x %02x %02x\n", + cmnd->cmnd[0], cmnd->cmnd[1], + cmnd->cmnd[2], cmnd->cmnd[3], + cmnd->cmnd[4], cmnd->cmnd[5], + cmnd->cmnd[6], cmnd->cmnd[7], + cmnd->cmnd[8], cmnd->cmnd[9]); + if (cmnd->cmnd[0] == READ_10) + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "9040 dbg: READ @ sector %llu, " + "count %u\n", + (unsigned long long)scsi_get_lba(cmnd), blk_rq_sectors(cmnd->request)); - else if (cmnd->cmnd[0] == WRITE_10) - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + else if (cmnd->cmnd[0] == WRITE_10) + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9041 dbg: WRITE @ sector %llu, " "count %u cmd=%p\n", (unsigned long long)scsi_get_lba(cmnd), blk_rq_sectors(cmnd->request), cmnd); - else - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + else + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9042 dbg: parser not implemented\n"); + } err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index e8d3e4732a84..9693c777425a 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -516,6 +516,8 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba) struct lpfc_sglq *sglq = NULL; uint16_t adj_xri; list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); + if (!sglq) + return NULL; adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; return sglq; @@ -2070,8 +2072,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { - Rctl = FC_ELS_REQ; - Type = FC_ELS_DATA; + Rctl = FC_RCTL_ELS_REQ; + Type = FC_TYPE_ELS; } else { w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); Rctl = w5p->hcsw.Rctl; @@ -2081,8 +2083,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || irsp->ulpCommand 
== CMD_IOCB_RCV_SEQ64_CX)) { - Rctl = FC_ELS_REQ; - Type = FC_ELS_DATA; + Rctl = FC_RCTL_ELS_REQ; + Type = FC_TYPE_ELS; w5p->hcsw.Rctl = Rctl; w5p->hcsw.Type = Type; } @@ -4485,7 +4487,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = lpfc_sli4_post_sgl_list(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0582 Error %d during sgl post operation", rc); + "0582 Error %d during sgl post operation\n", + rc); rc = -ENODEV; goto out_free_vpd; } @@ -4494,8 +4497,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = lpfc_sli4_repost_scsi_sgl_list(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "0383 Error %d during scsi sgl post opeation", - rc); + "0383 Error %d during scsi sgl post " + "operation\n", rc); /* Some Scsi buffers were moved to the abort scsi list */ /* A pci function reset will repost them */ rc = -ENODEV; @@ -5686,7 +5689,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, case CMD_GEN_REQUEST64_CX: if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || (piocb->iocb.un.genreq64.w5.hcsw.Rctl != - FC_FCP_CMND) || + FC_RCTL_DD_UNSOL_CMD) || (piocb->iocb.un.genreq64.w5.hcsw.Type != MENLO_TRANSPORT_TYPE)) @@ -6485,27 +6488,27 @@ lpfc_sli_setup(struct lpfc_hba *phba) lpfc_sli_async_event_handler; pring->num_mask = LPFC_MAX_RING_MASK; pring->prt[0].profile = 0; /* Mask 0 */ - pring->prt[0].rctl = FC_ELS_REQ; - pring->prt[0].type = FC_ELS_DATA; + pring->prt[0].rctl = FC_RCTL_ELS_REQ; + pring->prt[0].type = FC_TYPE_ELS; pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event; pring->prt[1].profile = 0; /* Mask 1 */ - pring->prt[1].rctl = FC_ELS_RSP; - pring->prt[1].type = FC_ELS_DATA; + pring->prt[1].rctl = FC_RCTL_ELS_REP; + pring->prt[1].type = FC_TYPE_ELS; pring->prt[1].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event; pring->prt[2].profile = 0; /* Mask 2 */ /* NameServer Inquiry */ - pring->prt[2].rctl = FC_UNSOL_CTL; + pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; /* NameServer */ - pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; + pring->prt[2].type = FC_TYPE_CT; pring->prt[2].lpfc_sli_rcv_unsol_event = lpfc_ct_unsol_event; pring->prt[3].profile = 0; /* Mask 3 */ /* NameServer response */ - pring->prt[3].rctl = FC_SOL_CTL; + pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; /* NameServer */ - pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; + pring->prt[3].type = FC_TYPE_CT; pring->prt[3].lpfc_sli_rcv_unsol_event = lpfc_ct_unsol_event; /* abort unsolicited sequence */ @@ -8089,7 +8092,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) KERN_ERR, LOG_MBOX | LOG_SLI, "0350 rc should have" - "been MBX_BUSY"); + "been MBX_BUSY\n"); if (rc != MBX_NOT_FINISHED) goto send_current_mbox; } @@ -8118,7 +8121,7 @@ send_current_mbox: if (rc != MBX_SUCCESS) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0349 rc should be " - "MBX_SUCCESS"); + "MBX_SUCCESS\n"); } spin_lock_irqsave(&phba->hbalock, iflag); @@ -10454,8 +10457,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba) return xritag; } spin_unlock_irq(&phba->hbalock); - - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2004 Failed to allocate XRI.last XRITAG is %d" " Max XRI is %d, Used XRI is %d\n", phba->sli4_hba.next_xri, @@ -10519,15 +10521,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) lpfc_sli4_mbox_cmd_free(phba, mbox); return -ENOMEM; } - /* Get the first SGE entry from the non-embedded DMA memory */ - if (unlikely(!mbox->sge_array)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "2525 Failed to get the 
non-embedded SGE " - "virtual address\n"); - lpfc_sli4_mbox_cmd_free(phba, mbox); - return -ENOMEM; - } viraddr = mbox->sge_array->addr[0]; /* Set up the SGL pages in the non-embedded DMA pages */ @@ -10551,8 +10545,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) sgl_pg_pairs++; } bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); - pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs; - bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); + bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); /* Perform endian conversion if necessary */ sgl->word0 = cpu_to_le32(sgl->word0); @@ -10634,15 +10627,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, lpfc_sli4_mbox_cmd_free(phba, mbox); return -ENOMEM; } - /* Get the first SGE entry from the non-embedded DMA memory */ - if (unlikely(!mbox->sge_array)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "2565 Failed to get the non-embedded SGE " - "virtual address\n"); - lpfc_sli4_mbox_cmd_free(phba, mbox); - return -ENOMEM; - } viraddr = mbox->sge_array->addr[0]; /* Set up the SGL pages in the non-embedded DMA pages */ @@ -11565,6 +11550,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) { LPFC_MBOXQ_t *mboxq; int rc = 0; + int retval = MBX_SUCCESS; uint32_t mbox_tmo; if (vpi == 0) @@ -11575,16 +11561,17 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) lpfc_init_vpi(phba, mboxq, vpi); mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); - if (rc != MBX_TIMEOUT) - mempool_free(mboxq, phba->mbox_mem_pool); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2022 INIT VPI Mailbox failed " "status %d, mbxStatus x%x\n", rc, bf_get(lpfc_mqe_status, &mboxq->u.mqe)); - rc = -EIO; + retval = -EIO; } - return rc; + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + + return retval; } /** @@ -11669,13 +11656,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) */ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); - if (unlikely(!mboxq->sge_array)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "2526 Failed to get the non-embedded SGE " - "virtual address\n"); - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return -ENOMEM; - } virt_addr = mboxq->sge_array->addr[0]; /* * Configure the FCF record for FCFI 0. 
This is the driver's @@ -11799,13 +11779,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) */ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); - if (unlikely(!mboxq->sge_array)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "2527 Failed to get the non-embedded SGE " - "virtual address\n"); - error = -ENOMEM; - goto fail_fcfscan; - } virt_addr = mboxq->sge_array->addr[0]; read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index fc3de6fdd709..1f6cb01e6c6b 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -159,7 +159,7 @@ struct lpfc_fip_param_hdr { #define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 #define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 #define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags -#define FIPP_MODE_ON 0x2 +#define FIPP_MODE_ON 0x1 #define FIPP_MODE_OFF 0x0 #define FIPP_VLAN_VALID 0x1 }; -- cgit v1.2.3-59-g8ed1b From 0d87841997125971b7a39d21d1435054f91884c3 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 2 Oct 2009 15:16:56 -0400 Subject: [SCSI] lpfc 8.3.5: Add AER support Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 2 + drivers/scsi/lpfc/lpfc_attr.c | 177 ++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/lpfc/lpfc_init.c | 96 +++++++++++++++++------ drivers/scsi/lpfc/lpfc_sli.c | 27 +++++++ 4 files changed, 278 insertions(+), 24 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index c618eaf3c0c8..e5ebb5343421 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -534,6 +534,7 @@ struct lpfc_hba { #define ASYNC_EVENT 0x80 #define LINK_DISABLED 0x100 /* Link disabled by user */ #define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ +#define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */ struct lpfc_dmabuf slim2p; MAILBOX_t *mbox; @@ -607,6 +608,7 @@ struct lpfc_hba { uint32_t cfg_enable_bg; uint32_t cfg_enable_fip; uint32_t cfg_log_verbose; + uint32_t cfg_aer_support; lpfc_vpd_t vpd; /* vital product data */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index e058f1018ff2..82005b8ad957 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -2765,6 +2766,179 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val) static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, lpfc_link_speed_show, lpfc_link_speed_store); +/* +# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) +# 0 = aer disabled or not supported +# 1 = aer supported and enabled (default) +# Value range is [0,1]. Default value is 1. +*/ + +/** + * lpfc_aer_support_store - Set the adapter for aer support + * + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing the string "selective". + * @count: unused variable. + * + * Description: + * If the val is 1 and currently the device's AER capability was not + * enabled, invoke the kernel's enable AER helper routine, trying to + * enable the device's AER capability. If the helper routine enabling + * AER returns success, update the device's cfg_aer_support flag to + * indicate AER is supported by the device; otherwise, if the device + * AER capability is already enabled to support AER, then do nothing. 
+ * + * If the val is 0 and currently the device's AER support was enabled, + * invoke the kernel's disable AER helper routine. After that, update + * the device's cfg_aer_support flag to indicate AER is not supported + * by the device; otherwise, if the device AER capability is already + * disabled from supporting AER, then do nothing. + * + * Returns: + * length of the buf on success if val is in range the intended mode + * is supported. + * -EINVAL if val out of range or intended mode is not supported. + **/ +static ssize_t +lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int val = 0, rc = -EINVAL; + + if (!isdigit(buf[0])) + return -EINVAL; + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + + switch (val) { + case 0: + if (phba->hba_flag & HBA_AER_ENABLED) { + rc = pci_disable_pcie_error_reporting(phba->pcidev); + if (!rc) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~HBA_AER_ENABLED; + spin_unlock_irq(&phba->hbalock); + phba->cfg_aer_support = 0; + rc = strlen(buf); + } else + rc = -EINVAL; + } else + phba->cfg_aer_support = 0; + rc = strlen(buf); + break; + case 1: + if (!(phba->hba_flag & HBA_AER_ENABLED)) { + rc = pci_enable_pcie_error_reporting(phba->pcidev); + if (!rc) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= HBA_AER_ENABLED; + spin_unlock_irq(&phba->hbalock); + phba->cfg_aer_support = 1; + rc = strlen(buf); + } else + rc = -EINVAL; + } else + phba->cfg_aer_support = 1; + rc = strlen(buf); + break; + default: + rc = -EINVAL; + break; + } + return rc; +} + +static int lpfc_aer_support = 1; +module_param(lpfc_aer_support, int, 1); +MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support"); +lpfc_param_show(aer_support) + +/** + * lpfc_aer_support_init - Set the initial adapters aer support flag + * @phba: lpfc_hba pointer. + * @val: link speed value. + * + * Description: + * If val is in a valid range [0,1], then set the adapter's initial + * cfg_aer_support field. It will be up to the driver's probe_one + * routine to determine whether the device's AER support can be set + * or not. + * + * Notes: + * If the value is not in range log a kernel error message, and + * choose the default value of setting AER support and return. + * + * Returns: + * zero if val saved. + * -EINVAL val out of range + **/ +static int +lpfc_aer_support_init(struct lpfc_hba *phba, int val) +{ + if (val == 0 || val == 1) { + phba->cfg_aer_support = val; + return 0; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2712 lpfc_aer_support attribute value %d out " + "of range, allowed values are 0|1, setting it " + "to default value of 1\n", val); + phba->cfg_aer_support = 1; + return -EINVAL; +} + +static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR, + lpfc_aer_support_show, lpfc_aer_support_store); + +/** + * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing the string "selective". + * @count: unused variable. + * + * Description: + * If the @buf contains 1 and the device currently has the AER support + * enabled, then invokes the kernel AER helper routine + * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable + * error status register. 
+ * + * Notes: + * + * Returns: + * -EINVAL if the buf does not contain the 1 or the device is not currently + * enabled with the AER support. + **/ +static ssize_t +lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int val, rc = -1; + + if (!isdigit(buf[0])) + return -EINVAL; + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + + if (val == 1 && phba->hba_flag & HBA_AER_ENABLED) + rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev); + + if (rc == 0) + return strlen(buf); + else + return -EINVAL; +} + +static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL, + lpfc_aer_cleanup_state); + /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. # Value range is [2,3]. Default value is 3. @@ -3068,6 +3242,8 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, &dev_attr_lpfc_prot_sg_seg_cnt, + &dev_attr_lpfc_aer_support, + &dev_attr_lpfc_aer_state_cleanup, NULL, }; @@ -4244,6 +4420,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); lpfc_enable_fip_init(phba, lpfc_enable_fip); lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); + lpfc_aer_support_init(phba, lpfc_aer_support); return; } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 12ab1eae47f9..61925836a09e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -7098,6 +7099,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) /* Restore device state from PCI config space */ pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); + if (pdev->is_busmaster) pci_set_master(pdev); @@ -7131,6 +7133,53 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) return 0; } +/** + * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to prepare the SLI3 device for PCI slot reset. It + * disables the device interrupt and pci device, and aborts the internal FCP + * pending I/Os. + **/ +static void +lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2710 PCI channel I/O frozen\n"); + /* Disable interrupt and pci device */ + lpfc_sli_disable_intr(phba); + pci_disable_device(phba->pcidev); + /* + * There may be I/Os dropped by the firmware. + * Error iocb (I/O) on txcmplq and let the SCSI layer + * retry it after re-establishing link. + */ + pring = &psli->ring[psli->fcp_ring]; + lpfc_sli_abort_iocb_ring(phba, pring); +} + +/** + * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to prepare the SLI3 device for PCI slot permanently + * disabling. It blocks the SCSI transport layer traffic and flushes the FCP + * pending I/Os. 
+ **/ +static void +lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) +{ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2711 PCI channel I/O permanent failure\n"); + /* Block all SCSI devices' I/Os on the host */ + lpfc_scsi_dev_block(phba); + /* Clean up all driver's outstanding SCSI I/Os */ + lpfc_sli_flush_fcp_rings(phba); +} + /** * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error * @pdev: pointer to PCI device. @@ -7145,6 +7194,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) * as desired. * * Return codes + * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ @@ -7153,33 +7203,26 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - if (state == pci_channel_io_perm_failure) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0472 PCI channel I/O permanent failure\n"); - /* Block all SCSI devices' I/Os on the host */ - lpfc_scsi_dev_block(phba); - /* Clean up all driver's outstanding SCSI I/Os */ - lpfc_sli_flush_fcp_rings(phba); + switch (state) { + case pci_channel_io_normal: + /* Non-fatal error, do nothing */ + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + /* Fatal error, prepare for slot reset */ + lpfc_sli_prep_dev_for_reset(phba); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + /* Permanent failure, prepare for device down */ + lpfc_prep_dev_for_perm_failure(phba); return PCI_ERS_RESULT_DISCONNECT; + default: + /* Unknown state, prepare and request slot reset */ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0472 Unknown PCI error state: x%x\n", state); + lpfc_sli_prep_dev_for_reset(phba); + return PCI_ERS_RESULT_NEED_RESET; } - - pci_disable_device(pdev); - /* - * There may be I/Os dropped by the firmware. - * Error iocb (I/O) on txcmplq and let the SCSI layer - * retry it after re-establishing link. - */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); - - /* Disable interrupt */ - lpfc_sli_disable_intr(phba); - - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; } /** @@ -7259,7 +7302,12 @@ lpfc_io_resume_s3(struct pci_dev *pdev) struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + /* Bring the device online */ lpfc_online(phba); + + /* Clean up Advanced Error Reporting (AER) if needed */ + if (phba->hba_flag & HBA_AER_ENABLED) + pci_cleanup_aer_uncorrect_error_status(pdev); } /** diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 9693c777425a..42d0f1948a7a 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -3551,9 +3552,13 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) struct lpfc_sli *psli; volatile uint32_t word0; void __iomem *to_slim; + uint32_t hba_aer_enabled; spin_lock_irq(&phba->hbalock); + /* Take PCIe device Advanced Error Reporting (AER) state */ + hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; + psli = &phba->sli; /* Restart HBA */ @@ -3593,6 +3598,10 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) /* Give the INITFF and Post time to settle. 
*/ mdelay(100); + /* Reset HBA AER if it was enabled, note hba_flag was reset above */ + if (hba_aer_enabled) + pci_disable_pcie_error_reporting(phba->pcidev); + lpfc_hba_down_post(phba); return 0; @@ -4062,6 +4071,24 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) if (rc) goto lpfc_sli_hba_setup_error; + /* Enable PCIe device Advanced Error Reporting (AER) if configured */ + if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { + rc = pci_enable_pcie_error_reporting(phba->pcidev); + if (!rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2709 This device supports " + "Advanced Error Reporting (AER)\n"); + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= HBA_AER_ENABLED; + spin_unlock_irq(&phba->hbalock); + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2708 This device does not support " + "Advanced Error Reporting (AER)\n"); + phba->cfg_aer_support = 0; + } + } + if (phba->sli_rev == 3) { phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; -- cgit v1.2.3-59-g8ed1b From 45ed119035b27f240345b06e090d559874e3677a Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 2 Oct 2009 15:17:02 -0400 Subject: [SCSI] lpfc 8.3.5: fix fcp command polling, add FIP mode, performance optimisations and devloss timeout fixes This patch includes the following changes: - Fixed Panic/Hang when using polling mode for fcp commands - Added support for Read_rev mbox bits indicating FIP mode of HBA - Optimized performance of slow-path handling of els responses - Added code to clean up orphaned unsolicited receive sequences - Fixed Devloss timeout when multiple initiators are in same zone Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 9 +- drivers/scsi/lpfc/lpfc_attr.c | 42 +++- drivers/scsi/lpfc/lpfc_crtn.h | 7 +- drivers/scsi/lpfc/lpfc_els.c | 2 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 19 +- drivers/scsi/lpfc/lpfc_hw4.h | 5 + drivers/scsi/lpfc/lpfc_init.c | 11 +- drivers/scsi/lpfc/lpfc_mbox.c | 5 - drivers/scsi/lpfc/lpfc_scsi.c | 16 +- drivers/scsi/lpfc/lpfc_sli.c | 464 +++++++++++++++++++-------------------- drivers/scsi/lpfc/lpfc_sli.h | 2 +- drivers/scsi/lpfc/lpfc_sli4.h | 2 +- 12 files changed, 307 insertions(+), 277 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index e5ebb5343421..ebeddbe86e67 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -110,6 +110,7 @@ struct hbq_dmabuf { uint32_t size; uint32_t tag; struct lpfc_cq_event cq_event; + unsigned long time_stamp; }; /* Priority bit. Set value to exceed low water mark in lpfc_mem. 
*/ @@ -405,6 +406,7 @@ struct lpfc_vport { uint8_t stat_data_enabled; uint8_t stat_data_blocked; struct list_head rcv_buffer_list; + unsigned long rcv_buffer_time_stamp; uint32_t vport_flag; #define STATIC_VPORT 1 }; @@ -527,14 +529,16 @@ struct lpfc_hba { #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ #define DEFER_ERATT 0x2 /* Deferred error attention in progress */ #define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ -#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */ +#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ #define FCP_XRI_ABORT_EVENT 0x20 #define ELS_XRI_ABORT_EVENT 0x40 #define ASYNC_EVENT 0x80 #define LINK_DISABLED 0x100 /* Link disabled by user */ #define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ -#define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */ +#define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */ +#define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */ + uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; MAILBOX_t *mbox; @@ -606,7 +610,6 @@ struct lpfc_hba { uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; uint32_t cfg_enable_bg; - uint32_t cfg_enable_fip; uint32_t cfg_log_verbose; uint32_t cfg_aer_support; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 82005b8ad957..d55befb7cf4c 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -100,6 +100,28 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); } +/** + * lpfc_enable_fip_show - Return the fip mode of the HBA + * @dev: class unused variable. + * @attr: device attribute, not used. + * @buf: on return contains the module description text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + if (phba->hba_flag & HBA_FIP_SUPPORT) + return snprintf(buf, PAGE_SIZE, "1\n"); + else + return snprintf(buf, PAGE_SIZE, "0\n"); +} + static ssize_t lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1134,6 +1156,9 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, if ((val & 0x3) != val) return -EINVAL; + if (phba->sli_rev == LPFC_SLI_REV4) + val = 0; + spin_lock_irq(&phba->hbalock); old_val = phba->cfg_poll; @@ -1597,6 +1622,7 @@ static DEVICE_ATTR(num_discovered_ports, S_IRUGO, static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); +static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL); static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, lpfc_board_mode_show, lpfc_board_mode_store); static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); @@ -3127,15 +3153,6 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); */ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); -/* -# lpfc_enable_fip: When set, FIP is required to start discovery. 
If not -# set, the driver will add an FCF record manually if the port has no -# FCF records available and start discovery. -# Value range is [0,1]. Default value is 1 (enabled) -*/ -LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery"); - - /* # lpfc_prot_mask: i # - Bit mask of host protection capabilities used to register with the @@ -3194,6 +3211,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_num_discovered_ports, &dev_attr_menlo_mgmt_mode, &dev_attr_lpfc_drvr_version, + &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_temp_sensor, &dev_attr_lpfc_log_verbose, &dev_attr_lpfc_lun_queue_depth, @@ -3201,7 +3219,6 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, - &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_ack0, @@ -3256,7 +3273,6 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_lun_queue_depth, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, - &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_hba_queue_depth, &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_restrict_login, @@ -4412,13 +4428,15 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); lpfc_enable_bg_init(phba, lpfc_enable_bg); + if (phba->sli_rev == LPFC_SLI_REV4) + phba->cfg_poll = 0; + else phba->cfg_poll = lpfc_poll; phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); - lpfc_enable_fip_init(phba, lpfc_enable_fip); lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); lpfc_aer_support_init(phba, lpfc_aer_support); diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 0d450ae3a2d4..650494d622c1 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -49,6 +49,8 @@ void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); +void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); +void lpfc_rcv_seq_check_edtov(struct lpfc_vport *); void lpfc_cleanup_rpis(struct lpfc_vport *, int); int lpfc_linkdown(struct lpfc_hba *); void lpfc_linkdown_port(struct lpfc_vport *); @@ -214,7 +216,10 @@ void lpfc_stop_vport_timers(struct lpfc_vport *); void lpfc_poll_timeout(unsigned long ptr); void lpfc_poll_start_timer(struct lpfc_hba *); void lpfc_poll_eratt(unsigned long); -void lpfc_sli_poll_fcp_ring(struct lpfc_hba *); +int +lpfc_sli_handle_fast_ring_event(struct lpfc_hba *, + struct lpfc_sli_ring *, uint32_t); + struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 489ddcd4c584..fe0a33c9b874 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -173,7 +173,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 
*/ if ((did == Fabric_DID) && - bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) && + (phba->hba_flag & HBA_FIP_SUPPORT) && ((elscmd == ELS_CMD_FLOGI) || (elscmd == ELS_CMD_FDISC) || (elscmd == ELS_CMD_LOGO))) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index e8689cabe5f7..20fca3f6d43b 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -568,7 +568,7 @@ lpfc_work_done(struct lpfc_hba *phba) status >>= (4*LPFC_ELS_RING); if ((status & HA_RXMASK) || (pring->flag & LPFC_DEFERRED_RING_EVENT) || - (phba->hba_flag & HBA_RECEIVE_BUFFER)) { + (phba->hba_flag & HBA_SP_QUEUE_EVT)) { if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Set the lpfc data pending flag */ @@ -706,6 +706,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) void lpfc_port_link_failure(struct lpfc_vport *vport) { + /* Cleanup any outstanding received buffers */ + lpfc_cleanup_rcv_buffers(vport); + /* Cleanup any outstanding RSCN activity */ lpfc_els_flush_rscn(vport); @@ -1282,7 +1285,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) return 0; - if (!phba->cfg_enable_fip) { + if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { *boot_flag = 0; *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record); @@ -1997,7 +2000,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) * is phase 1 implementation that support FCF index 0 and driver * defaults. */ - if (phba->cfg_enable_fip == 0) { + if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { fcf_record = kzalloc(sizeof(struct fcf_record), GFP_KERNEL); if (unlikely(!fcf_record)) { @@ -4442,7 +4445,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) */ if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || !(phba->fcf.fcf_flag & FCF_REGISTERED) || - (phba->cfg_enable_fip == 0)) { + (!(phba->hba_flag & HBA_FIP_SUPPORT))) { spin_unlock_irq(&phba->hbalock); return; } @@ -4615,14 +4618,6 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba, (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) return; - if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == - FIPP_MODE_ON) - phba->cfg_enable_fip = 1; - - if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == - FIPP_MODE_OFF) - phba->cfg_enable_fip = 0; - if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { phba->valid_vlan = 1; phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 0c65091110cc..4f03f1d876d0 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1601,6 +1601,11 @@ struct lpfc_mbx_read_rev { #define lpfc_mbx_rd_rev_fcoe_SHIFT 20 #define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 #define lpfc_mbx_rd_rev_fcoe_WORD word1 +#define lpfc_mbx_rd_rev_cee_ver_SHIFT 21 +#define lpfc_mbx_rd_rev_cee_ver_MASK 0x00000003 +#define lpfc_mbx_rd_rev_cee_ver_WORD word1 +#define LPFC_PREDCBX_CEE_MODE 0 +#define LPFC_DCBX_CEE_MODE 1 #define lpfc_mbx_rd_rev_vpd_SHIFT 29 #define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 #define lpfc_mbx_rd_rev_vpd_WORD word1 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 61925836a09e..d7385d258f78 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -853,12 +853,19 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) void lpfc_hb_timeout_handler(struct lpfc_hba *phba) { + struct lpfc_vport **vports; LPFC_MBOXQ_t *pmboxq; struct lpfc_dmabuf *buf_ptr; - int retval; + int retval, i; 
struct lpfc_sli *psli = &phba->sli; LIST_HEAD(completions); + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + lpfc_rcv_seq_check_edtov(vports[i]); + lpfc_destroy_vport_work_array(phba, vports); + if ((phba->link_state == LPFC_HBA_ERROR) || (phba->pport->load_flag & FC_UNLOADING) || (phba->pport->fc_flag & FC_OFFLINE_MODE)) @@ -3519,7 +3526,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* Driver internel slow-path CQ Event pool */ INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); /* Response IOCB work queue list */ - INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); + INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); /* Asynchronous event CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); /* Fast-path XRI aborted CQ Event work queue list */ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 500a6b6e778e..51c9a1f576f6 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1759,11 +1759,6 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) /* Set up host requested features. */ bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); - if (phba->cfg_enable_fip) - bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0); - else - bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1); - /* Enable DIF (block guard) only if configured to do so. */ if (phba->cfg_enable_bg) bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index bcddb6c1a148..f5ab5dd9bbbf 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -2773,7 +2773,9 @@ void lpfc_poll_timeout(unsigned long ptr) struct lpfc_hba *phba = (struct lpfc_hba *) ptr; if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { - lpfc_sli_poll_fcp_ring (phba); + lpfc_sli_handle_fast_ring_event(phba, + &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); + if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } @@ -2932,7 +2934,11 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) goto out_host_busy_free_buf; } if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { - lpfc_sli_poll_fcp_ring(phba); + spin_unlock(shost->host_lock); + lpfc_sli_handle_fast_ring_event(phba, + &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); + + spin_lock(shost->host_lock); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } @@ -3028,7 +3034,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) } if (phba->cfg_poll & DISABLE_FCP_RING_INT) - lpfc_sli_poll_fcp_ring (phba); + lpfc_sli_handle_fast_ring_event(phba, + &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); lpfc_cmd->waitq = &waitq; /* Wait for abort to complete */ @@ -3546,7 +3553,8 @@ lpfc_slave_configure(struct scsi_device *sdev) rport->dev_loss_tmo = vport->cfg_devloss_tmo; if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { - lpfc_sli_poll_fcp_ring(phba); + lpfc_sli_handle_fast_ring_event(phba, + &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 42d0f1948a7a..c4b19d094d39 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -59,7 +59,9 @@ typedef enum _lpfc_iocb_type { static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); static int 
lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, - uint8_t *, uint32_t *); + uint8_t *, uint32_t *); +static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, + struct lpfc_iocbq *); static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, struct hbq_dmabuf *); static IOCB_t * @@ -2329,168 +2331,6 @@ void lpfc_poll_eratt(unsigned long ptr) return; } -/** - * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode - * @phba: Pointer to HBA context object. - * - * This function is called from lpfc_queuecommand, lpfc_poll_timeout, - * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING - * is enabled. - * - * The caller does not hold any lock. - * The function processes each response iocb in the response ring until it - * finds an iocb with LE bit set and chains all the iocbs upto the iocb with - * LE bit set. The function will call the completion handler of the command iocb - * if the response iocb indicates a completion for a command iocb or it is - * an abort completion. - **/ -void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) -{ - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING]; - IOCB_t *irsp = NULL; - IOCB_t *entry = NULL; - struct lpfc_iocbq *cmdiocbq = NULL; - struct lpfc_iocbq rspiocbq; - struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; - uint32_t status; - uint32_t portRspPut, portRspMax; - int type; - uint32_t rsp_cmpl = 0; - uint32_t ha_copy; - unsigned long iflags; - - pring->stats.iocb_event++; - - /* - * The next available response entry should never exceed the maximum - * entries. If it does, treat it as an adapter hardware error. - */ - portRspMax = pring->numRiocb; - portRspPut = le32_to_cpu(pgp->rspPutInx); - if (unlikely(portRspPut >= portRspMax)) { - lpfc_sli_rsp_pointers_error(phba, pring); - return; - } - - rmb(); - while (pring->rspidx != portRspPut) { - entry = lpfc_resp_iocb(phba, pring); - if (++pring->rspidx >= portRspMax) - pring->rspidx = 0; - - lpfc_sli_pcimem_bcopy((uint32_t *) entry, - (uint32_t *) &rspiocbq.iocb, - phba->iocb_rsp_size); - irsp = &rspiocbq.iocb; - type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); - pring->stats.iocb_rsp++; - rsp_cmpl++; - - if (unlikely(irsp->ulpStatus)) { - /* Rsp ring error: IOCB */ - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0326 Rsp Ring %d error: IOCB Data: " - "x%x x%x x%x x%x x%x x%x x%x x%x\n", - pring->ringno, - irsp->un.ulpWord[0], - irsp->un.ulpWord[1], - irsp->un.ulpWord[2], - irsp->un.ulpWord[3], - irsp->un.ulpWord[4], - irsp->un.ulpWord[5], - *(uint32_t *)&irsp->un1, - *((uint32_t *)&irsp->un1 + 1)); - } - - switch (type) { - case LPFC_ABORT_IOCB: - case LPFC_SOL_IOCB: - /* - * Idle exchange closed via ABTS from port. No iocb - * resources need to be recovered. - */ - if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0314 IOCB cmd 0x%x " - "processed. 
Skipping " - "completion", - irsp->ulpCommand); - break; - } - - spin_lock_irqsave(&phba->hbalock, iflags); - cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, - &rspiocbq); - spin_unlock_irqrestore(&phba->hbalock, iflags); - if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { - (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, - &rspiocbq); - } - break; - default: - if (irsp->ulpCommand == CMD_ADAPTER_MSG) { - char adaptermsg[LPFC_MAX_ADPTMSG]; - memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); - memcpy(&adaptermsg[0], (uint8_t *) irsp, - MAX_MSG_DATA); - dev_warn(&((phba->pcidev)->dev), - "lpfc%d: %s\n", - phba->brd_no, adaptermsg); - } else { - /* Unknown IOCB command */ - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0321 Unknown IOCB command " - "Data: x%x, x%x x%x x%x x%x\n", - type, irsp->ulpCommand, - irsp->ulpStatus, - irsp->ulpIoTag, - irsp->ulpContext); - } - break; - } - - /* - * The response IOCB has been processed. Update the ring - * pointer in SLIM. If the port response put pointer has not - * been updated, sync the pgp->rspPutInx and fetch the new port - * response put pointer. - */ - writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); - - if (pring->rspidx == portRspPut) - portRspPut = le32_to_cpu(pgp->rspPutInx); - } - - ha_copy = readl(phba->HAregaddr); - ha_copy >>= (LPFC_FCP_RING * 4); - - if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { - spin_lock_irqsave(&phba->hbalock, iflags); - pring->stats.iocb_rsp_full++; - status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); - writel(status, phba->CAregaddr); - readl(phba->CAregaddr); - spin_unlock_irqrestore(&phba->hbalock, iflags); - } - if ((ha_copy & HA_R0CE_RSP) && - (pring->flag & LPFC_CALL_RING_AVAILABLE)) { - spin_lock_irqsave(&phba->hbalock, iflags); - pring->flag &= ~LPFC_CALL_RING_AVAILABLE; - pring->stats.iocb_cmd_empty++; - - /* Force update of the local copy of cmdGetInx */ - pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); - lpfc_sli_resume_iocb(phba, pring); - - if ((pring->lpfc_sli_cmd_available)) - (pring->lpfc_sli_cmd_available) (phba, pring); - - spin_unlock_irqrestore(&phba->hbalock, iflags); - } - - return; -} /** * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring @@ -2507,9 +2347,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) * an abort completion. The function will call lpfc_sli_process_unsol_iocb * function if this is an unsolicited iocb. * This routine presumes LPFC_FCP_RING handling and doesn't bother - * to check it explicitly. This function always returns 1. - **/ -static int + * to check it explicitly. 
+ */ +int lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t mask) { @@ -2539,6 +2379,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, spin_unlock_irqrestore(&phba->hbalock, iflag); return 1; } + if (phba->fcp_ring_in_use) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + return 1; + } else + phba->fcp_ring_in_use = 1; rmb(); while (pring->rspidx != portRspPut) { @@ -2609,10 +2454,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq); if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { - if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { - (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, - &rspiocbq); - } else { spin_unlock_irqrestore(&phba->hbalock, iflag); (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, @@ -2620,7 +2461,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, spin_lock_irqsave(&phba->hbalock, iflag); } - } break; case LPFC_UNSOL_IOCB: spin_unlock_irqrestore(&phba->hbalock, iflag); @@ -2680,6 +2520,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, } + phba->fcp_ring_in_use = 0; spin_unlock_irqrestore(&phba->hbalock, iflag); return rc; } @@ -3027,10 +2868,13 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, struct lpfc_cq_event *cq_event; unsigned long iflag; - while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) { + spin_lock_irqsave(&phba->hbalock, iflag); + phba->hba_flag &= ~HBA_SP_QUEUE_EVT; + spin_unlock_irqrestore(&phba->hbalock, iflag); + while (!list_empty(&phba->sli4_hba.sp_queue_event)) { /* Get the response iocb from the head of work queue */ spin_lock_irqsave(&phba->hbalock, iflag); - list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue, + list_remove_head(&phba->sli4_hba.sp_queue_event, cq_event, struct lpfc_cq_event, list); spin_unlock_irqrestore(&phba->hbalock, iflag); @@ -3038,7 +2882,12 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, case CQE_CODE_COMPL_WQE: irspiocbq = container_of(cq_event, struct lpfc_iocbq, cq_event); - lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); + /* Translate ELS WCQE to response IOCBQ */ + irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, + irspiocbq); + if (irspiocbq) + lpfc_sli_sp_handle_rspiocb(phba, pring, + irspiocbq); break; case CQE_CODE_RECEIVE: dmabuf = container_of(cq_event, struct hbq_dmabuf, @@ -4368,6 +4217,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) phba->hba_flag |= HBA_FCOE_SUPPORT; + + if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == + LPFC_DCBX_CEE_MODE) + phba->hba_flag |= HBA_FIP_SUPPORT; + else + phba->hba_flag &= ~HBA_FIP_SUPPORT; + if (phba->sli_rev != LPFC_SLI_REV4 || !(phba->hba_flag & HBA_FCOE_SUPPORT)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, @@ -4541,10 +4397,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_free_vpd; } - if (phba->cfg_enable_fip) - bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1); - else - bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); /* Set up all the queues to the device */ rc = lpfc_sli4_queue_setup(phba); @@ -5905,7 +5757,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, uint16_t xritag; struct ulp_bde64 *bpl = NULL; - fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); + fip = phba->hba_flag & HBA_FIP_SUPPORT; /* The fcp commands will set command type */ if (iocbq->iocb_flag & LPFC_IO_FCP) command_type = FCP_COMMAND; @@ -7046,8 +6898,18 @@ 
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; spin_lock_irq(&phba->hbalock); - if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) - abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; + if (phba->sli_rev < LPFC_SLI_REV4) { + if (abort_iotag != 0 && + abort_iotag <= phba->sli.last_iotag) + abort_iocb = + phba->sli.iocbq_lookup[abort_iotag]; + } else + /* For sli4 the abort_tag is the XRI, + * so the abort routine puts the iotag of the iocb + * being aborted in the context field of the abort + * IOCB. + */ + abort_iocb = phba->sli.iocbq_lookup[abort_context]; lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, "0327 Cannot abort els iocb %p " @@ -7061,9 +6923,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * might have completed already. Do not free it again. */ if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - spin_unlock_irq(&phba->hbalock); - lpfc_sli_release_iocbq(phba, cmdiocb); - return; + if (irsp->un.ulpWord[4] != IOERR_NO_XRI) { + spin_unlock_irq(&phba->hbalock); + lpfc_sli_release_iocbq(phba, cmdiocb); + return; + } + /* For SLI4 the ulpContext field for abort IOCB + * holds the iotag of the IOCB being aborted so + * the local abort_context needs to be reset to + * match the aborted IOCBs ulpContext. + */ + if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4) + abort_context = abort_iocb->iocb.ulpContext; } /* * make sure we have the right iocbq before taking it @@ -7182,8 +7053,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt = &abtsiocbp->iocb; iabt->un.acxri.abortType = ABORT_TYPE_ABTS; iabt->un.acxri.abortContextTag = icmd->ulpContext; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; + iabt->un.acxri.abortContextTag = cmdiocb->iotag; + } else iabt->un.acxri.abortIoTag = icmd->ulpIoTag; iabt->ulpLe = 1; @@ -8421,7 +8294,6 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, sizeof(struct lpfc_iocbq) - offset); - pIocbIn->cq_event.cqe.wcqe_cmpl = *wcqe; /* Map WCQE parameters into irspiocb parameters */ pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); if (pIocbOut->iocb_flag & LPFC_IO_FCP) @@ -8435,6 +8307,49 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; } +/** + * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe + * @phba: Pointer to HBA context object. + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine handles an ELS work-queue completion event and construct + * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common + * discovery engine to handle. + * + * Return: Pointer to the receive IOCBQ, NULL otherwise. 
+ **/ +static struct lpfc_iocbq * +lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, + struct lpfc_iocbq *irspiocbq) +{ + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_iocbq *cmdiocbq; + struct lpfc_wcqe_complete *wcqe; + unsigned long iflags; + + wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; + spin_lock_irqsave(&phba->hbalock, iflags); + pring->stats.iocb_event++; + /* Look up the ELS command IOCB and create pseudo response IOCB */ + cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + if (unlikely(!cmdiocbq)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0386 ELS complete with no corresponding " + "cmdiocb: iotag (%d)\n", + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + lpfc_sli_release_iocbq(phba, irspiocbq); + return NULL; + } + + /* Fake the irspiocbq and copy necessary response information */ + lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe); + + return irspiocbq; +} + /** * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event * @phba: Pointer to HBA context object. @@ -8625,46 +8540,26 @@ static bool lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_wcqe_complete *wcqe) { - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; - struct lpfc_iocbq *cmdiocbq; struct lpfc_iocbq *irspiocbq; unsigned long iflags; - bool workposted = false; - spin_lock_irqsave(&phba->hbalock, iflags); - pring->stats.iocb_event++; - /* Look up the ELS command IOCB and create pseudo response IOCB */ - cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, - bf_get(lpfc_wcqe_c_request_tag, wcqe)); - spin_unlock_irqrestore(&phba->hbalock, iflags); - - if (unlikely(!cmdiocbq)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0386 ELS complete with no corresponding " - "cmdiocb: iotag (%d)\n", - bf_get(lpfc_wcqe_c_request_tag, wcqe)); - return workposted; - } - - /* Fake the irspiocbq and copy necessary response information */ + /* Get an irspiocbq for later ELS response processing use */ irspiocbq = lpfc_sli_get_iocbq(phba); if (!irspiocbq) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0387 Failed to allocate an iocbq\n"); - return workposted; + return false; } - lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe); - /* Add the irspiocb to the response IOCB work list */ + /* Save off the slow-path queue event for work thread to process */ + memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); spin_lock_irqsave(&phba->hbalock, iflags); list_add_tail(&irspiocbq->cq_event.list, - &phba->sli4_hba.sp_rspiocb_work_queue); - /* Indicate ELS ring attention */ - phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING)); + &phba->sli4_hba.sp_queue_event); + phba->hba_flag |= HBA_SP_QUEUE_EVT; spin_unlock_irqrestore(&phba->hbalock, iflags); - workposted = true; - return workposted; + return true; } /** @@ -8769,8 +8664,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) unsigned long iflags; lpfc_sli4_rq_release(hrq, drq); - if (bf_get(lpfc_rcqe_code, rcqe) != CQE_CODE_RECEIVE) - goto out; if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) goto out; @@ -8789,9 +8682,9 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); /* save off the frame for the word thread to process */ list_add_tail(&dma_buf->cq_event.list, - &phba->sli4_hba.sp_rspiocb_work_queue); + &phba->sli4_hba.sp_queue_event); /* Frame received */ - phba->hba_flag |= 
HBA_RECEIVE_BUFFER; + phba->hba_flag |= HBA_SP_QUEUE_EVT; spin_unlock_irqrestore(&phba->hbalock, iflags); workposted = true; break; @@ -8806,7 +8699,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) } out: return workposted; - } /** @@ -8824,38 +8716,38 @@ static bool lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) { - struct lpfc_wcqe_complete wcqe; + struct lpfc_cqe cqevt; bool workposted = false; /* Copy the work queue CQE and convert endian order if needed */ - lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); + lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); /* Check and process for different type of WCQE and dispatch */ - switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { + switch (bf_get(lpfc_cqe_code, &cqevt)) { case CQE_CODE_COMPL_WQE: - /* Process the WQ complete event */ + /* Process the WQ/RQ complete event */ workposted = lpfc_sli4_sp_handle_els_wcqe(phba, - (struct lpfc_wcqe_complete *)&wcqe); + (struct lpfc_wcqe_complete *)&cqevt); break; case CQE_CODE_RELEASE_WQE: /* Process the WQ release event */ lpfc_sli4_sp_handle_rel_wcqe(phba, - (struct lpfc_wcqe_release *)&wcqe); + (struct lpfc_wcqe_release *)&cqevt); break; case CQE_CODE_XRI_ABORTED: /* Process the WQ XRI abort event */ workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, - (struct sli4_wcqe_xri_aborted *)&wcqe); + (struct sli4_wcqe_xri_aborted *)&cqevt); break; case CQE_CODE_RECEIVE: /* Process the RQ event */ workposted = lpfc_sli4_sp_handle_rcqe(phba, - (struct lpfc_rcqe *)&wcqe); + (struct lpfc_rcqe *)&cqevt); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0388 Not a valid WCQE code: x%x\n", - bf_get(lpfc_wcqe_c_code, &wcqe)); + bf_get(lpfc_cqe_code, &cqevt)); break; } return workposted; @@ -10840,6 +10732,105 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, return vport; } +/** + * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp + * @vport: The vport to work on. + * + * This function updates the receive sequence time stamp for this vport. The + * receive sequence time stamp indicates the time that the last frame of the + * the sequence that has been idle for the longest amount of time was received. + * the driver uses this time stamp to indicate if any received sequences have + * timed out. + **/ +void +lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) +{ + struct lpfc_dmabuf *h_buf; + struct hbq_dmabuf *dmabuf = NULL; + + /* get the oldest sequence on the rcv list */ + h_buf = list_get_first(&vport->rcv_buffer_list, + struct lpfc_dmabuf, list); + if (!h_buf) + return; + dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); + vport->rcv_buffer_time_stamp = dmabuf->time_stamp; +} + +/** + * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. + * @vport: The vport that the received sequences were sent to. + * + * This function cleans up all outstanding received sequences. This is called + * by the driver when a link event or user action invalidates all the received + * sequences. 
+ **/ +void +lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) +{ + struct lpfc_dmabuf *h_buf, *hnext; + struct lpfc_dmabuf *d_buf, *dnext; + struct hbq_dmabuf *dmabuf = NULL; + + /* start with the oldest sequence on the rcv list */ + list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { + dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); + list_del_init(&dmabuf->hbuf.list); + list_for_each_entry_safe(d_buf, dnext, + &dmabuf->dbuf.list, list) { + list_del_init(&d_buf->list); + lpfc_in_buf_free(vport->phba, d_buf); + } + lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); + } +} + +/** + * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. + * @vport: The vport that the received sequences were sent to. + * + * This function determines whether any received sequences have timed out by + * first checking the vport's rcv_buffer_time_stamp. If this time_stamp + * indicates that there is at least one timed out sequence this routine will + * go through the received sequences one at a time from most inactive to most + * active to determine which ones need to be cleaned up. Once it has determined + * that a sequence needs to be cleaned up it will simply free up the resources + * without sending an abort. + **/ +void +lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) +{ + struct lpfc_dmabuf *h_buf, *hnext; + struct lpfc_dmabuf *d_buf, *dnext; + struct hbq_dmabuf *dmabuf = NULL; + unsigned long timeout; + int abort_count = 0; + + timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + + vport->rcv_buffer_time_stamp); + if (list_empty(&vport->rcv_buffer_list) || + time_before(jiffies, timeout)) + return; + /* start with the oldest sequence on the rcv list */ + list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { + dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); + timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + + dmabuf->time_stamp); + if (time_before(jiffies, timeout)) + break; + abort_count++; + list_del_init(&dmabuf->hbuf.list); + list_for_each_entry_safe(d_buf, dnext, + &dmabuf->dbuf.list, list) { + list_del_init(&d_buf->list); + lpfc_in_buf_free(vport->phba, d_buf); + } + lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); + } + if (abort_count) + lpfc_update_rcv_time_stamp(vport); +} + /** * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame @@ -10863,6 +10854,7 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) struct hbq_dmabuf *temp_dmabuf = NULL; INIT_LIST_HEAD(&dmabuf->dbuf.list); + dmabuf->time_stamp = jiffies; new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; /* Use the hdr_buf to find the sequence that this frame belongs to */ list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { @@ -10881,6 +10873,7 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) * Queue the buffer on the vport's rcv_buffer_list. 
*/ list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); + lpfc_update_rcv_time_stamp(vport); return dmabuf; } temp_hdr = seq_dmabuf->hbuf.virt; @@ -10888,8 +10881,13 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) list_del_init(&seq_dmabuf->hbuf.list); list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); + lpfc_update_rcv_time_stamp(vport); return dmabuf; } + /* move this sequence to the tail to indicate a young sequence */ + list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); + seq_dmabuf->time_stamp = jiffies; + lpfc_update_rcv_time_stamp(vport); /* find the correct place in the sequence to insert this frame */ list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); @@ -11148,6 +11146,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; /* remove from receive buffer list */ list_del_init(&seq_dmabuf->hbuf.list); + lpfc_update_rcv_time_stamp(vport); /* get the Remote Port's SID */ sid = sli4_sid_from_fc_hdr(fc_hdr); /* Get an iocbq struct to fill in. */ @@ -11274,11 +11273,6 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, struct lpfc_vport *vport; uint32_t fcfi; - /* Clear hba flag and get all received buffers into the cmplq */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~HBA_RECEIVE_BUFFER; - spin_unlock_irq(&phba->hbalock); - /* Process each received buffer */ fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; /* check to see if this a valid type of frame */ @@ -11309,9 +11303,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, /* If not last frame in sequence continue processing frames. */ if (!lpfc_seq_complete(seq_dmabuf)) { /* - * When saving off frames post a new one and mark this - * frame to be freed when it is finished. - **/ + * When saving off frames post a new one and mark this + * frame to be freed when it is finished. + **/ lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); dmabuf->tag = -1; return; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 0e518b12f414..7b12663909a7 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -49,6 +49,7 @@ struct lpfc_iocbq { struct list_head clist; uint16_t iotag; /* pre-assigned IO tag */ uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. 
*/ + struct lpfc_cq_event cq_event; IOCB_t iocb; /* IOCB cmd */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ @@ -79,7 +80,6 @@ struct lpfc_iocbq { struct lpfc_iocbq *); void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); - struct lpfc_cq_event cq_event; }; #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 1f6cb01e6c6b..4a9cf674555e 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -352,7 +352,7 @@ struct lpfc_sli4_hba { unsigned long *rpi_bmask; uint16_t rpi_count; struct lpfc_sli4_flags sli4_flags; - struct list_head sp_rspiocb_work_queue; + struct list_head sp_queue_event; struct list_head sp_cqe_event_pool; struct list_head sp_asynce_work_queue; struct list_head sp_fcp_xri_aborted_work_queue; -- cgit v1.2.3-59-g8ed1b From 0d48fcca1ff5d106b0ac6770a31b13e3630b244a Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 2 Oct 2009 15:17:08 -0400 Subject: [SCSI] lpfc 8.3.5: Update the lpfc driver version to 8.3.5 Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_version.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 9ae20af4bdb7..49727c285a68 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,8 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.4" - +#define LPFC_DRIVER_VERSION "8.3.5" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" -- cgit v1.2.3-59-g8ed1b From d531b37929f412de09e9ad711fdd5b04fa39aca1 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Mon, 16 Nov 2009 20:39:25 +0200 Subject: [SCSI] libosd: osd_dev_is_ver1 - Minor API cleanup define a new osd_dev_is_ver1 that operates on devices and the old osd_req_is_ver1 uses that new API. Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- include/scsi/osd_initiator.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h index 02bd9f716357..f787d24d3bab 100644 --- a/include/scsi/osd_initiator.h +++ b/include/scsi/osd_initiator.h @@ -84,6 +84,15 @@ static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v) #endif } +static inline bool osd_dev_is_ver1(struct osd_dev *od) +{ +#ifdef OSD_VER1_SUPPORT + return od->version == OSD_VER1; +#else + return false; +#endif +} + struct osd_request; typedef void (osd_req_done_fn)(struct osd_request *or, void *private); @@ -120,14 +129,9 @@ struct osd_request { int async_error; }; -/* OSD Version control */ static inline bool osd_req_is_ver1(struct osd_request *or) { -#ifdef OSD_VER1_SUPPORT - return or->osd_dev->version == OSD_VER1; -#else - return false; -#endif + return osd_dev_is_ver1(or->osd_dev); } /* -- cgit v1.2.3-59-g8ed1b From c7d2dc2a204fa37bdf607d4d062dfd14e392aaf1 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Mon, 16 Nov 2009 20:41:03 +0200 Subject: [SCSI] libosd: osd_sense: OSD_CFO_PERMISSIONS Add one more important cdb_field_offset that can be returned with scsi_invalid_field_in_cdb. It is the offset of the permissions_bit_mask field in the capabilities structure. 
Interestingly, the offset is the same for V1/V2 Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- include/scsi/osd_sense.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/scsi/osd_sense.h b/include/scsi/osd_sense.h index ff9b33c773c7..91db543a5502 100644 --- a/include/scsi/osd_sense.h +++ b/include/scsi/osd_sense.h @@ -255,6 +255,9 @@ enum osdv2_cdb_field_offset { OSD_CFO_STARTING_BYTE = OSD_CDB_OFFSET(v2.start_address), OSD_CFO_PARTITION_ID = OSD_CDB_OFFSET(partition), OSD_CFO_OBJECT_ID = OSD_CDB_OFFSET(object), + OSD_CFO_PERMISSIONS = sizeof(struct osd_cdb_head) + + offsetof(struct osd_capability_head, + permissions_bit_mask), }; #endif /* ndef __OSD_SENSE_H__ */ -- cgit v1.2.3-59-g8ed1b From 89f5e1f2f13b1079b8d7ff7d3ade345b7ad7c009 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Mon, 16 Nov 2009 20:44:02 +0200 Subject: [SCSI] osduld: Ref-counting bug fix If scsi has released the device (logout), and exofs has last reference on the osduld_device it will be freed by osd_uld_release() within the call to fput(). But this will oops in cdev_release() which is called after the fops->release. (cdev is embedded within osduld_device). __uld_get/put pair makes sure we have a cdev for the duration of fput() Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_uld.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 0bdef3390902..1ea6447f9418 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -224,7 +224,15 @@ void osduld_put_device(struct osd_dev *od) BUG_ON(od->scsi_device != oud->od.scsi_device); + /* If scsi has released the device (logout), and exofs has last + * reference on oud it will be freed by above osd_uld_release + * within fput below. But this will oops in cdev_release which + * is called after the fops->release. __uld_get/put pair makes + * sure we have a cdev for the duration of fput + */ + __uld_get(oud); fput(od->file); + __uld_put(oud); kfree(od); } } -- cgit v1.2.3-59-g8ed1b From d6ae4333e648492721a098bdc329bbd82d25eb67 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 29 Nov 2009 16:25:26 +0200 Subject: [SCSI] osduld: Use device->release instead of internal kref The true logic of this patch will be clear in the next patch where we use the class_find_device() API. When doing so the use of an internal kref leaves us a narrow window where a find is started while the actual object can go away. Using the device's kobj reference solves this problem because now the same kref is used for both operations. (Remove and find) Core changes * Embed a struct device in uld_ structure and use device_register instead of devie_create. Set __remove to be the device release function. * __uld_get/put is just get_/put_device. Now every thing is accounted for on the device object. Internal kref is removed. * At __remove() we can safely de-allocate the uld_ structure. (The function has moved to avoid forward declaration) Some cleanups * Use class register/unregister is cleaner for this driver now. * cdev ref-counting games are no longer necessary I have incremented the device version string in case of new bugs. Note: Previous bugfix of taking the reference around fput() still applies. 
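The pattern above, reduced to a minimal sketch (the example_* helpers and the device name are hypothetical; the structure layout, __remove() role and get_device()/put_device() usage follow the patch below):

    #include <linux/device.h>
    #include <linux/cdev.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct osd_uld_device {
            struct device class_dev;        /* refcount now lives in class_dev.kobj */
            struct cdev   cdev;
            /* ... */
    };

    static void __remove(struct device *dev)
    {
            struct osd_uld_device *oud =
                    container_of(dev, struct osd_uld_device, class_dev);

            /* runs only when the very last reference is dropped */
            kfree(oud);
    }

    static int example_register(struct device *parent)
    {
            struct osd_uld_device *oud = kzalloc(sizeof(*oud), GFP_KERNEL);
            int err;

            if (!oud)
                    return -ENOMEM;
            device_initialize(&oud->class_dev);
            oud->class_dev.parent  = parent;
            oud->class_dev.release = __remove;      /* replaces the internal kref */
            err = dev_set_name(&oud->class_dev, "osd-example");
            if (!err)
                    err = device_add(&oud->class_dev);
            if (err)
                    put_device(&oud->class_dev);    /* __remove() frees oud */
            return err;
    }

    static void example_get(struct osd_uld_device *oud)
    {
            get_device(&oud->class_dev);            /* was __uld_get(oud) */
    }

    static void example_put(struct osd_uld_device *oud)
    {
            put_device(&oud->class_dev);            /* was __uld_put(oud); may free */
    }

With this shape, a concurrent class_find_device() and a removal both race on one and the same refcount, which is the point of the change.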
Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_uld.c | 162 ++++++++++++++++++++----------------------- include/scsi/osd_initiator.h | 1 - 2 files changed, 77 insertions(+), 86 deletions(-) diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 1ea6447f9418..fc6fc1c4d4d1 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -71,8 +71,7 @@ #define SCSI_OSD_MAX_MINOR 64 static const char osd_name[] = "osd"; -static const char *osd_version_string = "open-osd 0.1.0"; -const char osd_symlink[] = "scsi_osd"; +static const char *osd_version_string = "open-osd 0.2.0"; MODULE_AUTHOR("Boaz Harrosh "); MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko"); @@ -82,15 +81,24 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_OSD); struct osd_uld_device { int minor; - struct kref kref; + struct device class_dev; struct cdev cdev; struct osd_dev od; struct gendisk *disk; - struct device *class_member; }; -static void __uld_get(struct osd_uld_device *oud); -static void __uld_put(struct osd_uld_device *oud); +struct osd_dev_handle { + struct osd_dev od; + struct file *file; + struct osd_uld_device *oud; +} ; + +static DEFINE_IDA(osd_minor_ida); + +static struct class osd_uld_class = { + .owner = THIS_MODULE, + .name = "scsi_osd", +}; /* * Char Device operations @@ -101,7 +109,7 @@ static int osd_uld_open(struct inode *inode, struct file *file) struct osd_uld_device *oud = container_of(inode->i_cdev, struct osd_uld_device, cdev); - __uld_get(oud); + get_device(&oud->class_dev); /* cache osd_uld_device on file handle */ file->private_data = oud; OSD_DEBUG("osd_uld_open %p\n", oud); @@ -114,7 +122,7 @@ static int osd_uld_release(struct inode *inode, struct file *file) OSD_DEBUG("osd_uld_release %p\n", file->private_data); file->private_data = NULL; - __uld_put(oud); + put_device(&oud->class_dev); return 0; } @@ -177,7 +185,7 @@ static const struct file_operations osd_fops = { struct osd_dev *osduld_path_lookup(const char *name) { struct osd_uld_device *oud; - struct osd_dev *od; + struct osd_dev_handle *odh; struct file *file; int error; @@ -186,8 +194,8 @@ struct osd_dev *osduld_path_lookup(const char *name) return ERR_PTR(-EINVAL); } - od = kzalloc(sizeof(*od), GFP_KERNEL); - if (!od) + odh = kzalloc(sizeof(*odh), GFP_KERNEL); + if (unlikely(!odh)) return ERR_PTR(-ENOMEM); file = filp_open(name, O_RDWR, 0); @@ -203,37 +211,39 @@ struct osd_dev *osduld_path_lookup(const char *name) oud = file->private_data; - *od = oud->od; - od->file = file; + odh->od = oud->od; + odh->file = file; + odh->oud = oud; - return od; + return &odh->od; close_file: fput(file); free_od: - kfree(od); + kfree(odh); return ERR_PTR(error); } EXPORT_SYMBOL(osduld_path_lookup); void osduld_put_device(struct osd_dev *od) { - if (od && !IS_ERR(od)) { - struct osd_uld_device *oud = od->file->private_data; + struct osd_dev_handle *odh = + container_of(od, struct osd_dev_handle, od); + struct osd_uld_device *oud = odh->oud; BUG_ON(od->scsi_device != oud->od.scsi_device); /* If scsi has released the device (logout), and exofs has last * reference on oud it will be freed by above osd_uld_release * within fput below. But this will oops in cdev_release which - * is called after the fops->release. __uld_get/put pair makes + * is called after the fops->release. 
A get_/put_ pair makes * sure we have a cdev for the duration of fput */ - __uld_get(oud); - fput(od->file); - __uld_put(oud); - kfree(od); + get_device(&oud->class_dev); + fput(odh->file); + put_device(&oud->class_dev); + kfree(odh); } } EXPORT_SYMBOL(osduld_put_device); @@ -264,8 +274,27 @@ static int __detect_osd(struct osd_uld_device *oud) return 0; } -static struct class *osd_sysfs_class; -static DEFINE_IDA(osd_minor_ida); +static void __remove(struct device *dev) +{ + struct osd_uld_device *oud = container_of(dev, struct osd_uld_device, + class_dev); + struct scsi_device *scsi_device = oud->od.scsi_device; + + if (oud->cdev.owner) + cdev_del(&oud->cdev); + + osd_dev_fini(&oud->od); + scsi_device_put(scsi_device); + + OSD_INFO("osd_remove %s\n", + oud->disk ? oud->disk->disk_name : NULL); + + if (oud->disk) + put_disk(oud->disk); + ida_remove(&osd_minor_ida, oud->minor); + + kfree(oud); +} static int osd_probe(struct device *dev) { @@ -297,7 +326,6 @@ static int osd_probe(struct device *dev) if (NULL == oud) goto err_retract_minor; - kref_init(&oud->kref); dev_set_drvdata(dev, oud); oud->minor = minor; @@ -335,18 +363,25 @@ static int osd_probe(struct device *dev) OSD_ERR("cdev_add failed\n"); goto err_put_disk; } - kobject_get(&oud->cdev.kobj); /* 2nd ref see osd_remove() */ - - /* class_member */ - oud->class_member = device_create(osd_sysfs_class, dev, - MKDEV(SCSI_OSD_MAJOR, oud->minor), "%s", disk->disk_name); - if (IS_ERR(oud->class_member)) { - OSD_ERR("class_device_create failed\n"); - error = PTR_ERR(oud->class_member); + + /* class device member */ + oud->class_dev.devt = oud->cdev.dev; + oud->class_dev.class = &osd_uld_class; + oud->class_dev.parent = dev; + oud->class_dev.release = __remove; + error = dev_set_name(&oud->class_dev, disk->disk_name); + if (error) { + OSD_ERR("dev_set_name failed => %d\n", error); goto err_put_cdev; } - dev_set_drvdata(oud->class_member, oud); + error = device_register(&oud->class_dev); + if (error) { + OSD_ERR("device_register failed => %d\n", error); + goto err_put_cdev; + } + + get_device(&oud->class_dev); OSD_INFO("osd_probe %s\n", disk->disk_name); return 0; @@ -375,54 +410,12 @@ static int osd_remove(struct device *dev) scsi_device); } - if (oud->class_member) - device_destroy(osd_sysfs_class, - MKDEV(SCSI_OSD_MAJOR, oud->minor)); - - /* We have 2 references to the cdev. One is released here - * and also takes down the /dev/osdX mapping. The second - * Will be released in __remove() after all users have released - * the osd_uld_device. - */ - if (oud->cdev.owner) - cdev_del(&oud->cdev); + device_unregister(&oud->class_dev); - __uld_put(oud); + put_device(&oud->class_dev); return 0; } -static void __remove(struct kref *kref) -{ - struct osd_uld_device *oud = container_of(kref, - struct osd_uld_device, kref); - struct scsi_device *scsi_device = oud->od.scsi_device; - - /* now let delete the char_dev */ - kobject_put(&oud->cdev.kobj); - - osd_dev_fini(&oud->od); - scsi_device_put(scsi_device); - - OSD_INFO("osd_remove %s\n", - oud->disk ? 
oud->disk->disk_name : NULL); - - if (oud->disk) - put_disk(oud->disk); - - ida_remove(&osd_minor_ida, oud->minor); - kfree(oud); -} - -static void __uld_get(struct osd_uld_device *oud) -{ - kref_get(&oud->kref); -} - -static void __uld_put(struct osd_uld_device *oud) -{ - kref_put(&oud->kref, __remove); -} - /* * Global driver and scsi registration */ @@ -440,11 +433,10 @@ static int __init osd_uld_init(void) { int err; - osd_sysfs_class = class_create(THIS_MODULE, osd_symlink); - if (IS_ERR(osd_sysfs_class)) { - OSD_ERR("Unable to register sysfs class => %ld\n", - PTR_ERR(osd_sysfs_class)); - return PTR_ERR(osd_sysfs_class); + err = class_register(&osd_uld_class); + if (err) { + OSD_ERR("Unable to register sysfs class => %d\n", err); + return err; } err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), @@ -467,7 +459,7 @@ static int __init osd_uld_init(void) err_out_chrdev: unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR); err_out: - class_destroy(osd_sysfs_class); + class_unregister(&osd_uld_class); return err; } @@ -475,7 +467,7 @@ static void __exit osd_uld_exit(void) { scsi_unregister_driver(&osd_driver.gendrv); unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR); - class_destroy(osd_sysfs_class); + class_unregister(&osd_uld_class); OSD_INFO("UNLOADED %s\n", osd_version_string); } diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h index f787d24d3bab..589e5f0d67b1 100644 --- a/include/scsi/osd_initiator.h +++ b/include/scsi/osd_initiator.h @@ -48,7 +48,6 @@ enum osd_std_version { */ struct osd_dev { struct scsi_device *scsi_device; - struct file *file; unsigned def_timeout; #ifdef OSD_VER1_SUPPORT -- cgit v1.2.3-59-g8ed1b From 2cdd6410e5a1665823f2a048fc7f8f6a8384be1d Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 29 Nov 2009 16:26:45 +0200 Subject: [SCSI] libosd: osd_dev_info: Unique Identification of an OSD device Define an osd_dev_info structure that Uniquely identifies an OSD device lun on the network. The identification is built from unique target attributes and is the same for all network/SAN machines. osduld_info_lookup() - NEW New API that will lookup an osd_dev by its osd_dev_info. This is used by pNFS-objects for cross network global device identification. And by exofs multy-device support, the device info is specified in the on-disk exofs device table. osduld_device_info() - NEW Given an osd_dev handle returns its associated osd_dev_info. The ULD fetches this information at startup and hangs it on each OSD device. (This is a fast operation that can be called at any condition) osduld_device_same() - NEW With a given osd_dev at one hand and an osd_dev_info at another, we would like to know if they are the same device. Two osd_dev handles can be checked by: osduld_device_same(od1, osduld_device_info(od2)); osd_auto_detect_ver() - REVISED Now returns an osd_dev_info structure. Is only called once by ULD as before. See added comments for how to use. 
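A minimal usage sketch of the new calls (hypothetical caller; the exofs/pNFS call sites are not shown here):

    #include <linux/err.h>
    #include <scsi/osd_initiator.h>

    /* obtain a second handle to the OSD LUN that 'od' already refers to,
     * going through the network-unique identification rather than a path */
    static struct osd_dev *example_clone_handle(struct osd_dev *od)
    {
            const struct osd_dev_info *odi = osduld_device_info(od);
            struct osd_dev *od2 = osduld_info_lookup(odi);  /* IS_ERR() on failure */

            if (IS_ERR(od2))
                    return od2;

            /* both handles should now name the same target */
            if (!osduld_device_same(od2, odi)) {
                    osduld_put_device(od2);
                    return ERR_PTR(-ENODEV);
            }
            return od2;
    }

As noted in the osduld_info_lookup() comment below, a zero systemid_len and/or osdname_len in the passed osd_dev_info acts as a don't-care.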
Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 26 +++++++--- drivers/scsi/osd/osd_uld.c | 100 +++++++++++++++++++++++++++++++++++++-- include/scsi/osd_initiator.h | 38 +++++++++++++-- 3 files changed, 151 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 7a117c18114c..60b7ca1e9bc0 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -73,7 +73,8 @@ static const char *_osd_ver_desc(struct osd_request *or) #define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len) -static int _osd_print_system_info(struct osd_dev *od, void *caps) +static int _osd_get_print_system_info(struct osd_dev *od, + void *caps, struct osd_dev_info *odi) { struct osd_request *or; struct osd_attr get_attrs[] = { @@ -137,8 +138,12 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n", (char *)pFirst); - pFirst = get_attrs[a].val_ptr; - OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst); + odi->osdname_len = get_attrs[a].len; + /* Avoid NULL for memcmp optimization 0-length is good enough */ + odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL); + if (odi->osdname_len) + memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len); + OSD_INFO("OSD_NAME [%s]\n", odi->osdname); a++; pFirst = get_attrs[a++].val_ptr; @@ -171,6 +176,14 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) sid_dump, sizeof(sid_dump), true); OSD_INFO("OSD_SYSTEM_ID(%d)\n" " [%s]\n", len, sid_dump); + + if (unlikely(len > sizeof(odi->systemid))) { + OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). " + "device idetification might not work\n", len); + len = sizeof(odi->systemid); + } + odi->systemid_len = len; + memcpy(odi->systemid, get_attrs[a].val_ptr, len); a++; } out: @@ -178,16 +191,17 @@ out: return ret; } -int osd_auto_detect_ver(struct osd_dev *od, void *caps) +int osd_auto_detect_ver(struct osd_dev *od, + void *caps, struct osd_dev_info *odi) { int ret; /* Auto-detect the osd version */ - ret = _osd_print_system_info(od, caps); + ret = _osd_get_print_system_info(od, caps, odi); if (ret) { osd_dev_set_ver(od, OSD_VER1); OSD_DEBUG("converting to OSD1\n"); - ret = _osd_print_system_info(od, caps); + ret = _osd_get_print_system_info(od, caps, odi); } return ret; diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index fc6fc1c4d4d1..0a90702b3d71 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -84,6 +84,7 @@ struct osd_uld_device { struct device class_dev; struct cdev cdev; struct osd_dev od; + struct osd_dev_info odi; struct gendisk *disk; }; @@ -225,6 +226,72 @@ free_od: } EXPORT_SYMBOL(osduld_path_lookup); +static inline bool _the_same_or_null(const u8 *a1, unsigned a1_len, + const u8 *a2, unsigned a2_len) +{ + if (!a2_len) /* User string is Empty means don't care */ + return true; + + if (a1_len != a2_len) + return false; + + return 0 == memcmp(a1, a2, a1_len); +} + +struct find_oud_t { + const struct osd_dev_info *odi; + struct device *dev; + struct osd_uld_device *oud; +} ; + +int _mach_odi(struct device *dev, void *find_data) +{ + struct osd_uld_device *oud = container_of(dev, struct osd_uld_device, + class_dev); + struct find_oud_t *fot = find_data; + const struct osd_dev_info *odi = fot->odi; + + if (_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len, + odi->systemid, odi->systemid_len) && + _the_same_or_null(oud->odi.osdname, 
oud->odi.osdname_len, + odi->osdname, odi->osdname_len)) { + OSD_DEBUG("found device sysid_len=%d osdname=%d\n", + odi->systemid_len, odi->osdname_len); + fot->oud = oud; + return 1; + } else { + return 0; + } +} + +/* osduld_info_lookup - Loop through all devices, return the requested osd_dev. + * + * if @odi->systemid_len and/or @odi->osdname_len are zero, they act as a don't + * care. .e.g if they're both zero /dev/osd0 is returned. + */ +struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi) +{ + struct find_oud_t find = {.odi = odi}; + + find.dev = class_find_device(&osd_uld_class, NULL, &find, _mach_odi); + if (likely(find.dev)) { + struct osd_dev_handle *odh = kzalloc(sizeof(*odh), GFP_KERNEL); + + if (unlikely(!odh)) { + put_device(find.dev); + return ERR_PTR(-ENOMEM); + } + + odh->od = find.oud->od; + odh->oud = find.oud; + + return &odh->od; + } + + return ERR_PTR(-ENODEV); +} +EXPORT_SYMBOL(osduld_info_lookup); + void osduld_put_device(struct osd_dev *od) { if (od && !IS_ERR(od)) { @@ -240,14 +307,39 @@ void osduld_put_device(struct osd_dev *od) * is called after the fops->release. A get_/put_ pair makes * sure we have a cdev for the duration of fput */ - get_device(&oud->class_dev); - fput(odh->file); + if (odh->file) { + get_device(&oud->class_dev); + fput(odh->file); + } put_device(&oud->class_dev); kfree(odh); } } EXPORT_SYMBOL(osduld_put_device); +const struct osd_dev_info *osduld_device_info(struct osd_dev *od) +{ + struct osd_dev_handle *odh = + container_of(od, struct osd_dev_handle, od); + return &odh->oud->odi; +} +EXPORT_SYMBOL(osduld_device_info); + +bool osduld_device_same(struct osd_dev *od, const struct osd_dev_info *odi) +{ + struct osd_dev_handle *odh = + container_of(od, struct osd_dev_handle, od); + struct osd_uld_device *oud = odh->oud; + + return (oud->odi.systemid_len == odi->systemid_len) && + _the_same_or_null(oud->odi.systemid, oud->odi.systemid_len, + odi->systemid, odi->systemid_len) && + (oud->odi.osdname_len == odi->osdname_len) && + _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len, + odi->osdname, odi->osdname_len); +} +EXPORT_SYMBOL(osduld_device_same); + /* * Scsi Device operations */ @@ -268,7 +360,7 @@ static int __detect_osd(struct osd_uld_device *oud) OSD_ERR("warning: scsi_test_unit_ready failed\n"); osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true); - if (osd_auto_detect_ver(&oud->od, caps)) + if (osd_auto_detect_ver(&oud->od, caps, &oud->odi)) return -ENODEV; return 0; @@ -280,6 +372,8 @@ static void __remove(struct device *dev) class_dev); struct scsi_device *scsi_device = oud->od.scsi_device; + kfree(oud->odi.osdname); + if (oud->cdev.owner) cdev_del(&oud->cdev); diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h index 589e5f0d67b1..3ec346e15dda 100644 --- a/include/scsi/osd_initiator.h +++ b/include/scsi/osd_initiator.h @@ -55,10 +55,24 @@ struct osd_dev { #endif }; -/* Retrieve/return osd_dev(s) for use by Kernel clients */ -struct osd_dev *osduld_path_lookup(const char *dev_name); /*Use IS_ERR/ERR_PTR*/ +/* Unique Identification of an OSD device */ +struct osd_dev_info { + unsigned systemid_len; + u8 systemid[OSD_SYSTEMID_LEN]; + unsigned osdname_len; + u8 *osdname; +}; + +/* Retrieve/return osd_dev(s) for use by Kernel clients + * Use IS_ERR/ERR_PTR on returned "osd_dev *". 
+ */ +struct osd_dev *osduld_path_lookup(const char *dev_name); +struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi); void osduld_put_device(struct osd_dev *od); +const struct osd_dev_info *osduld_device_info(struct osd_dev *od); +bool osduld_device_same(struct osd_dev *od, const struct osd_dev_info *odi); + /* Add/remove test ioctls from external modules */ typedef int (do_test_fn)(struct osd_dev *od, unsigned cmd, unsigned long arg); int osduld_register_test(unsigned ioctl, do_test_fn *do_test); @@ -68,8 +82,24 @@ void osduld_unregister_test(unsigned ioctl); void osd_dev_init(struct osd_dev *od, struct scsi_device *scsi_device); void osd_dev_fini(struct osd_dev *od); -/* some hi level device operations */ -int osd_auto_detect_ver(struct osd_dev *od, void *caps); /* GFP_KERNEL */ +/** + * osd_auto_detect_ver - Detect the OSD version, return Unique Identification + * + * @od: OSD target lun handle + * @caps: Capabilities authorizing OSD root read attributes access + * @odi: Retrieved information uniquely identifying the osd target lun + * Note: odi->osdname must be kfreed by caller. + * + * Auto detects the OSD version of the OSD target and sets the @od + * accordingly. Meanwhile also returns the "system id" and "osd name" root + * attributes which uniquely identify the OSD target. This member is usually + * called by the ULD. ULD users should call osduld_device_info(). + * This rutine allocates osd requests and memory at GFP_KERNEL level and might + * sleep. + */ +int osd_auto_detect_ver(struct osd_dev *od, + void *caps, struct osd_dev_info *odi); + static inline struct request_queue *osd_request_queue(struct osd_dev *od) { return od->scsi_device->request_queue; -- cgit v1.2.3-59-g8ed1b From 71ecb74b15377a6c0e0e6ea95d4b549580fb4d48 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Mon, 16 Nov 2009 20:47:07 +0200 Subject: [SCSI] libosd: bug in osd_req_decode_sense_full() The (never tested) osd_sense_attribute_identification case has never worked. The loop was never advanced on. Fix it to work as intended. On 10/30/2009 04:39 PM, Roel Kluin wrote: I found this by code analysis, searching for while loops that test a local variable, but do not modify the variable. 
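The bug class, reduced to a self-contained sketch (stand-in struct and function names, not the driver code itself): the broken loop tested a length it never modified, so the attribute list was never walked; the fixed form consumes one fixed-size element per pass.

    #include <stddef.h>

    struct attr { unsigned page, id; };     /* stand-in for the sense attribute */

    /* broken shape:  while (len < 0) { ... }  -- 'len' untouched, never true;
     * fixed shape below: shrink 'len' and advance the cursor each iteration */
    static size_t count_attrs(const struct attr *pattr, size_t len)
    {
            size_t n = 0;

            while (len >= sizeof(*pattr)) {
                    n++;
                    len -= sizeof(*pattr);
                    pattr++;
            }
            return n;
    }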
Reported-by: Roel Kluin Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 60b7ca1e9bc0..5e90d19fddf8 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1583,15 +1583,14 @@ int osd_req_decode_sense_full(struct osd_request *or, { struct osd_sense_attributes_data_descriptor *osadd = cur_descriptor; - int len = min(cur_len, sense_len); - int i = 0; + unsigned len = min(cur_len, sense_len); struct osd_sense_attr *pattr = osadd->sense_attrs; - while (len < 0) { + while (len >= sizeof(*pattr)) { u32 attr_page = be32_to_cpu(pattr->attr_page); u32 attr_id = be32_to_cpu(pattr->attr_id); - if (i++ == 0) { + if (!osi->attr.attr_page) { osi->attr.attr_page = attr_page; osi->attr.attr_id = attr_id; } @@ -1602,6 +1601,8 @@ int osd_req_decode_sense_full(struct osd_request *or, bad_attr_list++; max_attr--; } + + len -= sizeof(*pattr); OSD_SENSE_PRINT2( "osd_sense_attribute_identification" "attr_page=0x%x attr_id=0x%x\n", -- cgit v1.2.3-59-g8ed1b From eff21490c91f981126f0ead3c081dde4f425d387 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Mon, 16 Nov 2009 20:47:47 +0200 Subject: [SCSI] libosd: Bugfix of error handling in attributes-list decoding When an error was detected in an attribute list do to a target bug. We would print an error but spin endlessly regardless. Fix it. Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 5e90d19fddf8..ba25b1e58a6c 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1167,6 +1167,7 @@ int osd_req_decode_get_attr_list(struct osd_request *or, "c=%d r=%d n=%d\n", cur_bytes, returned_bytes, n); oa->val_ptr = NULL; + cur_bytes = returned_bytes; /* break the caller loop */ break; } -- cgit v1.2.3-59-g8ed1b From aa9fffbe2c4db4557248c5c626a85bf3c7867044 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Mon, 16 Nov 2009 20:48:38 +0200 Subject: [SCSI] libosd: Error handling revamped Administer some love to the osd_req_decode_sense function * Fix a bad bug with osd_req_decode_sense(). If there was no scsi residual, .i.e the request never reached the target, then all the osd_sense_info members where garbage. * Add grossly missing in/out_resid to osd_sense_info and fill them in properly. * Define an osd_err_priority enum which divides the possible errors into 7 categories in ascending severity. Each category is also assigned a Linux return code translation. Analyze the different osd/scsi/block returned errors and set the proper osd_err_priority and Linux return code accordingly. * extra check a few situations so not to get stuck with inconsistent error view. Example an empty residual with an error code, and other places ... Lots of libosd's osd_req_decode_sense clients had this logic in some form or another. Consolidate all these into one place that should actually know about osd returns. Thous translating it to a more abstract error. 
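For illustration, a sketch of how a client might act on the categorized result (hypothetical helper name; the priority-to-errno mapping follows the enum added below):

    #include <linux/errno.h>
    #include <scsi/osd_initiator.h>

    /* 'osi' was filled in by osd_req_decode_sense_full() after execution */
    static int example_osd_result(const struct osd_sense_info *osi)
    {
            switch (osi->osd_err_pri) {
            case OSD_ERR_PRI_NO_ERROR:
                    return 0;               /* includes recovered errors */
            case OSD_ERR_PRI_CLEAR_PAGES:
                    /* returned data pages are not valid; clear_highpage()
                     * them before handing them to anyone */
                    return -EFAULT;
            case OSD_ERR_PRI_RESOURCE:
                    return -ENOMEM;
            case OSD_ERR_PRI_BAD_CRED:
                    return -EINVAL;
            case OSD_ERR_PRI_NO_ACCESS:
                    return -EACCES;
            case OSD_ERR_PRI_NOT_FOUND:
                    return -ENOENT;
            case OSD_ERR_PRI_NO_SPACE:
                    return -ENOSPC;
            case OSD_ERR_PRI_UNREACHABLE:
            default:
                    /* osi->in_resid / osi->out_resid say how much of the
                     * transfer did not complete */
                    return -EIO;
            }
    }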
Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 85 ++++++++++++++++++++++++++++++++++------ include/scsi/osd_initiator.h | 26 +++++++++++- 2 files changed, 99 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index ba25b1e58a6c..950202a70bcf 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -475,7 +475,8 @@ EXPORT_SYMBOL(osd_end_request); int osd_execute_request(struct osd_request *or) { - return blk_execute_rq(or->request->q, NULL, or->request, 0); + return or->async_error = + blk_execute_rq(or->request->q, NULL, or->request, 0); } EXPORT_SYMBOL(osd_execute_request); @@ -485,8 +486,12 @@ static void osd_request_async_done(struct request *req, int error) or->async_error = error; - if (error) - OSD_DEBUG("osd_request_async_done error recieved %d\n", error); + if (unlikely(error)) { + OSD_DEBUG("osd_request_async_done error recieved %d " + "errors 0x%x\n", error, req->errors); + if (!req->errors) /* don't miss out on this one */ + req->errors = error; + } if (or->async_done) or->async_done(or, or->async_private); @@ -1451,6 +1456,15 @@ int osd_finalize_request(struct osd_request *or, } EXPORT_SYMBOL(osd_finalize_request); +static bool _is_osd_security_code(int code) +{ + return (code == osd_security_audit_value_frozen) || + (code == osd_security_working_key_frozen) || + (code == osd_nonce_not_unique) || + (code == osd_nonce_timestamp_out_of_range) || + (code == osd_invalid_dataout_buffer_integrity_check_value); +} + #define OSD_SENSE_PRINT1(fmt, a...) \ do { \ if (__cur_sense_need_output) \ @@ -1473,9 +1487,16 @@ int osd_req_decode_sense_full(struct osd_request *or, #else bool __cur_sense_need_output = !silent; #endif + int ret; - if (!or->request->errors) + if (likely(!or->request->errors)) { + osi->out_resid = 0; + osi->in_resid = 0; return 0; + } + + osi = osi ? : &local_osi; + memset(osi, 0, sizeof(*osi)); ssdb = or->request->sense; sense_len = or->request->sense_len; @@ -1483,17 +1504,15 @@ int osd_req_decode_sense_full(struct osd_request *or, OSD_ERR("Block-layer returned error(0x%x) but " "sense_len(%u) || key(%d) is empty\n", or->request->errors, sense_len, ssdb->sense_key); - return -EIO; + goto analyze; } if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) { OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n", ssdb->response_code, sense_len); - return -EIO; + goto analyze; } - osi = osi ? : &local_osi; - memset(osi, 0, sizeof(*osi)); osi->key = ssdb->sense_key; osi->additional_code = be16_to_cpu(ssdb->additional_sense_code); original_sense_len = ssdb->additional_sense_length + 8; @@ -1503,9 +1522,10 @@ int osd_req_decode_sense_full(struct osd_request *or, __cur_sense_need_output = (osi->key > scsi_sk_recovered_error); #endif OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) " - "additional_code=0x%x\n", + "additional_code=0x%x async_error=%d errors=0x%x\n", osi->key, original_sense_len, sense_len, - osi->additional_code); + osi->additional_code, or->async_error, + or->request->errors); if (original_sense_len < sense_len) sense_len = original_sense_len; @@ -1637,7 +1657,50 @@ int osd_req_decode_sense_full(struct osd_request *or, cur_descriptor += cur_len; } - return (osi->key > scsi_sk_recovered_error) ? -EIO : 0; +analyze: + if (!osi->key) { + /* scsi sense is Empty, the request was never issued to target + * linux return code might tell us what happened. 
+ */ + if (or->async_error == -ENOMEM) + osi->osd_err_pri = OSD_ERR_PRI_RESOURCE; + else + osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE; + ret = or->async_error; + } else if (osi->key <= scsi_sk_recovered_error) { + osi->osd_err_pri = 0; + ret = 0; + } else if (osi->additional_code == scsi_invalid_field_in_cdb) { + if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) { + osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES; + ret = -EFAULT; /* caller should recover from this */ + } else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) { + osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND; + ret = -ENOENT; + } else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) { + osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS; + ret = -EACCES; + } else { + osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED; + ret = -EINVAL; + } + } else if (osi->additional_code == osd_quota_error) { + osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE; + ret = -ENOSPC; + } else if (_is_osd_security_code(osi->additional_code)) { + osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED; + ret = -EINVAL; + } else { + osi->osd_err_pri = OSD_ERR_PRI_EIO; + ret = -EIO; + } + + if (or->out.req) + osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes; + if (or->in.req) + osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes; + + return ret; } EXPORT_SYMBOL(osd_req_decode_sense_full); diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h index 3ec346e15dda..39d6d1097153 100644 --- a/include/scsi/osd_initiator.h +++ b/include/scsi/osd_initiator.h @@ -267,7 +267,7 @@ int osd_execute_request_async(struct osd_request *or, * @bad_attr_list - List of failing attributes (optional) * @max_attr - Size of @bad_attr_list. * - * After execution, sense + return code can be analyzed using this function. The + * After execution, osd_request results are analyzed using this function. The * return code is the final disposition on the error. So it is possible that a * CHECK_CONDITION was returned from target but this will return NO_ERROR, for * example on recovered errors. All parameters are optional if caller does @@ -276,7 +276,31 @@ int osd_execute_request_async(struct osd_request *or, * of the SCSI_OSD_DPRINT_SENSE Kconfig value. Set @silent if you know the * command would routinely fail, to not spam the dmsg file. */ + +/** + * osd_err_priority - osd categorized return codes in ascending severity. + * + * The categories are borrowed from the pnfs_osd_errno enum. + * See comments for translated Linux codes returned by osd_req_decode_sense. 
+ */ +enum osd_err_priority { + OSD_ERR_PRI_NO_ERROR = 0, + /* Recoverable, caller should clear_highpage() all pages */ + OSD_ERR_PRI_CLEAR_PAGES = 1, /* -EFAULT */ + OSD_ERR_PRI_RESOURCE = 2, /* -ENOMEM */ + OSD_ERR_PRI_BAD_CRED = 3, /* -EINVAL */ + OSD_ERR_PRI_NO_ACCESS = 4, /* -EACCES */ + OSD_ERR_PRI_UNREACHABLE = 5, /* any other */ + OSD_ERR_PRI_NOT_FOUND = 6, /* -ENOENT */ + OSD_ERR_PRI_NO_SPACE = 7, /* -ENOSPC */ + OSD_ERR_PRI_EIO = 8, /* -EIO */ +}; + struct osd_sense_info { + u64 out_resid; /* Zero on success otherwise out residual */ + u64 in_resid; /* Zero on success otherwise in residual */ + enum osd_err_priority osd_err_pri; + int key; /* one of enum scsi_sense_keys */ int additional_code ; /* enum osd_additional_sense_codes */ union { /* Sense specific information */ -- cgit v1.2.3-59-g8ed1b From 0899638688f223fd9e9fee60d662665e11693d12 Mon Sep 17 00:00:00 2001 From: Martin Michlmayr Date: Mon, 16 Nov 2009 20:49:25 +0200 Subject: [SCSI] osd_protocol.h: Add missing #include include/scsi/osd_protocol.h uses ALIGN() without an #include , leading to: | include/scsi/osd_protocol.h:362: error: implicit declaration of function 'ALIGN' Signed-off-by: Martin Michlmayr Signed-off-by: Boaz Harrosh Cc: Stable Tree Signed-off-by: James Bottomley --- include/scsi/osd_protocol.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h index 2cc8e8b1cc19..685661283540 100644 --- a/include/scsi/osd_protocol.h +++ b/include/scsi/osd_protocol.h @@ -17,6 +17,7 @@ #define __OSD_PROTOCOL_H__ #include +#include #include #include -- cgit v1.2.3-59-g8ed1b From 1acf3b06f77a48b1607534408866473fb8018a65 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 17 Nov 2009 14:53:20 -0800 Subject: [SCSI] fix func names in kernel-doc Fix scsi_devinfo.c kernel-doc function names to match actual function names. Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: James Bottomley --- drivers/scsi/scsi_devinfo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 802e91c8892e..37af178b2d17 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -453,7 +453,7 @@ int scsi_get_device_flags(struct scsi_device *sdev, /** - * get_device_flags_keyed - get device specific flags from the dynamic device list. + * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list * @sdev: &scsi_device to get flags for * @vendor: vendor name * @model: model name @@ -684,7 +684,7 @@ MODULE_PARM_DESC(default_dev_flags, "scsi default device flag integer value"); /** - * scsi_dev_info_list_delete - called from scsi.c:exit_scsi to remove the scsi_dev_info_list. + * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list **/ void scsi_exit_devinfo(void) { -- cgit v1.2.3-59-g8ed1b From 832151f45806613f203c4c0308c1566d882b971f Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Tue, 17 Nov 2009 14:53:22 -0800 Subject: [SCSI] st: fix test of value range in st_set_options() value cannot logically be less than START and greater than BUFFERSIZE. 
#define EXTENDED_SENSE_START 18 // vi include/scsi/scsi_cmnd.h +105 #define SCSI_SENSE_BUFFERSIZE 96 [akpm@linux-foundation.org: fix warning] Signed-off-by: Roel Kluin Signed-off-by: Andrew Morton Acked-by: Kai Makisara Signed-off-by: James Bottomley --- drivers/scsi/st.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 12d58a7ed6bc..ad59abb47722 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -2280,7 +2280,8 @@ static int st_set_options(struct scsi_tape *STp, long options) } else if (code == MT_ST_SET_CLN) { value = (options & ~MT_ST_OPTIONS) & 0xff; if (value != 0 && - value < EXTENDED_SENSE_START && value >= SCSI_SENSE_BUFFERSIZE) + (value < EXTENDED_SENSE_START || + value >= SCSI_SENSE_BUFFERSIZE)) return (-EINVAL); STp->cln_mode = value; STp->cln_sense_mask = (options >> 8) & 0xff; -- cgit v1.2.3-59-g8ed1b From c868595d5686e97183bc1ad85502835d81d7a457 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 18 Nov 2009 15:39:16 -0500 Subject: [SCSI] lpfc 8.3.6 : FCoE Protocol Fixes FCoE Protocol fixes. - Fixed FIP frame designation for ELS commands. - Fix CVL received on Port 1 not processed by driver. - Fix Zeroed frame on wire after FLOGI - Fix vport keep-alive does not contain the correct WWN. Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 4 ++-- drivers/scsi/lpfc/lpfc_els.c | 21 +++++++++++++++++---- drivers/scsi/lpfc/lpfc_hbadisc.c | 8 +++++--- drivers/scsi/lpfc/lpfc_hw.h | 6 ++---- drivers/scsi/lpfc/lpfc_hw4.h | 10 ++++++++-- drivers/scsi/lpfc/lpfc_init.c | 4 ++-- drivers/scsi/lpfc/lpfc_mbox.c | 7 +++++++ drivers/scsi/lpfc/lpfc_sli.c | 12 ++++++++++-- drivers/scsi/lpfc/lpfc_sli.h | 3 ++- drivers/scsi/lpfc/lpfc_vport.c | 2 +- 10 files changed, 56 insertions(+), 21 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index ebeddbe86e67..2fd3e45c577e 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -290,8 +290,8 @@ struct lpfc_vport { uint16_t vpi; uint16_t vfi; - uint8_t vfi_state; -#define LPFC_VFI_REGISTERED 0x1 + uint8_t vpi_state; +#define LPFC_VPI_REGISTERED 0x1 uint32_t fc_flag; /* FC flags */ /* Several of these flags are HBA centric and should be moved to diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index fe0a33c9b874..e9e423f28f8a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -177,9 +177,22 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, ((elscmd == ELS_CMD_FLOGI) || (elscmd == ELS_CMD_FDISC) || (elscmd == ELS_CMD_LOGO))) - elsiocb->iocb_flag |= LPFC_FIP_ELS; + switch (elscmd) { + case ELS_CMD_FLOGI: + elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) + & LPFC_FIP_ELS_ID_MASK); + break; + case ELS_CMD_FDISC: + elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) + & LPFC_FIP_ELS_ID_MASK); + break; + case ELS_CMD_LOGO: + elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) + & LPFC_FIP_ELS_ID_MASK); + break; + } else - elsiocb->iocb_flag &= ~LPFC_FIP_ELS; + elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; icmd = &elsiocb->iocb; @@ -591,7 +604,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } else { ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); - if (vport->vfi_state & LPFC_VFI_REGISTERED) { + if (vport->vpi_state & LPFC_VPI_REGISTERED) { lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); } else @@ -5401,7 +5414,7 @@ 
lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (lpfc_els_chk_latt(vport)) goto dropit; - /* Ignore traffic recevied during vport shutdown. */ + /* Ignore traffic received during vport shutdown. */ if (vport->load_flag & FC_UNLOADING) goto dropit; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 20fca3f6d43b..3c06aa54a3e5 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1798,8 +1798,8 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_vport_set_state(vport, FC_VPORT_FAILED); goto fail_free_mem; } - /* Mark the vport has registered with its VFI */ - vport->vfi_state |= LPFC_VFI_REGISTERED; + /* The VPI is implicitly registered when the VFI is registered */ + vport->vpi_state |= LPFC_VPI_REGISTERED; if (vport->port_state == LPFC_FABRIC_CFG_LINK) { lpfc_start_fdiscs(phba); @@ -2257,6 +2257,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) mb->mbxStatus); break; } + vport->vpi_state &= ~LPFC_VPI_REGISTERED; vport->unreg_vpi_cmpl = VPORT_OK; mempool_free(pmb, phba->mbox_mem_pool); /* @@ -2314,6 +2315,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) goto out; } + vport->vpi_state |= LPFC_VPI_REGISTERED; vport->num_disc_nodes = 0; /* go thru NPR list and issue ELS PLOGIs */ if (vport->fc_npr_cnt) @@ -4464,7 +4466,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { lpfc_mbx_unreg_vpi(vports[i]); vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; + vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; } lpfc_destroy_vport_work_array(phba, vports); diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 8274f998ef2f..7070c77357a9 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -2293,8 +2293,7 @@ typedef struct { uint32_t rsvd1; uint32_t rsvd2:8; uint32_t sid:24; - uint32_t rsvd3; - uint32_t rsvd4; + uint32_t wwn[2]; uint32_t rsvd5; uint16_t vfi; uint16_t vpi; @@ -2302,8 +2301,7 @@ typedef struct { uint32_t rsvd1; uint32_t sid:24; uint32_t rsvd2:8; - uint32_t rsvd3; - uint32_t rsvd4; + uint32_t wwn[2]; uint32_t rsvd5; uint16_t vpi; uint16_t vfi; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 4f03f1d876d0..95f8b4e0063d 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -453,6 +453,13 @@ struct lpfc_wqe_generic{ #define lpfc_wqe_gen_wqec_SHIFT 7 #define lpfc_wqe_gen_wqec_MASK 0x00000001 #define lpfc_wqe_gen_wqec_WORD word11 +#define ELS_ID_FLOGI 3 +#define ELS_ID_FDISC 2 +#define ELS_ID_LOGO 1 +#define ELS_ID_DEFAULT 0 +#define lpfc_wqe_gen_els_id_SHIFT 4 +#define lpfc_wqe_gen_els_id_MASK 0x00000003 +#define lpfc_wqe_gen_els_id_WORD word11 #define lpfc_wqe_gen_cmd_type_SHIFT 0 #define lpfc_wqe_gen_cmd_type_MASK 0x0000000F #define lpfc_wqe_gen_cmd_type_WORD word11 @@ -1395,8 +1402,7 @@ struct lpfc_mbx_reg_vfi { #define lpfc_reg_vfi_fcfi_SHIFT 0 #define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF #define lpfc_reg_vfi_fcfi_WORD word2 - uint32_t word3_rsvd; - uint32_t word4_rsvd; + uint32_t wwn[2]; struct ulp_bde64 bde; uint32_t word8_rsvd; uint32_t word9_rsvd; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index d7385d258f78..02268a1eec69 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2229,7 +2229,7 @@ lpfc_offline_prep(struct lpfc_hba * phba) if (vports[i]->load_flag & 
FC_UNLOADING) continue; - vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; + vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; shost = lpfc_shost_from_vport(vports[i]); list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, @@ -3047,7 +3047,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, "2718 Clear Virtual Link Received for VPI 0x%x" " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); vport = lpfc_find_vport_by_vpid(phba, - acqe_fcoe->index /*- phba->vpi_base*/); + acqe_fcoe->index - phba->vpi_base); if (!vport) break; ndlp = lpfc_findnode_did(vport, Fabric_DID); diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 51c9a1f576f6..a9afd8b94b6a 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -820,6 +820,10 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb) mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; mb->un.varRegVpi.sid = vport->fc_myDID; mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; + memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname, + sizeof(struct lpfc_name)); + mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]); + mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]); mb->mbxCommand = MBX_REG_VPI; mb->mbxOwner = OWN_HOST; @@ -1818,6 +1822,9 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); + memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); + reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); + reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); reg_vfi->bde.addrHigh = putPaddrHigh(phys); reg_vfi->bde.addrLow = putPaddrLow(phys); reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index c4b19d094d39..ce0a1a1c4792 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5756,12 +5756,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, uint8_t cmnd; uint16_t xritag; struct ulp_bde64 *bpl = NULL; + uint32_t els_id = ELS_ID_DEFAULT; fip = phba->hba_flag & HBA_FIP_SUPPORT; /* The fcp commands will set command type */ if (iocbq->iocb_flag & LPFC_IO_FCP) command_type = FCP_COMMAND; - else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS)) + else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) command_type = ELS_COMMAND_FIP; else command_type = ELS_COMMAND_NON_FIP; @@ -5822,6 +5823,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); /* CCP CCPE PV PRI in word10 were set in the memcpy */ + + if (command_type == ELS_COMMAND_FIP) { + els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) + >> LPFC_FIP_ELS_ID_SHIFT); + } + bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id); + break; case CMD_XMIT_SEQUENCE64_CR: /* word3 iocb=io_tag32 wqe=payload_offset */ @@ -11282,7 +11290,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, } fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); - if (!vport) { + if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { /* throw out the frame */ lpfc_in_buf_free(phba, &dmabuf->dbuf); return; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 7b12663909a7..174dcda32195 
100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -60,7 +60,8 @@ struct lpfc_iocbq { #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ -#define LPFC_FIP_ELS 0x40 +#define LPFC_FIP_ELS_ID_MASK 0xc0 /* ELS_ID range 0-3 */ +#define LPFC_FIP_ELS_ID_SHIFT 6 uint8_t abort_count; uint8_t rsvd2; diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 606efa767548..096d178c4c86 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -389,7 +389,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) * by the port. */ if ((phba->sli_rev == LPFC_SLI_REV4) && - (pport->vfi_state & LPFC_VFI_REGISTERED)) { + (pport->vpi_state & LPFC_VPI_REGISTERED)) { rc = lpfc_sli4_init_vpi(phba, vpi); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, -- cgit v1.2.3-59-g8ed1b From 5ffc266ee7a62741ebee89ede15049ec0f02fa75 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 18 Nov 2009 15:39:44 -0500 Subject: [SCSI] lpfc 8.3.6 : FC Protocol Fixes FC protocol fixes. - Fix send sequence logic to handle multi SGL IOCBs. - Fix FDISC completion always setting VPORT state to failed. - Ported the fix on reporting of max_vpi to uppper layer. - Fix incorrect number of Vports allowed to be created. - Fixed Dead FCoE port after creating vports. - Added handling of ELS request for Reinstate Recovery Qualifier (RRQ) - Handle unsolicited CT exchange initiator receiving CT exchange ABTS - Migrate LUN queue depth ramp up code to scsi mid-layer. - Made ABTS WQE go to the same WQ as the WQE to be aborted. - Fix Vport does not rediscover after FCF goes away. - Fixed lpfc_unreg_vfi failure after devloss timeout. - Fixed RPI bit leak. - Fix hbq pointer corruption during target discovery. 
Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 1 + drivers/scsi/lpfc/lpfc_disc.h | 2 - drivers/scsi/lpfc/lpfc_els.c | 40 +++++++++-- drivers/scsi/lpfc/lpfc_hw.h | 2 +- drivers/scsi/lpfc/lpfc_hw4.h | 23 ++++++- drivers/scsi/lpfc/lpfc_init.c | 3 +- drivers/scsi/lpfc/lpfc_nportdisc.c | 7 ++ drivers/scsi/lpfc/lpfc_scsi.c | 135 +++++++++++++++---------------------- drivers/scsi/lpfc/lpfc_sli.c | 133 +++++++++++++++++++++++++++++------- drivers/scsi/lpfc/lpfc_sli.h | 1 + drivers/scsi/lpfc/lpfc_sli4.h | 5 ++ drivers/scsi/lpfc/lpfc_vport.c | 2 + 12 files changed, 235 insertions(+), 119 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 2fd3e45c577e..1cc23a69db5e 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -202,6 +202,7 @@ struct lpfc_stats { uint32_t elsRcvLIRR; uint32_t elsRcvRPS; uint32_t elsRcvRPL; + uint32_t elsRcvRRQ; uint32_t elsXmitFLOGI; uint32_t elsXmitFDISC; uint32_t elsXmitPLOGI; diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index f26f6e160a2a..2851d75ffc6f 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -105,8 +105,6 @@ struct lpfc_nodelist { struct lpfc_vport *vport; struct lpfc_work_evt els_retry_evt; struct lpfc_work_evt dev_loss_evt; - unsigned long last_ramp_up_time; /* jiffy of last ramp up */ - unsigned long last_q_full_time; /* jiffy of last queue full */ struct kref kref; atomic_t cmd_pending; uint32_t cmd_qdepth; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index e9e423f28f8a..a079bbc03cf8 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -4520,6 +4520,29 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, return 0; } +/** + * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB + * received as an ELS unsolicited event. A request to RRQ shall only + * be accepted if the Originator Nx_Port N_Port_ID or the Responder + * Nx_Port N_Port_ID of the target Exchange is the same as the + * N_Port_ID of the Nx_Port that makes the request. If the RRQ is + * not accepted, an LS_RJT with reason code "Unable to perform + * command request" and reason code explanation "Invalid Originator + * S_ID" shall be returned. For now, we just unconditionally accept + * RRQ from the target. + **/ +static void +lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); +} + /** * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd * @phba: pointer to lpfc hba data structure. 
@@ -5636,6 +5659,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (newnode) lpfc_nlp_put(ndlp); break; + case ELS_CMD_RRQ: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RRQ: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvRRQ++; + lpfc_els_rcv_rrq(vport, elsiocb, ndlp); + if (newnode) + lpfc_nlp_put(ndlp); + break; default: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", @@ -6042,11 +6075,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->ulpStatus, irsp->un.ulpWord[4]); goto fdisc_failed; } - if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) - lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_nlp_put(ndlp); - /* giving up on FDISC. Cancel discovery timer */ - lpfc_can_disctmo(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_FABRIC; if (vport->phba->fc_topology == TOPOLOGY_LOOP) @@ -6125,6 +6153,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int did = ndlp->nlp_DID; int rc; + vport->port_state = LPFC_FDISC; cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, ELS_CMD_FDISC); @@ -6190,7 +6219,6 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 1; } lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); - vport->port_state = LPFC_FDISC; return 0; } diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 7070c77357a9..f279d191b628 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -3538,7 +3538,7 @@ typedef struct _IOCB { /* IOCB structure */ ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ - + struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */ uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ } un; union { diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 95f8b4e0063d..fa3306386786 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -194,6 +194,26 @@ struct lpfc_sli4_flags { #define lpfc_fip_flag_WORD word0 }; +struct sli4_bls_acc { + uint32_t word0_rsvd; /* Word0 must be reserved */ + uint32_t word1; +#define lpfc_abts_orig_SHIFT 0 +#define lpfc_abts_orig_MASK 0x00000001 +#define lpfc_abts_orig_WORD word1 +#define LPFC_ABTS_UNSOL_RSP 1 +#define LPFC_ABTS_UNSOL_INT 0 + uint32_t word2; +#define lpfc_abts_rxid_SHIFT 0 +#define lpfc_abts_rxid_MASK 0x0000FFFF +#define lpfc_abts_rxid_WORD word2 +#define lpfc_abts_oxid_SHIFT 16 +#define lpfc_abts_oxid_MASK 0x0000FFFF +#define lpfc_abts_oxid_WORD word2 + uint32_t word3; + uint32_t word4; + uint32_t word5_rsvd; /* Word5 must be reserved */ +}; + /* event queue entry structure */ struct lpfc_eqe { uint32_t word0; @@ -1980,7 +2000,8 @@ struct lpfc_bmbx_create { #define SGL_ALIGN_SZ 64 #define SGL_PAGE_SIZE 4096 /* align SGL addr on a size boundary - adjust address up */ -#define NO_XRI ((uint16_t)-1) +#define NO_XRI ((uint16_t)-1) + struct wqe_common { uint32_t word6; #define wqe_xri_tag_SHIFT 0 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 02268a1eec69..6932657d74ad 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4931,7 +4931,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->vpi_base = 
phba->sli4_hba.max_cfg_param.vpi_base; phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; - phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; + phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? + (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; phba->max_vports = phba->max_vpi; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2003 cfg params XRI(B:%d M:%d), " diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 3e74136f1ede..2ed6af194932 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1223,6 +1223,12 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { + if (phba->sli_rev == LPFC_SLI_REV4) { + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_free_rpi(phba, + mb->u.mb.un.varRegLogin.rpi); + spin_lock_irq(&phba->hbalock); + } mp = (struct lpfc_dmabuf *) (mb->context1); if (mp) { __lpfc_mbuf_free(phba, mp->virt, mp->phys); @@ -1230,6 +1236,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, } lpfc_nlp_put(ndlp); list_del(&mb->list); + phba->sli.mboxq_cnt--; mempool_free(mb, phba->mbox_mem_pool); } } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index f5ab5dd9bbbf..bf80cdefb506 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -245,6 +245,36 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, return; } +/** + * lpfc_change_queue_depth - Alter scsi device queue depth + * @sdev: Pointer the scsi device on which to change the queue depth. + * @qdepth: New queue depth to set the sdev to. + * @reason: The reason for the queue depth change. + * + * This function is called by the midlayer and the LLD to alter the queue + * depth for a scsi device. This function sets the queue depth to the new + * value and sends an event out to log the queue depth change. + **/ +int +lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_rport_data *rdata; + unsigned long new_queue_depth, old_queue_depth; + + old_queue_depth = sdev->queue_depth; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + new_queue_depth = sdev->queue_depth; + rdata = sdev->hostdata; + if (rdata) + lpfc_send_sdev_queuedepth_change_event(phba, vport, + rdata->pnode, sdev->lun, + old_queue_depth, + new_queue_depth); + return sdev->queue_depth; +} + /** * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread * @phba: The Hba for which this call is being executed. 
@@ -309,8 +339,10 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport, if (vport->cfg_lun_queue_depth <= queue_depth) return; spin_lock_irqsave(&phba->hbalock, flags); - if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) || - ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) { + if (time_before(jiffies, + phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) || + time_before(jiffies, + phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) { spin_unlock_irqrestore(&phba->hbalock, flags); return; } @@ -342,10 +374,9 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) struct lpfc_vport **vports; struct Scsi_Host *shost; struct scsi_device *sdev; - unsigned long new_queue_depth, old_queue_depth; + unsigned long new_queue_depth; unsigned long num_rsrc_err, num_cmd_success; int i; - struct lpfc_rport_data *rdata; num_rsrc_err = atomic_read(&phba->num_rsrc_err); num_cmd_success = atomic_read(&phba->num_cmd_success); @@ -363,22 +394,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) else new_queue_depth = sdev->queue_depth - new_queue_depth; - old_queue_depth = sdev->queue_depth; - if (sdev->ordered_tags) - scsi_adjust_queue_depth(sdev, - MSG_ORDERED_TAG, - new_queue_depth); - else - scsi_adjust_queue_depth(sdev, - MSG_SIMPLE_TAG, - new_queue_depth); - rdata = sdev->hostdata; - if (rdata) - lpfc_send_sdev_queuedepth_change_event( - phba, vports[i], - rdata->pnode, - sdev->lun, old_queue_depth, - new_queue_depth); + lpfc_change_queue_depth(sdev, new_queue_depth, + SCSI_QDEPTH_DEFAULT); } } lpfc_destroy_vport_work_array(phba, vports); @@ -402,7 +419,6 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) struct Scsi_Host *shost; struct scsi_device *sdev; int i; - struct lpfc_rport_data *rdata; vports = lpfc_create_vport_work_array(phba); if (vports != NULL) @@ -412,22 +428,9 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) if (vports[i]->cfg_lun_queue_depth <= sdev->queue_depth) continue; - if (sdev->ordered_tags) - scsi_adjust_queue_depth(sdev, - MSG_ORDERED_TAG, - sdev->queue_depth+1); - else - scsi_adjust_queue_depth(sdev, - MSG_SIMPLE_TAG, - sdev->queue_depth+1); - rdata = sdev->hostdata; - if (rdata) - lpfc_send_sdev_queuedepth_change_event( - phba, vports[i], - rdata->pnode, - sdev->lun, - sdev->queue_depth - 1, - sdev->queue_depth); + lpfc_change_queue_depth(sdev, + sdev->queue_depth+1, + SCSI_QDEPTH_RAMP_UP); } } lpfc_destroy_vport_work_array(phba, vports); @@ -2208,7 +2211,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct scsi_cmnd *cmd = lpfc_cmd->pCmd; int result; struct scsi_device *tmp_sdev; - int depth = 0; + int depth; unsigned long flags; struct lpfc_fast_path_event *fast_path_evt; struct Scsi_Host *shost = cmd->device->host; @@ -2375,67 +2378,29 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, return; } - if (!result) lpfc_rampup_queue_depth(vport, queue_depth); - if (!result && pnode && NLP_CHK_NODE_ACT(pnode) && - ((jiffies - pnode->last_ramp_up_time) > - LPFC_Q_RAMP_UP_INTERVAL * HZ) && - ((jiffies - pnode->last_q_full_time) > - LPFC_Q_RAMP_UP_INTERVAL * HZ) && - (vport->cfg_lun_queue_depth > queue_depth)) { - shost_for_each_device(tmp_sdev, shost) { - if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){ - if (tmp_sdev->id != scsi_id) - continue; - if (tmp_sdev->ordered_tags) - scsi_adjust_queue_depth(tmp_sdev, - MSG_ORDERED_TAG, - tmp_sdev->queue_depth+1); - else - scsi_adjust_queue_depth(tmp_sdev, - MSG_SIMPLE_TAG, - tmp_sdev->queue_depth+1); - - 
pnode->last_ramp_up_time = jiffies; - } - } - lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode, - 0xFFFFFFFF, - queue_depth , queue_depth + 1); - } - /* * Check for queue full. If the lun is reporting queue full, then * back off the lun queue depth to prevent target overloads. */ if (result == SAM_STAT_TASK_SET_FULL && pnode && NLP_CHK_NODE_ACT(pnode)) { - pnode->last_q_full_time = jiffies; - shost_for_each_device(tmp_sdev, shost) { if (tmp_sdev->id != scsi_id) continue; depth = scsi_track_queue_full(tmp_sdev, - tmp_sdev->queue_depth - 1); - } - /* - * The queue depth cannot be lowered any more. - * Modify the returned error code to store - * the final depth value set by - * scsi_track_queue_full. - */ - if (depth == -1) - depth = shost->cmd_per_lun; - - if (depth) { + tmp_sdev->queue_depth-1); + if (depth <= 0) + continue; lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "0711 detected queue full - lun queue " "depth adjusted to %d.\n", depth); lpfc_send_sdev_queuedepth_change_event(phba, vport, - pnode, 0xFFFFFFFF, - depth+1, depth); + pnode, + tmp_sdev->lun, + depth+1, depth); } } @@ -3019,6 +2984,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) icmd->ulpLe = 1; icmd->ulpClass = cmd->ulpClass; + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocb->fcp_wqidx = iocb->fcp_wqidx; + if (lpfc_is_link_up(phba)) icmd->ulpCommand = CMD_ABORT_XRI_CN; else @@ -3596,6 +3565,7 @@ struct scsi_host_template lpfc_template = { .shost_attrs = lpfc_hba_attrs, .max_sectors = 0xFFFF, .vendor_id = LPFC_NL_VENDOR_ID, + .change_queue_depth = lpfc_change_queue_depth, }; struct scsi_host_template lpfc_vport_template = { @@ -3617,4 +3587,5 @@ struct scsi_host_template lpfc_vport_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = lpfc_vport_attrs, .max_sectors = 0xFFFF, + .change_queue_depth = lpfc_change_queue_depth, }; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index ce0a1a1c4792..1d2f65c4eb0b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5748,7 +5748,7 @@ static int lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, union lpfc_wqe *wqe) { - uint32_t payload_len = 0; + uint32_t xmit_len = 0, total_len = 0; uint8_t ct = 0; uint32_t fip; uint32_t abort_tag; @@ -5757,6 +5757,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, uint16_t xritag; struct ulp_bde64 *bpl = NULL; uint32_t els_id = ELS_ID_DEFAULT; + int numBdes, i; + struct ulp_bde64 bde; fip = phba->hba_flag & HBA_FIP_SUPPORT; /* The fcp commands will set command type */ @@ -5774,6 +5776,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, wqe->words[7] = 0; /* The ct field has moved so reset */ /* words0-2 bpl convert bde */ if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { + numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / + sizeof(struct ulp_bde64); bpl = (struct ulp_bde64 *) ((struct lpfc_dmabuf *)iocbq->context3)->virt; if (!bpl) @@ -5786,9 +5790,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, * can assign it to the sgl. 
*/ wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); - payload_len = wqe->generic.bde.tus.f.bdeSize; + xmit_len = wqe->generic.bde.tus.f.bdeSize; + total_len = 0; + for (i = 0; i < numBdes; i++) { + bde.tus.w = le32_to_cpu(bpl[i].tus.w); + total_len += bde.tus.f.bdeSize; + } } else - payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; + xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; iocbq->iocb.ulpIoTag = iocbq->iotag; cmnd = iocbq->iocb.ulpCommand; @@ -5802,7 +5811,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, iocbq->iocb.ulpCommand); return IOCB_ERROR; } - wqe->els_req.payload_len = payload_len; + wqe->els_req.payload_len = xmit_len; /* Els_reguest64 has a TMO */ bf_set(wqe_tmo, &wqe->els_req.wqe_com, iocbq->iocb.ulpTimeout); @@ -5831,6 +5840,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id); break; + case CMD_XMIT_SEQUENCE64_CX: + bf_set(lpfc_wqe_gen_context, &wqe->generic, + iocbq->iocb.un.ulpWord[3]); + wqe->generic.word3 = 0; + bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); + bf_set(wqe_xc, &wqe->generic, 1); + /* The entire sequence is transmitted for this IOCB */ + xmit_len = total_len; + cmnd = CMD_XMIT_SEQUENCE64_CR; case CMD_XMIT_SEQUENCE64_CR: /* word3 iocb=io_tag32 wqe=payload_offset */ /* payload offset used for multilpe outstanding @@ -5840,7 +5858,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, /* word4 relative_offset memcpy */ /* word5 r_ctl/df_ctl memcpy */ bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); - wqe->xmit_sequence.xmit_len = payload_len; + wqe->xmit_sequence.xmit_len = xmit_len; + command_type = OTHER_COMMAND; break; case CMD_XMIT_BCAST64_CN: /* word3 iocb=iotag32 wqe=payload_len */ @@ -5869,7 +5888,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, case CMD_FCP_IREAD64_CR: /* FCP_CMD is always the 1st sgl entry */ wqe->fcp_iread.payload_len = - payload_len + sizeof(struct fcp_rsp); + xmit_len + sizeof(struct fcp_rsp); /* word 4 (xfer length) should have been set on the memcpy */ @@ -5906,7 +5925,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, * sgl[1] = rsp. * */ - wqe->gen_req.command_len = payload_len; + wqe->gen_req.command_len = xmit_len; /* Word4 parameter copied in the memcpy */ /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ /* word6 context tag copied in memcpy */ @@ -5979,10 +5998,25 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, * iocbq from scratch. */ memset(wqe, 0, sizeof(union lpfc_wqe)); + /* OX_ID is invariable to who sent ABTS to CT exchange */ bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, - iocbq->iocb.un.ulpWord[3]); - bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, - iocbq->sli4_xritag); + bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); + if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == + LPFC_ABTS_UNSOL_INT) { + /* ABTS sent by initiator to CT exchange, the + * RX_ID field will be filled with the newly + * allocated responder XRI. + */ + bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, + iocbq->sli4_xritag); + } else { + /* ABTS sent by responder to CT exchange, the + * RX_ID field will be filled with the responder + * RX_ID from ABTS. 
+ */ + bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, + bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); + } bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, @@ -6044,7 +6078,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, uint16_t xritag; union lpfc_wqe wqe; struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; - uint32_t fcp_wqidx; if (piocb->sli4_xritag == NO_XRI) { if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || @@ -6079,8 +6112,17 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, return IOCB_ERROR; if (piocb->iocb_flag & LPFC_IO_FCP) { - fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); - if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe)) + /* + * For FCP command IOCB, get a new WQ index to distribute + * WQE across the WQsr. On the other hand, for abort IOCB, + * it carries the same WQ index to the original command + * IOCB. + */ + if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && + (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) + piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); + if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], + &wqe)) return IOCB_ERROR; } else { if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) @@ -7070,6 +7112,9 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt->ulpLe = 1; iabt->ulpClass = icmd->ulpClass; + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; + if (phba->link_state >= LPFC_LINK_UP) iabt->ulpCommand = CMD_ABORT_XRI_CN; else @@ -7273,6 +7318,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocb->iocb.ulpClass = cmd->ulpClass; abtsiocb->vport = phba->pport; + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; + if (lpfc_is_link_up(phba)) abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; else @@ -8671,7 +8719,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) uint32_t status; unsigned long iflags; - lpfc_sli4_rq_release(hrq, drq); if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) goto out; @@ -8681,6 +8728,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2537 Receive Frame Truncated!!\n"); case FC_STATUS_RQ_SUCCESS: + lpfc_sli4_rq_release(hrq, drq); spin_lock_irqsave(&phba->hbalock, iflags); dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); if (!dma_buf) { @@ -10997,8 +11045,8 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, { struct lpfc_iocbq *ctiocb = NULL; struct lpfc_nodelist *ndlp; - uint16_t oxid; - uint32_t sid; + uint16_t oxid, rxid; + uint32_t sid, fctl; IOCB_t *icmd; if (!lpfc_is_link_up(phba)) @@ -11006,6 +11054,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, sid = sli4_sid_from_fc_hdr(fc_hdr); oxid = be16_to_cpu(fc_hdr->fh_ox_id); + rxid = be16_to_cpu(fc_hdr->fh_rx_id); ndlp = lpfc_findnode_did(phba->pport, sid); if (!ndlp) { @@ -11020,9 +11069,12 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, if (!ctiocb) return; + /* Extract the F_CTL field from FC_HDR */ + fctl = sli4_fctl_from_fc_hdr(fc_hdr); + icmd = &ctiocb->iocb; - icmd->un.xseq64.bdl.ulpIoTag32 = 0; icmd->un.xseq64.bdl.bdeSize = 0; + icmd->un.xseq64.bdl.ulpIoTag32 = 0; icmd->un.xseq64.w5.hcsw.Dfctl = 0; icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; icmd->un.xseq64.w5.hcsw.Type = 
FC_TYPE_BLS; @@ -11033,13 +11085,30 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, icmd->ulpLe = 1; icmd->ulpClass = CLASS3; icmd->ulpContext = ndlp->nlp_rpi; - icmd->un.ulpWord[3] = oxid; - ctiocb->sli4_xritag = NO_XRI; ctiocb->iocb_cmpl = NULL; ctiocb->vport = phba->pport; ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; + if (fctl & FC_FC_EX_CTX) { + /* ABTS sent by responder to CT exchange, construction + * of BA_ACC will use OX_ID from ABTS for the XRI_TAG + * field and RX_ID from ABTS for RX_ID field. + */ + bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); + bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); + ctiocb->sli4_xritag = oxid; + } else { + /* ABTS sent by initiator to CT exchange, construction + * of BA_ACC will need to allocate a new XRI as for the + * XRI_TAG and RX_ID fields. + */ + bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); + bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); + ctiocb->sli4_xritag = NO_XRI; + } + bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); + /* Xmit CT abts accept on exchange */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", @@ -11066,19 +11135,31 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; struct fc_frame_header fc_hdr; + uint32_t fctl; bool abts_par; - /* Try to abort partially assembled seq */ - abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); - /* Make a copy of fc_hdr before the dmabuf being released */ memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); + fctl = sli4_fctl_from_fc_hdr(&fc_hdr); - /* Send abort to ULP if partially seq abort failed */ - if (abts_par == false) - lpfc_sli4_send_seq_to_ulp(vport, dmabuf); - else + if (fctl & FC_FC_EX_CTX) { + /* + * ABTS sent by responder to exchange, just free the buffer + */ lpfc_in_buf_free(phba, &dmabuf->dbuf); + } else { + /* + * ABTS sent by initiator to exchange, need to do cleanup + */ + /* Try to abort partially assembled seq */ + abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); + + /* Send abort to ULP if partially seq abort failed */ + if (abts_par == false) + lpfc_sli4_send_seq_to_ulp(vport, dmabuf); + else + lpfc_in_buf_free(phba, &dmabuf->dbuf); + } /* Send basic accept (BA_ACC) to the abort requester */ lpfc_sli4_seq_abort_acc(phba, &fc_hdr); } diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 174dcda32195..ba38de3c28f1 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -66,6 +66,7 @@ struct lpfc_iocbq { uint8_t abort_count; uint8_t rsvd2; uint32_t drvrTimeout; /* driver timeout in seconds */ + uint32_t fcp_wqidx; /* index to FCP work queue */ struct lpfc_vport *vport;/* virtual port pointer */ void *context1; /* caller context information */ void *context2; /* caller context information */ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 4a9cf674555e..6a4558ba93b6 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -63,6 +63,11 @@ (fc_hdr)->fh_s_id[1] << 8 | \ (fc_hdr)->fh_s_id[2]) +#define sli4_fctl_from_fc_hdr(fc_hdr) \ + ((fc_hdr)->fh_f_ctl[0] << 16 | \ + (fc_hdr)->fh_f_ctl[1] << 8 | \ + (fc_hdr)->fh_f_ctl[2]) + enum lpfc_sli4_queue_type { LPFC_EQ, LPFC_GCQ, diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 096d178c4c86..7d6dd83d3592 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -700,6 +700,8 @@ lpfc_vport_delete(struct 
fc_vport *fc_vport) } spin_unlock_irq(&phba->ndlp_lock); } + if (vport->vpi_state != LPFC_VPI_REGISTERED) + goto skip_logo; vport->unreg_vpi_cmpl = VPORT_INVAL; timeout = msecs_to_jiffies(phba->fc_ratov * 2000); if (!lpfc_issue_els_npiv_logo(vport, ndlp)) -- cgit v1.2.3-59-g8ed1b From 891478a2442d8d0077651bc8316afaec8d85dd4d Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 18 Nov 2009 15:40:23 -0500 Subject: [SCSI] lpfc 8.3.6 : Fix AER issues Fix AER issues. - Made AER sysfs entry point return "Operation not permitted" to OneConnect HBAs - Stop and abort all I/Os on HBA for AER uncorrectable non-fatal error handling Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_attr.c | 33 +++++++++++++++++++++++++-------- drivers/scsi/lpfc/lpfc_hbadisc.c | 10 +++++++++- drivers/scsi/lpfc/lpfc_init.c | 29 ++++++++++++++++++++++++++--- 3 files changed, 60 insertions(+), 12 deletions(-) mode change 100644 => 100755 drivers/scsi/lpfc/lpfc_hbadisc.c diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index d55befb7cf4c..75523603b91c 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -2835,6 +2835,9 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = vport->phba; int val = 0, rc = -EINVAL; + /* AER not supported on OC devices yet */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) + return -EPERM; if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) @@ -2851,10 +2854,11 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, phba->cfg_aer_support = 0; rc = strlen(buf); } else - rc = -EINVAL; - } else + rc = -EPERM; + } else { phba->cfg_aer_support = 0; - rc = strlen(buf); + rc = strlen(buf); + } break; case 1: if (!(phba->hba_flag & HBA_AER_ENABLED)) { @@ -2866,10 +2870,11 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, phba->cfg_aer_support = 1; rc = strlen(buf); } else - rc = -EINVAL; - } else + rc = -EPERM; + } else { phba->cfg_aer_support = 1; - rc = strlen(buf); + rc = strlen(buf); + } break; default: rc = -EINVAL; @@ -2905,6 +2910,12 @@ lpfc_param_show(aer_support) static int lpfc_aer_support_init(struct lpfc_hba *phba, int val) { + /* AER not supported on OC devices yet */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { + phba->cfg_aer_support = 0; + return -EPERM; + } + if (val == 0 || val == 1) { phba->cfg_aer_support = val; return 0; @@ -2913,6 +2924,7 @@ lpfc_aer_support_init(struct lpfc_hba *phba, int val) "2712 lpfc_aer_support attribute value %d out " "of range, allowed values are 0|1, setting it " "to default value of 1\n", val); + /* By default, try to enable AER on a device */ phba->cfg_aer_support = 1; return -EINVAL; } @@ -2948,18 +2960,23 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = vport->phba; int val, rc = -1; + /* AER not supported on OC devices yet */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) + return -EPERM; if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; + if (val != 1) + return -EINVAL; - if (val == 1 && phba->hba_flag & HBA_AER_ENABLED) + if (phba->hba_flag & HBA_AER_ENABLED) rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev); if (rc == 0) return strlen(buf); else - return -EINVAL; + return -EPERM; } static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL, diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c old mode 100644 new mode 100755 index 
3c06aa54a3e5..4d7d8846b4da --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -4369,6 +4369,14 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) ret = 1; spin_unlock_irq(shost->host_lock); goto out; + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "2624 RPI %x DID %x flg %x still " + "logged in\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag); + if (ndlp->nlp_flag & NLP_RPI_VALID) + ret = 1; } } spin_unlock_irq(shost->host_lock); @@ -4465,7 +4473,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { lpfc_mbx_unreg_vpi(vports[i]); - vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; } lpfc_destroy_vport_work_array(phba, vports); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6932657d74ad..93679f30a5af 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -7141,6 +7141,28 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) return 0; } +/** + * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to prepare the SLI3 device for PCI slot recover. It + * aborts and stops all the on-going I/Os on the pci device. + **/ +static void +lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) +{ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2723 PCI channel I/O abort preparing for recovery\n"); + /* Prepare for bringing HBA offline */ + lpfc_offline_prep(phba); + /* Clear sli active flag to prevent sysfs access to HBA */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + /* Stop and flush all I/Os and bring HBA offline */ + lpfc_offline(phba); +} + /** * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset * @phba: pointer to lpfc hba data structure. @@ -7156,7 +7178,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) struct lpfc_sli_ring *pring; lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2710 PCI channel I/O frozen\n"); + "2710 PCI channel disable preparing for reset\n"); /* Disable interrupt and pci device */ lpfc_sli_disable_intr(phba); pci_disable_device(phba->pcidev); @@ -7181,7 +7203,7 @@ static void lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2711 PCI channel I/O permanent failure\n"); + "2711 PCI channel permanent disable for failure\n"); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); /* Clean up all driver's outstanding SCSI I/Os */ @@ -7214,7 +7236,8 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) switch (state) { case pci_channel_io_normal: - /* Non-fatal error, do nothing */ + /* Non-fatal error, prepare for recovery */ + lpfc_sli_prep_dev_for_recover(phba); return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: /* Fatal error, prepare for slot reset */ -- cgit v1.2.3-59-g8ed1b From 1c6f4ef5d6be7ef4cbe92a86286217971f52e2cd Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 18 Nov 2009 15:40:49 -0500 Subject: [SCSI] lpfc 8.3.6 : Fix critical errors Fix errors relating to crashes and hangs. - Fix crash due to list corruption while unloading driver. - Fix panic during pci-hot-plug testing. - Fix panic when unmapping luns. - Fixed total_scsi_bufs counting could cause exhausted memory. 
- Fixed locking issue causing hang. - Fixed the call from lpfc_new_scsi_buf_s3 to use lpfc_release_scsi_buf_s3. Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_hbadisc.c | 14 ++++++++------ drivers/scsi/lpfc/lpfc_init.c | 4 ++-- drivers/scsi/lpfc/lpfc_scsi.c | 33 ++++++++++++++++++++++++++++----- 3 files changed, 38 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 4d7d8846b4da..3b9424427652 100755 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1018,13 +1018,12 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) mempool_free(mboxq, phba->mbox_mem_pool); return; } + spin_lock_irqsave(&phba->hbalock, flags); phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - if (vport->port_state != LPFC_FLOGI) { - spin_lock_irqsave(&phba->hbalock, flags); - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (vport->port_state != LPFC_FLOGI) lpfc_initial_flogi(vport); - } mempool_free(mboxq, phba->mbox_mem_pool); return; @@ -1460,12 +1459,15 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) if (phba->link_state >= LPFC_LINK_UP) lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); - else + else { /* * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS * flag */ + spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_DISC_INPROGRESS; + spin_unlock_irq(&phba->hbalock); + } if (unreg_fcf) { spin_lock_irq(&phba->hbalock); @@ -2264,7 +2266,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * This shost reference might have been taken at the beginning of * lpfc_vport_delete() */ - if (vport->load_flag & FC_UNLOADING) + if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport)) scsi_host_put(shost); } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 93679f30a5af..5f5b2283d58c 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2320,6 +2320,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); /* Release all the lpfc_scsi_bufs maintained by this host. */ + spin_lock(&phba->scsi_buf_list_lock); list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { list_del(&sb->list); pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, @@ -2327,6 +2328,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) kfree(sb); phba->total_scsi_bufs--; } + spin_unlock(&phba->scsi_buf_list_lock); /* Release all the lpfc_iocbq entries maintained by this host. 
*/ list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { @@ -2334,9 +2336,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) kfree(io); phba->total_iocbq_bufs--; } - spin_unlock_irq(&phba->hbalock); - return 0; } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index bf80cdefb506..a246410ce9df 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -59,6 +59,8 @@ static char *dif_op_str[] = { }; static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); +static void +lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static void lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) @@ -596,7 +598,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) iocb->ulpClass = CLASS3; psb->status = IOSTAT_SUCCESS; /* Put it back into the SCSI buffer list */ - lpfc_release_scsi_buf_s4(phba, psb); + lpfc_release_scsi_buf_s3(phba, psb); } @@ -2766,7 +2768,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_rport_data *rdata = cmnd->device->hostdata; - struct lpfc_nodelist *ndlp = rdata->pnode; + struct lpfc_nodelist *ndlp; struct lpfc_scsi_buf *lpfc_cmd; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); int err; @@ -2776,6 +2778,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) cmnd->result = err; goto out_fail_command; } + ndlp = rdata->pnode; if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { @@ -3154,9 +3157,15 @@ static int lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) { struct lpfc_rport_data *rdata = cmnd->device->hostdata; - struct lpfc_nodelist *pnode = rdata->pnode; + struct lpfc_nodelist *pnode; unsigned long later; + if (!rdata) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0797 Tgt Map rport failure: rdata x%p\n", rdata); + return FAILED; + } + pnode = rdata->pnode; /* * If target is not in a MAPPED state, delay until * target is rediscovered or devloss timeout expires. 
@@ -3241,12 +3250,18 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_rport_data *rdata = cmnd->device->hostdata; - struct lpfc_nodelist *pnode = rdata->pnode; + struct lpfc_nodelist *pnode; unsigned tgt_id = cmnd->device->id; unsigned int lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; int status; + if (!rdata) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0798 Device Reset rport failure: rdata x%p\n", rdata); + return FAILED; + } + pnode = rdata->pnode; fc_block_scsi_eh(cmnd); status = lpfc_chk_tgt_mapped(vport, cmnd); @@ -3300,12 +3315,18 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_rport_data *rdata = cmnd->device->hostdata; - struct lpfc_nodelist *pnode = rdata->pnode; + struct lpfc_nodelist *pnode; unsigned tgt_id = cmnd->device->id; unsigned int lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; int status; + if (!rdata) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0799 Target Reset rport failure: rdata x%p\n", rdata); + return FAILED; + } + pnode = rdata->pnode; fc_block_scsi_eh(cmnd); status = lpfc_chk_tgt_mapped(vport, cmnd); @@ -3486,6 +3507,8 @@ lpfc_slave_alloc(struct scsi_device *sdev) "Allocated %d buffers.\n", num_to_alloc, num_allocated); } + if (num_allocated > 0) + phba->total_scsi_bufs += num_allocated; return 0; } -- cgit v1.2.3-59-g8ed1b From a747c9ce56533e376993473321d96ec8c23a3e43 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 18 Nov 2009 15:41:10 -0500 Subject: [SCSI] lpfc 8.3.6 : Hardware related fixes and changes Hardware related Fixes and Changes. - Added new Adapter IDs and update default Adapter names. - Added PCI read after EQarm doorbell PCI write to flush the write and avoid spurious interrupts when in INTx mode. - Phase out use of ONLINE registers.
- Fix for lost MSI interrupt Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_hw.h | 3 +- drivers/scsi/lpfc/lpfc_hw4.h | 4 +- drivers/scsi/lpfc/lpfc_init.c | 168 +++++++++++++++++++++++------------------- drivers/scsi/lpfc/lpfc_sli.c | 55 ++++++++------ drivers/scsi/lpfc/lpfc_sli4.h | 7 +- 5 files changed, 133 insertions(+), 104 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index f279d191b628..c9faa1d8c3c8 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1168,7 +1168,8 @@ typedef struct { #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 #define PCI_VENDOR_ID_SERVERENGINE 0x19a2 #define PCI_DEVICE_ID_TIGERSHARK 0x0704 -#define PCI_DEVICE_ID_TS_BE3 0x0714 +#define PCI_DEVICE_ID_TOMCAT 0x0714 +#define PCI_DEVICE_ID_FALCON 0xf180 #define JEDEC_ID_ADDRESS 0x0080001c #define FIREFLY_JEDEC_ID 0x1ACC diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index fa3306386786..1585148a17e5 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -514,8 +514,8 @@ struct lpfc_register { #define LPFC_UERR_STATUS_HI 0x00A4 #define LPFC_UERR_STATUS_LO 0x00A0 -#define LPFC_ONLINE0 0x00B0 -#define LPFC_ONLINE1 0x00B4 +#define LPFC_UE_MASK_HI 0x00AC +#define LPFC_UE_MASK_LO 0x00A8 #define LPFC_SCRATCHPAD 0x0058 /* BAR0 Registers */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 5f5b2283d58c..0ba35a9a5c5f 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1529,10 +1529,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) int GE = 0; int oneConnect = 0; /* default is not a oneConnect */ struct { - char * name; - int max_speed; - char * bus; - } m = {"", 0, ""}; + char *name; + char *bus; + char *function; + } m = {"", "", ""}; if (mdp && mdp[0] != '\0' && descp && descp[0] != '\0') @@ -1553,136 +1553,155 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) switch (dev_id) { case PCI_DEVICE_ID_FIREFLY: - m = (typeof(m)){"LP6000", max_speed, "PCI"}; + m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SUPERFLY: if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) - m = (typeof(m)){"LP7000", max_speed, "PCI"}; + m = (typeof(m)){"LP7000", "PCI", + "Fibre Channel Adapter"}; else - m = (typeof(m)){"LP7000E", max_speed, "PCI"}; + m = (typeof(m)){"LP7000E", "PCI", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_DRAGONFLY: - m = (typeof(m)){"LP8000", max_speed, "PCI"}; + m = (typeof(m)){"LP8000", "PCI", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_CENTAUR: if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) - m = (typeof(m)){"LP9002", max_speed, "PCI"}; + m = (typeof(m)){"LP9002", "PCI", + "Fibre Channel Adapter"}; else - m = (typeof(m)){"LP9000", max_speed, "PCI"}; + m = (typeof(m)){"LP9000", "PCI", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_RFLY: - m = (typeof(m)){"LP952", max_speed, "PCI"}; + m = (typeof(m)){"LP952", "PCI", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PEGASUS: - m = (typeof(m)){"LP9802", max_speed, "PCI-X"}; + m = (typeof(m)){"LP9802", "PCI-X", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_THOR: - m = (typeof(m)){"LP10000", max_speed, "PCI-X"}; + m = (typeof(m)){"LP10000", "PCI-X", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_VIPER: - m = (typeof(m)){"LPX1000", max_speed, "PCI-X"}; + m = (typeof(m)){"LPX1000", "PCI-X", + "Fibre Channel Adapter"}; break; case 
PCI_DEVICE_ID_PFLY: - m = (typeof(m)){"LP982", max_speed, "PCI-X"}; + m = (typeof(m)){"LP982", "PCI-X", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TFLY: - m = (typeof(m)){"LP1050", max_speed, "PCI-X"}; + m = (typeof(m)){"LP1050", "PCI-X", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS: - m = (typeof(m)){"LP11000", max_speed, "PCI-X2"}; + m = (typeof(m)){"LP11000", "PCI-X2", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS_SCSP: - m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"}; + m = (typeof(m)){"LP11000-SP", "PCI-X2", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS_DCSP: - m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"}; + m = (typeof(m)){"LP11002-SP", "PCI-X2", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE: - m = (typeof(m)){"LPe1000", max_speed, "PCIe"}; + m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE_SCSP: - m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"}; + m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE_DCSP: - m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"}; + m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BMID: - m = (typeof(m)){"LP1150", max_speed, "PCI-X2"}; + m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BSMB: - m = (typeof(m)){"LP111", max_speed, "PCI-X2"}; + m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR: - m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; + m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR_SCSP: - m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; + m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR_DCSP: - m = (typeof(m)){"LP2105", max_speed, "PCIe"}; + m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; GE = 1; break; case PCI_DEVICE_ID_ZMID: - m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; + m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZSMB: - m = (typeof(m)){"LPe111", max_speed, "PCIe"}; + m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP101: - m = (typeof(m)){"LP101", max_speed, "PCI-X"}; + m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP10000S: - m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; + m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP11000S: - m = (typeof(m)){"LP11000-S", max_speed, - "PCI-X2"}; + m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LPE11000S: - m = (typeof(m)){"LPe11000-S", max_speed, - "PCIe"}; + m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT: - m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; + m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_MID: - m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; + m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_SMB: - m = (typeof(m)){"LPe121", max_speed, "PCIe"}; + m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_DCSP: - m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; + m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_SCSP: - m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; + m = 
(typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_S: - m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; + m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HORNET: - m = (typeof(m)){"LP21000", max_speed, "PCIe"}; + m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; GE = 1; break; case PCI_DEVICE_ID_PROTEUS_VF: - m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; + m = (typeof(m)){"LPev12000", "PCIe IOV", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_PF: - m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; + m = (typeof(m)){"LPev12000", "PCIe IOV", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_S: - m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; + m = (typeof(m)){"LPemv12002-S", "PCIe IOV", + "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TIGERSHARK: oneConnect = 1; - m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; + m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; break; - case PCI_DEVICE_ID_TS_BE3: + case PCI_DEVICE_ID_TOMCAT: oneConnect = 1; - m = (typeof(m)) {"OCeXXXXX-F", max_speed, "PCIe"}; + m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; + break; + case PCI_DEVICE_ID_FALCON: + m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", + "EmulexSecure Fibre"}; break; default: - m = (typeof(m)){ NULL }; + m = (typeof(m)){"Unknown", "", ""}; break; } @@ -1694,17 +1713,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) if (descp && descp[0] == '\0') { if (oneConnect) snprintf(descp, 255, - "Emulex OneConnect %s, FCoE Initiator, Port %s", - m.name, + "Emulex OneConnect %s, %s Initiator, Port %s", + m.name, m.function, phba->Port); else snprintf(descp, 255, "Emulex %s %d%s %s %s", - m.name, m.max_speed, - (GE) ? "GE" : "Gb", - m.bus, - (GE) ? "FCoE Adapter" : - "Fibre Channel Adapter"); + m.name, max_speed, (GE) ? 
"GE" : "Gb", + m.bus, m.function); } } @@ -4618,7 +4634,6 @@ int lpfc_sli4_post_status_check(struct lpfc_hba *phba) { struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; - uint32_t onlnreg0, onlnreg1; int i, port_error = -ENODEV; if (!phba->sli4_hba.STAregaddr) @@ -4662,21 +4677,20 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) bf_get(lpfc_scratchpad_slirev, &scratchpad), bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); - + phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr); + phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr); /* With uncoverable error, log the error message and return error */ - onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); - onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); - if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { - uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); - uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); - if (uerrlo_reg.word0 || uerrhi_reg.word0) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1422 HBA Unrecoverable error: " - "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " - "online0_reg=0x%x, online1_reg=0x%x\n", - uerrlo_reg.word0, uerrhi_reg.word0, - onlnreg0, onlnreg1); - } + uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); + uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); + if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || + (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1422 HBA Unrecoverable error: " + "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " + "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", + uerrlo_reg.word0, uerrhi_reg.word0, + phba->sli4_hba.ue_mask_lo, + phba->sli4_hba.ue_mask_hi); return -ENODEV; } @@ -4697,10 +4711,10 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) LPFC_UERR_STATUS_LO; phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; - phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + - LPFC_ONLINE0; - phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + - LPFC_ONLINE1; + phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_UE_MASK_LO; + phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_UE_MASK_HI; phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_SCRATCHPAD; } @@ -8116,7 +8130,9 @@ static struct pci_device_id lpfc_id_table[] = { PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TS_BE3, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, PCI_ANY_ID, PCI_ANY_ID, }, { 0 } }; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 1d2f65c4eb0b..b3a69f984d95 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -263,6 +263,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); + /* PCI read to flush PCI pipeline on re-arming for INTx mode */ + if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) + readl(q->phba->sli4_hba.EQCQDBregaddr); return released; } @@ -7686,31 +7689,28 @@ static int lpfc_sli4_eratt_read(struct lpfc_hba *phba) { uint32_t uerr_sta_hi, uerr_sta_lo; - 
uint32_t onlnreg0, onlnreg1; /* For now, use the SLI4 device internal unrecoverable error * registers for error attention. This can be changed later. */ - onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); - onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); - if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { - uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); - uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); - if (uerr_sta_lo || uerr_sta_hi) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1423 HBA Unrecoverable error: " - "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " - "online0_reg=0x%x, online1_reg=0x%x\n", - uerr_sta_lo, uerr_sta_hi, - onlnreg0, onlnreg1); - phba->work_status[0] = uerr_sta_lo; - phba->work_status[1] = uerr_sta_hi; - /* Set the driver HA work bitmap */ - phba->work_ha |= HA_ERATT; - /* Indicate polling handles this ERATT */ - phba->hba_flag |= HBA_ERATT_HANDLED; - return 1; - } + uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); + uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); + if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || + (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1423 HBA Unrecoverable error: " + "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " + "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", + uerr_sta_lo, uerr_sta_hi, + phba->sli4_hba.ue_mask_lo, + phba->sli4_hba.ue_mask_hi); + phba->work_status[0] = uerr_sta_lo; + phba->work_status[1] = uerr_sta_hi; + /* Set the driver HA work bitmap */ + phba->work_ha |= HA_ERATT; + /* Indicate polling handles this ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; } return 0; } @@ -7833,7 +7833,7 @@ irqreturn_t lpfc_sli_sp_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; - uint32_t ha_copy; + uint32_t ha_copy, hc_copy; uint32_t work_ha_copy; unsigned long status; unsigned long iflag; @@ -7891,8 +7891,13 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) } /* Clear up only attention source related to slow-path */ + hc_copy = readl(phba->HCregaddr); + writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | + HC_LAINT_ENA | HC_ERINT_ENA), + phba->HCregaddr); writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), phba->HAregaddr); + writel(hc_copy, phba->HCregaddr); readl(phba->HAregaddr); /* flush */ spin_unlock_irqrestore(&phba->hbalock, iflag); } else @@ -8202,6 +8207,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id) struct lpfc_hba *phba; irqreturn_t sp_irq_rc, fp_irq_rc; unsigned long status1, status2; + uint32_t hc_copy; /* * Get the driver's phba structure from the dev_id and @@ -8239,7 +8245,12 @@ lpfc_sli_intr_handler(int irq, void *dev_id) } /* Clear attention sources except link and error attentions */ + hc_copy = readl(phba->HCregaddr); + writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA + | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), + phba->HCregaddr); writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); + writel(hc_copy, phba->HCregaddr); readl(phba->HAregaddr); /* flush */ spin_unlock(&phba->hbalock); diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 6a4558ba93b6..25d66d070cf8 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -293,9 +293,8 @@ struct lpfc_sli4_hba { /* BAR0 PCI config space register memory map */ void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ - void __iomem *ONLINE0regaddr; /* Address to components of internal UE */ - void __iomem *ONLINE1regaddr; /* 
Address to components of internal UE */ -#define LPFC_ONLINE_NERR 0xFFFFFFFF + void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */ + void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */ void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ /* BAR1 FCoE function CSR register memory map */ void __iomem *STAregaddr; /* Address to HST_STATE register */ @@ -309,6 +308,8 @@ struct lpfc_sli4_hba { void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ + uint32_t ue_mask_lo; + uint32_t ue_mask_hi; struct msix_entry *msix_entries; uint32_t cfg_eqn; struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ -- cgit v1.2.3-59-g8ed1b From 2a7045212cef90337588f72f5dabf497f5f93a90 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 18 Nov 2009 15:41:46 -0500 Subject: [SCSI] lpfc 8.3.6 : Update lpfc driver version to 8.3.6 Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 49727c285a68..c7f3aed2aab8 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.5" +#define LPFC_DRIVER_VERSION "8.3.6" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" -- cgit v1.2.3-59-g8ed1b From 70d919fbd9ab78f3eca5ea7bd060fefd7b508641 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 20 Nov 2009 14:54:41 -0800 Subject: [SCSI] libfc: fix payload size passed to fc_frame_alloc() in fc_lport_els_request Frame header room is already included, just pass the length of payload. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index c841d547c298..bbf4152c9c69 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1679,8 +1679,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job, char *pp; int len; - fp = fc_frame_alloc(lport, sizeof(struct fc_frame_header) + - job->request_payload.payload_len); + fp = fc_frame_alloc(lport, job->request_payload.payload_len); if (!fp) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From 859b7b649ab58ee5cbfb761491317d5b315c1b0f Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Fri, 20 Nov 2009 14:54:47 -0800 Subject: [SCSI] fcoe: allow SCSI-FCP to be processed directly in softirq context Allow FCP frames to bypass the FCoE receive processing threads and handle them directly in softirq context, if they are received on the correct CPU. This preserves the queuing to threads for scaling out receive processing to multiple CPUs, but allows FCoE-aware multi-queue network drivers that direct frames to the originating CPUs to handle FCP processing with less scheduling latency. Only FCP is handled directly, because libfc makes use of mutexes in ELS handling routines. The bulk of this change is just moving the FCoE receive processing out of the receive thread function, leaving behind just the thread and queue management.
The interesting bits are in fcoe_rcv() Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 245 ++++++++++++++++++++++++++--------------------- 1 file changed, 135 insertions(+), 110 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 4a43b74c0d27..32298ed60614 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -109,6 +109,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *, struct fc_frame *, void *), void *, u32 timeout); +static void fcoe_recv_frame(struct sk_buff *skb); module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(create, "string"); @@ -1241,11 +1242,25 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, * this skb. We also have this receive thread locked, * so we're free to queue skbs into it's queue. */ - __skb_queue_tail(&fps->fcoe_rx_list, skb); - if (fps->fcoe_rx_list.qlen == 1) - wake_up_process(fps->thread); - spin_unlock_bh(&fps->fcoe_rx_list.lock); + /* If this is a SCSI-FCP frame, and this is already executing on the + * correct CPU, and the queue for this CPU is empty, then go ahead + * and process the frame directly in the softirq context. + * This lets us process completions without context switching from the + * NET_RX softirq, to our receive processing thread, and then back to + * BLOCK softirq context. + */ + if (fh->fh_type == FC_TYPE_FCP && + cpu == smp_processor_id() && + skb_queue_empty(&fps->fcoe_rx_list)) { + spin_unlock_bh(&fps->fcoe_rx_list.lock); + fcoe_recv_frame(skb); + } else { + __skb_queue_tail(&fps->fcoe_rx_list, skb); + if (fps->fcoe_rx_list.qlen == 1) + wake_up_process(fps->thread); + spin_unlock_bh(&fps->fcoe_rx_list.lock); + } return 0; err: @@ -1503,26 +1518,134 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb) } /** - * fcoe_percpu_receive_thread() - The per-CPU packet receive thread - * @arg: The per-CPU context - * - * Return: 0 for success + * fcoe_recv_frame() - process a single received frame + * @skb: frame to process */ -int fcoe_percpu_receive_thread(void *arg) +static void fcoe_recv_frame(struct sk_buff *skb) { - struct fcoe_percpu_s *p = arg; u32 fr_len; struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fcoe_dev_stats *stats; struct fc_frame_header *fh; - struct sk_buff *skb; struct fcoe_crc_eof crc_eof; struct fc_frame *fp; u8 *mac = NULL; struct fcoe_port *port; struct fcoe_hdr *hp; + fr = fcoe_dev_from_skb(skb); + lport = fr->fr_dev; + if (unlikely(!lport)) { + if (skb->destructor != fcoe_percpu_flush_done) + FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); + kfree_skb(skb); + return; + } + + FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " + "head:%p data:%p tail:%p end:%p sum:%d dev:%s", + skb->len, skb->data_len, + skb->head, skb->data, skb_tail_pointer(skb), + skb_end_pointer(skb), skb->csum, + skb->dev ? skb->dev->name : ""); + + /* + * Save source MAC address before discarding header. + */ + port = lport_priv(lport); + if (skb_is_nonlinear(skb)) + skb_linearize(skb); /* not ideal */ + mac = eth_hdr(skb)->h_source; + + /* + * Frame length checks and setting up the header pointers + * was done in fcoe_rcv already. 
+ */ + hp = (struct fcoe_hdr *) skb_network_header(skb); + fh = (struct fc_frame_header *) skb_transport_header(skb); + + stats = fc_lport_get_stats(lport); + if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { + if (stats->ErrorFrames < 5) + printk(KERN_WARNING "fcoe: FCoE version " + "mismatch: The frame has " + "version %x, but the " + "initiator supports version " + "%x\n", FC_FCOE_DECAPS_VER(hp), + FC_FCOE_VER); + stats->ErrorFrames++; + kfree_skb(skb); + return; + } + + skb_pull(skb, sizeof(struct fcoe_hdr)); + fr_len = skb->len - sizeof(struct fcoe_crc_eof); + + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = hp->fcoe_sof; + + /* Copy out the CRC and EOF trailer for access */ + if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { + kfree_skb(skb); + return; + } + fr_eof(fp) = crc_eof.fcoe_eof; + fr_crc(fp) = crc_eof.fcoe_crc32; + if (pskb_trim(skb, fr_len)) { + kfree_skb(skb); + return; + } + + /* + * We only check CRC if no offload is available and if it is + * it's solicited data, in which case, the FCP layer would + * check it during the copy. + */ + if (lport->crc_offload && + skb->ip_summed == CHECKSUM_UNNECESSARY) + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + else + fr_flags(fp) |= FCPHF_CRC_UNCHECKED; + + fh = fc_frame_header_get(fp); + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && + fh->fh_type == FC_TYPE_FCP) { + fc_exch_recv(lport, fp); + return; + } + if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { + if (le32_to_cpu(fr_crc(fp)) != + ~crc32(~0, skb->data, fr_len)) { + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping " + "frame with CRC error\n"); + stats->InvalidCRCCount++; + stats->ErrorFrames++; + fc_frame_free(fp); + return; + } + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + } + fc_exch_recv(lport, fp); +} + +/** + * fcoe_percpu_receive_thread() - The per-CPU packet receive thread + * @arg: The per-CPU context + * + * Return: 0 for success + */ +int fcoe_percpu_receive_thread(void *arg) +{ + struct fcoe_percpu_s *p = arg; + struct sk_buff *skb; + set_user_nice(current, -20); while (!kthread_should_stop()) { @@ -1538,105 +1661,7 @@ int fcoe_percpu_receive_thread(void *arg) spin_lock_bh(&p->fcoe_rx_list.lock); } spin_unlock_bh(&p->fcoe_rx_list.lock); - fr = fcoe_dev_from_skb(skb); - lport = fr->fr_dev; - if (unlikely(!lport)) { - if (skb->destructor != fcoe_percpu_flush_done) - FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); - kfree_skb(skb); - continue; - } - - FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " - "head:%p data:%p tail:%p end:%p sum:%d dev:%s", - skb->len, skb->data_len, - skb->head, skb->data, skb_tail_pointer(skb), - skb_end_pointer(skb), skb->csum, - skb->dev ? skb->dev->name : ""); - - /* - * Save source MAC address before discarding header. - */ - port = lport_priv(lport); - if (skb_is_nonlinear(skb)) - skb_linearize(skb); /* not ideal */ - mac = eth_hdr(skb)->h_source; - - /* - * Frame length checks and setting up the header pointers - * was done in fcoe_rcv already. 
- */ - hp = (struct fcoe_hdr *) skb_network_header(skb); - fh = (struct fc_frame_header *) skb_transport_header(skb); - - stats = fc_lport_get_stats(lport); - if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { - if (stats->ErrorFrames < 5) - printk(KERN_WARNING "fcoe: FCoE version " - "mismatch: The frame has " - "version %x, but the " - "initiator supports version " - "%x\n", FC_FCOE_DECAPS_VER(hp), - FC_FCOE_VER); - stats->ErrorFrames++; - kfree_skb(skb); - continue; - } - - skb_pull(skb, sizeof(struct fcoe_hdr)); - fr_len = skb->len - sizeof(struct fcoe_crc_eof); - - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; - - fp = (struct fc_frame *)skb; - fc_frame_init(fp); - fr_dev(fp) = lport; - fr_sof(fp) = hp->fcoe_sof; - - /* Copy out the CRC and EOF trailer for access */ - if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { - kfree_skb(skb); - continue; - } - fr_eof(fp) = crc_eof.fcoe_eof; - fr_crc(fp) = crc_eof.fcoe_crc32; - if (pskb_trim(skb, fr_len)) { - kfree_skb(skb); - continue; - } - - /* - * We only check CRC if no offload is available and if it is - * it's solicited data, in which case, the FCP layer would - * check it during the copy. - */ - if (lport->crc_offload && - skb->ip_summed == CHECKSUM_UNNECESSARY) - fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; - else - fr_flags(fp) |= FCPHF_CRC_UNCHECKED; - - fh = fc_frame_header_get(fp); - if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && - fh->fh_type == FC_TYPE_FCP) { - fc_exch_recv(lport, fp); - continue; - } - if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { - if (le32_to_cpu(fr_crc(fp)) != - ~crc32(~0, skb->data, fr_len)) { - if (stats->InvalidCRCCount < 5) - printk(KERN_WARNING "fcoe: dropping " - "frame with CRC error\n"); - stats->InvalidCRCCount++; - stats->ErrorFrames++; - fc_frame_free(fp); - continue; - } - fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; - } - fc_exch_recv(lport, fp); + fcoe_recv_frame(skb); } return 0; } -- cgit v1.2.3-59-g8ed1b From 6580bbd0afe6ba1be5d53b331e92a7690046c923 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 20 Nov 2009 14:54:52 -0800 Subject: [SCSI] libfc: add FC-BB-5 LESB counters to fcoe_dev_stats FC-BB-5 Rev2.0, Clause 7.10 extends the FC-LS-3 LESB for FC-BB_E. We are already tracking Link Failure Count so add the rest in this patch. For VLinkFailureCount and MissDiscAdvCount, they are part of the per-cpu fcoe_dev_stats. For SymbolErrorCount, ErroredBlockCount, and FCSErrorCount, they are defined in IEEE 802.3-2008 and are per LLD. They are expected to come from LLD. 
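As an illustration, such a per-CPU counter is incremented on the CPU that observed the event, e.g. fc_lport_get_stats(lport)->VLinkFailureCount++, and only folded together when the statistics are reported. A minimal reporting sketch (the helper name is hypothetical; the per-CPU accessors are the ones the driver already uses elsewhere):

        /* hypothetical helper: sum the per-CPU VLinkFailureCount copies */
        static u64 fcoe_sum_vlink_failures(struct fc_lport *lport)
        {
                struct fcoe_dev_stats *stats;
                unsigned int cpu;
                u64 sum = 0;

                for_each_possible_cpu(cpu) {
                        stats = per_cpu_ptr(lport->dev_stats, cpu);
                        sum += stats->VLinkFailureCount;
                }
                return sum;
        }
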
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- include/scsi/libfc.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 2936fbae41e4..b97be2903cbc 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -231,6 +231,8 @@ struct fc_rport_priv { * @ControlRequests: Number of control requests * @InputMegabytes: Number of received megabytes * @OutputMegabytes: Number of transmitted megabytes + * @VLinkFailureCount: Number of virtual link failures + * @MissDiscAdvCount: Number of missing FIP discovery advertisement */ struct fcoe_dev_stats { u64 SecondsSinceLastReset; @@ -249,6 +251,8 @@ struct fcoe_dev_stats { u64 ControlRequests; u64 InputMegabytes; u64 OutputMegabytes; + u64 VLinkFailureCount; + u64 MissDiscAdvCount; }; /** -- cgit v1.2.3-59-g8ed1b From 8cdffdccd948ea4872b7b65280bc04f2fa93fc96 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 20 Nov 2009 14:54:57 -0800 Subject: [SCSI] libfcoe: add checking disable flag in FIP_FKA_ADV When the D bit is set if the FKA_ADV_Period of the FIP Discovery Advertisement, the ENode should not transmit period ENode FIP Keep Alive and VN_Port FIP Keep Alive (FC-BB-5 Rev2, 7.8.3.13). Note that fcf->flags is taken directly from the fip_header, I am claiming one bit for the purpose of the FIP_FKA_Period D bit as FIP_FL_FK_ADV_B, and use FIP_HEADER_FLAGS as bitmask for bits used in fip_header. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 4 +++- include/scsi/fc/fc_fip.h | 12 +++++++++++- include/scsi/libfcoe.h | 1 + 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 3c501d4973e3..9961fd7310b7 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -665,6 +665,8 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, if (dlen != sizeof(struct fip_fka_desc)) goto len_err; fka = (struct fip_fka_desc *)desc; + if (fka->fd_flags & FIP_FKA_ADV_D) + fcf->fd_flags = 1; t = ntohl(fka->fd_fka_period); if (t >= FCOE_CTLR_MIN_FKA) fcf->fka_period = msecs_to_jiffies(t); @@ -1160,7 +1162,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) } } - if (sel) { + if (sel && !sel->fd_flags) { if (time_after_eq(jiffies, fip->ctlr_ka_time)) { fip->ctlr_ka_time = jiffies + sel->fka_period; fip->send_ctlr_ka = 1; diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h index 3d138c1fcf8a..17baa19380f0 100644 --- a/include/scsi/fc/fc_fip.h +++ b/include/scsi/fc/fc_fip.h @@ -214,10 +214,20 @@ struct fip_vn_desc { */ struct fip_fka_desc { struct fip_desc fd_desc; - __u8 fd_resvd[2]; + __u8 fd_resvd; + __u8 fd_flags; /* bit0 is fka disable flag */ __be32 fd_fka_period; /* adv./keep-alive period in mS */ } __attribute__((packed)); +/* + * flags for fip_fka_desc.fd_flags + */ +enum fip_fka_flags { + FIP_FKA_ADV_D = 0x01, /* no need for FKA from ENode */ +}; + +/* FIP_DT_FKA flags */ + /* * FIP_DT_VENDOR descriptor. 
*/ diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 3837872f1965..c603f4a7e7fc 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -155,6 +155,7 @@ struct fcoe_fcf { u8 pri; u16 flags; u32 fka_period; + u8 fd_flags:1; }; /* FIP API functions */ -- cgit v1.2.3-59-g8ed1b From 2ec8493f962d55ae85c6716db414c645a6578333 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 20 Nov 2009 14:55:02 -0800 Subject: [SCSI] libfcoe: add tracking FIP Virtual Link Failure count Add tracking the Virtual Link Failure count when either we have found the FCF as "aged" or we are receiving FIP Clear Virtual Link from the FCF. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 9961fd7310b7..34800af808e1 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -578,6 +578,7 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) WARN_ON(!fip->fcf_count); fip->fcf_count--; kfree(fcf); + fc_lport_get_stats(fip->lp)->VLinkFailureCount++; } else if (fcoe_ctlr_mtu_valid(fcf) && (!sel_time || time_before(sel_time, fcf->time))) { sel_time = fcf->time; @@ -990,6 +991,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); spin_lock_bh(&fip->lock); + fc_lport_get_stats(lport)->VLinkFailureCount++; fcoe_ctlr_reset(fip); spin_unlock_bh(&fip->lock); -- cgit v1.2.3-59-g8ed1b From f3da80e76142d63a6849556461906fbe118d1442 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 20 Nov 2009 14:55:08 -0800 Subject: [SCSI] libfcoe: add tracking FIP Missing Discovery Advertisement count Add tracking the Missing Discovery Advertisement count for FIP Fiber Channel Forwarder (FCF) as described in FC-BB-5 Rev2.0 for LESB. The time is 1.5 times the FKA_ADV_PERIOD of the corresponding FCF. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 34800af808e1..9823291395ad 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -562,14 +562,28 @@ EXPORT_SYMBOL(fcoe_ctlr_els_send); * times its keep-alive period including fuzz. * * In addition, determine the time when an FCF selection can occur. + * + * Also, increment the MissDiscAdvCount when no advertisement is received + * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB). 
*/ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; struct fcoe_fcf *next; unsigned long sel_time = 0; + unsigned long mda_time = 0; list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + mda_time = fcf->fka_period + (fcf->fka_period >> 1); + if ((fip->sel_fcf == fcf) && + (time_after(jiffies, fcf->time + mda_time))) { + mod_timer(&fip->timer, jiffies + mda_time); + fc_lport_get_stats(fip->lp)->MissDiscAdvCount++; + printk(KERN_INFO "libfcoe: host%d: Missing Discovery " + "Advertisement for fab %llx count %lld\n", + fip->lp->host->host_no, fcf->fabric_name, + fc_lport_get_stats(fip->lp)->MissDiscAdvCount); + } if (time_after(jiffies, fcf->time + fcf->fka_period * 3 + msecs_to_jiffies(FIP_FCF_FUZZ * 3))) { if (fip->sel_fcf == fcf) -- cgit v1.2.3-59-g8ed1b From b21a0c397eea722ff84bbeaf5e6e732a06b69896 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 20 Nov 2009 14:55:14 -0800 Subject: [SCSI] libfc: add fcoe_fc_els_lesb to fc_fcoe.h for FC-BB-5 LESB definitions Add struct fcoe_fc_els_lesb as described in FC-BB-5 LESB for FCoE. It has the same size as LESB defined in FC-FS-3 (struct fc_els_lesb) but members have different meanings according to FC-BB-5. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- include/scsi/fc/fc_fcoe.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/include/scsi/fc/fc_fcoe.h b/include/scsi/fc/fc_fcoe.h index ccb3dbe90463..e6ad3d2ae475 100644 --- a/include/scsi/fc/fc_fcoe.h +++ b/include/scsi/fc/fc_fcoe.h @@ -85,6 +85,18 @@ struct fcoe_crc_eof { */ #define FCOE_MIN_FRAME 46 +/* + * FCoE Link Error Status Block: T11 FC-BB-5 Rev2.0, Clause 7.10. + */ +struct fcoe_fc_els_lesb { + __be32 lesb_link_fail; /* link failure count */ + __be32 lesb_vlink_fail; /* virtual link failure count */ + __be32 lesb_miss_fka; /* missing FIP keep-alive count */ + __be32 lesb_symb_err; /* symbol error during carrier count */ + __be32 lesb_err_block; /* errored block count */ + __be32 lesb_fcs_error; /* frame check sequence error count */ +}; + /* * fc_fcoe_set_mac - Store OUI + DID into MAC address field. * @mac: mac address to be set -- cgit v1.2.3-59-g8ed1b From b84056bf68404a5fe06b452ea9790b9927e793a6 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 20 Nov 2009 14:55:19 -0800 Subject: [SCSI] fcoe, libfc: add get_lesb() to allow LLD to fill the link error status block (LESB) Add a member function pointer as get_lesb to libfc_function_template so LLD can fill the LESB based on its own statistics. For fcoe, it fills the LESB as a fcoe_fc_els_lesb struct according to FC-BB-5. 
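Roughly, an LLD opts in by setting the new optional .get_lesb member of its libfc_function_template and reinterpreting the FC-FS buffer it is handed, relying on the two structures having the same size. A sketch with hypothetical my_lld_* names (the real fcoe implementation is in the hunks below):

        static void my_lld_get_lesb(struct fc_lport *lport,
                                    struct fc_els_lesb *fc_lesb)
        {
                /* same size as struct fc_els_lesb, FC-BB-5 member meanings */
                struct fcoe_fc_els_lesb *lesb =
                        (struct fcoe_fc_els_lesb *)fc_lesb;
                u32 vlink_failures = 0, missed_fka = 0; /* from LLD stats */

                memset(lesb, 0, sizeof(*lesb));
                lesb->lesb_vlink_fail = htonl(vlink_failures);
                lesb->lesb_miss_fka = htonl(missed_fka);
        }

The hook is then assigned as .get_lesb = my_lld_get_lesb in the LLD's libfc_function_template, which is exactly how fcoe wires up fcoe_get_lesb below.
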
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 34 ++++++++++++++++++++++++++++++++++ include/scsi/libfc.h | 6 ++++++ 2 files changed, 40 insertions(+) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 32298ed60614..a30ffaa1222c 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -111,6 +111,8 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *, void *, u32 timeout); static void fcoe_recv_frame(struct sk_buff *skb); +static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); + module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(create, "string"); MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in."); @@ -141,6 +143,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = { .ddp_setup = fcoe_ddp_setup, .ddp_done = fcoe_ddp_done, .elsct_send = fcoe_elsct_send, + .get_lesb = fcoe_get_lesb, }; struct fc_function_template fcoe_transport_function = { @@ -2455,3 +2458,34 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport) lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, NULL, NULL, 3 * lport->r_a_tov); } + +/** + * fcoe_get_lesb() - Fill the FCoE Link Error Status Block + * @lport: the local port + * @fc_lesb: the link error status block + */ +static void fcoe_get_lesb(struct fc_lport *lport, + struct fc_els_lesb *fc_lesb) +{ + unsigned int cpu; + u32 lfc, vlfc, mdac; + struct fcoe_dev_stats *devst; + struct fcoe_fc_els_lesb *lesb; + struct net_device *netdev = fcoe_netdev(lport); + + lfc = 0; + vlfc = 0; + mdac = 0; + lesb = (struct fcoe_fc_els_lesb *)fc_lesb; + memset(lesb, 0, sizeof(*lesb)); + for_each_possible_cpu(cpu) { + devst = per_cpu_ptr(lport->dev_stats, cpu); + lfc += devst->LinkFailureCount; + vlfc += devst->VLinkFailureCount; + mdac += devst->MissDiscAdvCount; + } + lesb->lesb_link_fail = htonl(lfc); + lesb->lesb_vlink_fail = htonl(vlfc); + lesb->lesb_miss_fka = htonl(mdac); + lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors); +} diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index b97be2903cbc..4b912eee33e5 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -510,6 +510,12 @@ struct libfc_function_template { * STATUS: OPTIONAL */ int (*ddp_done)(struct fc_lport *, u16); + /* + * Allow LLD to fill its own Link Error Status Block + * + * STATUS: OPTIONAL + */ + void (*get_lesb)(struct fc_lport *, struct fc_els_lesb *lesb); /* * Send a frame using an existing sequence and exchange. * -- cgit v1.2.3-59-g8ed1b From 63e27fb80c2010678681cef7b528ab8af3624fe9 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Fri, 20 Nov 2009 14:55:24 -0800 Subject: [SCSI] libfc: add support of receiving ELS_RLS Upon receiving ELS_RLS, send the Link Error Status Block (LESB) back. 
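Condensed, the responder builds an LS_ACC and either asks the LLD for its FC-BB-5 flavoured LESB or falls back to the generic fc_host statistics (a sketch of the core of the handler added below):

        rsp->rls_cmd = ELS_LS_ACC;
        lesb = &rsp->rls_lesb;
        if (lport->tt.get_lesb) {
                /* LLD fills the LESB from its own counters */
                lport->tt.get_lesb(lport, lesb);
        } else {
                /* otherwise derive it from the generic host statistics */
                fc_get_host_stats(lport->host);
                hst = &lport->host_stats;
                lesb->lesb_link_fail = htonl(hst->link_failure_count);
                lesb->lesb_inv_crc = htonl(hst->invalid_crc_count);
        }
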
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 76 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 91e2ba27f7bd..35ca0e72df46 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1097,6 +1097,78 @@ drop: fc_frame_free(in_fp); } +/** + * fc_rport_recv_rls_req() - Handle received Read Link Status request + * @rdata: The remote port that sent the RLS request + * @sp: The sequence that the RLS was on + * @rx_fp: The PRLI request frame + * + * Locking Note: The rport lock is expected to be held before calling + * this function. + */ +static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, + struct fc_seq *sp, struct fc_frame *rx_fp) + +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + struct fc_exch *ep = fc_seq_exch(sp); + struct fc_els_rls *rls; + struct fc_els_rls_resp *rsp; + struct fc_els_lesb *lesb; + struct fc_seq_els_data rjt_data; + struct fc_host_statistics *hst; + u32 f_ctl; + + FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n", + fc_rport_state(rdata)); + + rls = fc_frame_payload_get(rx_fp, sizeof(*rls)); + if (!rls) { + rjt_data.reason = ELS_RJT_PROT; + rjt_data.explan = ELS_EXPL_INV_LEN; + goto out_rjt; + } + + fp = fc_frame_alloc(lport, sizeof(*rsp)); + if (!fp) { + rjt_data.reason = ELS_RJT_UNAB; + rjt_data.explan = ELS_EXPL_INSUF_RES; + goto out_rjt; + } + + rsp = fc_frame_payload_get(fp, sizeof(*rsp)); + memset(rsp, 0, sizeof(*rsp)); + rsp->rls_cmd = ELS_LS_ACC; + lesb = &rsp->rls_lesb; + if (lport->tt.get_lesb) { + /* get LESB from LLD if it supports it */ + lport->tt.get_lesb(lport, lesb); + } else { + fc_get_host_stats(lport->host); + hst = &lport->host_stats; + lesb->lesb_link_fail = htonl(hst->link_failure_count); + lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count); + lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count); + lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count); + lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count); + lesb->lesb_inv_crc = htonl(hst->invalid_crc_count); + } + + sp = lport->tt.seq_start_next(sp); + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); + goto out; + +out_rjt: + rjt_data.fp = NULL; + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); +out: + fc_frame_free(rx_fp); +} + /** * fc_rport_recv_els_req() - Handler for validated ELS requests * @lport: The local port that received the ELS request @@ -1159,6 +1231,9 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, els_data.fp = fp; lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); break; + case ELS_RLS: + fc_rport_recv_rls_req(rdata, sp, fp); + break; default: fc_frame_free(fp); /* can't happen */ break; @@ -1203,6 +1278,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, case ELS_ADISC: case ELS_RRQ: case ELS_REC: + case ELS_RLS: fc_rport_recv_els_req(lport, sp, fp); break; default: -- cgit v1.2.3-59-g8ed1b From 0a55256d158c18e4821c248a295b7f8f4423660f Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:53:57 +0100 Subject: [SCSI] libfc: Add target reset flag to FCP header file While the target reset task management function has been deprecated in newer specs, it is still in use by SCSI FC drivers and there is no real replacement. 
Add the target reset flag to the FCP header file to allow usage of this definition in SCSI FC drivers. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- include/scsi/fc/fc_fcp.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h index 29ecb0b02b09..747e2c7d88d6 100644 --- a/include/scsi/fc/fc_fcp.h +++ b/include/scsi/fc/fc_fcp.h @@ -83,6 +83,8 @@ struct fcp_cmnd32 { * fc_tm_flags - task management flags field. */ #define FCP_TMF_CLR_ACA 0x40 /* clear ACA condition */ +#define FCP_TMF_TGT_RESET 0x20 /* target reset task management, + deprecated as of FCP-3 */ #define FCP_TMF_LUN_RESET 0x10 /* logical unit reset task management */ #define FCP_TMF_CLR_TASK_SET 0x04 /* clear task set */ #define FCP_TMF_ABT_TASK_SET 0x02 /* abort task set */ -- cgit v1.2.3-59-g8ed1b From ecf0c7721b104c0ce9c8ca534c911f6310cf92a8 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 24 Nov 2009 16:53:58 +0100 Subject: [SCSI] zfcp: Replace global config_lock with local list locks The global config_lock was used to protect the configuration organized in independent lists. It is not necessary to have a lock on driver level for this purpose. This patch replaces the global config_lock with a set of local list locks. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 80 ++++++++++++++++------------ drivers/s390/scsi/zfcp_ccw.c | 23 ++++---- drivers/s390/scsi/zfcp_def.h | 9 ++-- drivers/s390/scsi/zfcp_erp.c | 116 ++++++++++++++++++++++------------------- drivers/s390/scsi/zfcp_fc.c | 49 ++++++++--------- drivers/s390/scsi/zfcp_fsf.c | 22 ++++---- drivers/s390/scsi/zfcp_scsi.c | 52 ++++++++---------- drivers/s390/scsi/zfcp_sysfs.c | 48 +++++++---------- 8 files changed, 199 insertions(+), 200 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 2889e5f2dfd3..883e13948ace 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -187,7 +187,6 @@ static int __init zfcp_module_init(void) goto out_gid_cache; mutex_init(&zfcp_data.config_mutex); - rwlock_init(&zfcp_data.config_lock); zfcp_data.scsi_transport_template = fc_attach_transport(&zfcp_transport_functions); @@ -238,12 +237,18 @@ module_init(zfcp_module_init); */ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun) { + unsigned long flags; struct zfcp_unit *unit; - list_for_each_entry(unit, &port->unit_list_head, list) + read_lock_irqsave(&port->unit_list_lock, flags); + list_for_each_entry(unit, &port->unit_list, list) if ((unit->fcp_lun == fcp_lun) && - !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE)) - return unit; + !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE)) { + zfcp_unit_get(unit); + read_unlock_irqrestore(&port->unit_list_lock, flags); + return unit; + } + read_unlock_irqrestore(&port->unit_list_lock, flags); return NULL; } @@ -257,12 +262,18 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun) struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, u64 wwpn) { + unsigned long flags; struct zfcp_port *port; - list_for_each_entry(port, &adapter->port_list_head, list) + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) if ((port->wwpn == wwpn) && - !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE)) + !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE)) { + 
zfcp_port_get(port); + read_unlock_irqrestore(&adapter->port_list_lock, flags); return port; + } + read_unlock_irqrestore(&adapter->port_list_lock, flags); return NULL; } @@ -284,12 +295,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) { struct zfcp_unit *unit; - read_lock_irq(&zfcp_data.config_lock); - if (zfcp_get_unit_by_lun(port, fcp_lun)) { - read_unlock_irq(&zfcp_data.config_lock); + unit = zfcp_get_unit_by_lun(port, fcp_lun); + if (unit) { + zfcp_unit_put(unit); return ERR_PTR(-EINVAL); } - read_unlock_irq(&zfcp_data.config_lock); unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); if (!unit) @@ -335,13 +345,13 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) zfcp_unit_get(unit); - write_lock_irq(&zfcp_data.config_lock); - list_add_tail(&unit->list, &port->unit_list_head); + write_lock_irq(&port->unit_list_lock); + list_add_tail(&unit->list, &port->unit_list); + write_unlock_irq(&port->unit_list_lock); + atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); - write_unlock_irq(&zfcp_data.config_lock); - zfcp_port_get(port); return unit; @@ -356,11 +366,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) */ void zfcp_unit_dequeue(struct zfcp_unit *unit) { + struct zfcp_port *port = unit->port; + wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0); - write_lock_irq(&zfcp_data.config_lock); - list_del(&unit->list); - write_unlock_irq(&zfcp_data.config_lock); - zfcp_port_put(unit->port); + list_del(&unit->list); /* no list locking required */ + zfcp_port_put(port); sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs); device_unregister(&unit->sysfs_device); } @@ -539,11 +549,13 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (zfcp_fc_gs_setup(adapter)) goto generic_services_failed; + rwlock_init(&adapter->port_list_lock); + INIT_LIST_HEAD(&adapter->port_list); + init_waitqueue_head(&adapter->remove_wq); init_waitqueue_head(&adapter->erp_ready_wq); init_waitqueue_head(&adapter->erp_done_wqh); - INIT_LIST_HEAD(&adapter->port_list_head); INIT_LIST_HEAD(&adapter->erp_ready_head); INIT_LIST_HEAD(&adapter->erp_running_head); @@ -650,19 +662,20 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, { struct zfcp_port *port; - read_lock_irq(&zfcp_data.config_lock); - if (zfcp_get_port_by_wwpn(adapter, wwpn)) { - read_unlock_irq(&zfcp_data.config_lock); - return ERR_PTR(-EINVAL); + port = zfcp_get_port_by_wwpn(adapter, wwpn); + if (port) { + zfcp_port_put(port); + return ERR_PTR(-EEXIST); } - read_unlock_irq(&zfcp_data.config_lock); port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); if (!port) return ERR_PTR(-ENOMEM); + rwlock_init(&port->unit_list_lock); + INIT_LIST_HEAD(&port->unit_list); + init_waitqueue_head(&port->remove_wq); - INIT_LIST_HEAD(&port->unit_list_head); INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); INIT_WORK(&port->rport_work, zfcp_scsi_rport_work); @@ -698,13 +711,13 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, zfcp_port_get(port); - write_lock_irq(&zfcp_data.config_lock); - list_add_tail(&port->list, &adapter->port_list_head); + write_lock_irq(&adapter->port_list_lock); + list_add_tail(&port->list, &adapter->port_list); + write_unlock_irq(&adapter->port_list_lock); + atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status); - write_unlock_irq(&zfcp_data.config_lock); - zfcp_adapter_get(adapter); return port; } @@ -715,12 +728,11 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, */ void zfcp_port_dequeue(struct zfcp_port *port) { - write_lock_irq(&zfcp_data.config_lock); - list_del(&port->list); - write_unlock_irq(&zfcp_data.config_lock); + struct zfcp_adapter *adapter = port->adapter; + + list_del(&port->list); /* no list locking required here */ wait_event(port->remove_wq, atomic_read(&port->refcount) == 0); - cancel_work_sync(&port->rport_work); /* usually not necessary */ - zfcp_adapter_put(port->adapter); + zfcp_adapter_put(adapter); sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs); device_unregister(&port->sysfs_device); } diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index e08339428ecf..aca2047dc2d5 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -100,10 +100,11 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) mutex_lock(&zfcp_data.config_mutex); adapter = dev_get_drvdata(&ccw_device->dev); - if (!adapter) - goto out; mutex_unlock(&zfcp_data.config_mutex); + if (!adapter) + return; + cancel_work_sync(&adapter->scan_work); mutex_lock(&zfcp_data.config_mutex); @@ -111,18 +112,21 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) /* this also removes the scsi devices, so call it first */ zfcp_adapter_scsi_unregister(adapter); - write_lock_irq(&zfcp_data.config_lock); - list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { - list_for_each_entry_safe(unit, u, &port->unit_list_head, list) { - list_move(&unit->list, &unit_remove_lh); + write_lock_irq(&adapter->port_list_lock); + list_for_each_entry_safe(port, p, &adapter->port_list, list) { + write_lock(&port->unit_list_lock); + list_for_each_entry_safe(unit, u, &port->unit_list, list) { atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); + list_move(&unit->list, &unit_remove_lh); } - list_move(&port->list, &port_remove_lh); + write_unlock(&port->unit_list_lock); atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); + list_move(&port->list, &port_remove_lh); } atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); - write_unlock_irq(&zfcp_data.config_lock); + write_unlock_irq(&adapter->port_list_lock); + mutex_unlock(&zfcp_data.config_mutex); list_for_each_entry_safe(port, p, &port_remove_lh, list) { list_for_each_entry_safe(unit, u, &unit_remove_lh, list) @@ -131,9 +135,6 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) } wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0); zfcp_adapter_dequeue(adapter); - -out: - mutex_unlock(&zfcp_data.config_mutex); } /** diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 7da2fad8f515..e45a08d6c98e 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -461,7 +461,8 @@ struct zfcp_adapter { u32 hardware_version; /* of FCP channel */ u16 timer_ticks; /* time int for a tick */ struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ - struct list_head port_list_head; /* remote port list */ + struct list_head port_list; /* remote port list */ + rwlock_t port_list_lock; /* port list lock */ unsigned long req_no; /* unique FSF req number */ struct list_head *req_list; /* list of pending reqs */ spinlock_t req_list_lock; /* request list lock */ @@ -504,7 +505,8 @@ struct zfcp_port { wait_queue_head_t remove_wq; /* can be used 
to wait for refcount drop to zero */ struct zfcp_adapter *adapter; /* adapter used to access port */ - struct list_head unit_list_head; /* head of logical unit list */ + struct list_head unit_list; /* head of logical unit list */ + rwlock_t unit_list_lock; /* unit list lock */ atomic_t status; /* status of this remote port */ u64 wwnn; /* WWNN if known */ u64 wwpn; /* WWPN */ @@ -601,9 +603,6 @@ struct zfcp_fsf_req { struct zfcp_data { struct scsi_host_template scsi_host_template; struct scsi_transport_template *scsi_transport_template; - rwlock_t config_lock; /* serialises changes - to adapter/port/unit - lists */ struct mutex config_mutex; struct kmem_cache *gpn_ft_cache; struct kmem_cache *qtcb_cache; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index f73e2180f333..464f0473877a 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -99,9 +99,12 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) zfcp_erp_action_dismiss(&port->erp_action); - else - list_for_each_entry(unit, &port->unit_list_head, list) - zfcp_erp_action_dismiss_unit(unit); + else { + read_lock(&port->unit_list_lock); + list_for_each_entry(unit, &port->unit_list, list) + zfcp_erp_action_dismiss_unit(unit); + read_unlock(&port->unit_list_lock); + } } static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) @@ -110,9 +113,12 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE) zfcp_erp_action_dismiss(&adapter->erp_action); - else - list_for_each_entry(port, &adapter->port_list_head, list) + else { + read_lock(&adapter->port_list_lock); + list_for_each_entry(port, &adapter->port_list, list) zfcp_erp_action_dismiss_port(port); + read_unlock(&adapter->port_list_lock); + } } static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, @@ -264,11 +270,16 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, { unsigned long flags; - read_lock_irqsave(&zfcp_data.config_lock, flags); - write_lock(&adapter->erp_lock); - _zfcp_erp_adapter_reopen(adapter, clear, id, ref); - write_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + zfcp_erp_adapter_block(adapter, clear); + zfcp_scsi_schedule_rports_block(adapter); + + write_lock_irqsave(&adapter->erp_lock, flags); + if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + zfcp_erp_adapter_failed(adapter, "erareo1", NULL); + else + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, + NULL, NULL, id, ref); + write_unlock_irqrestore(&adapter->erp_lock, flags); } /** @@ -345,11 +356,9 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id, unsigned long flags; struct zfcp_adapter *adapter = port->adapter; - read_lock_irqsave(&zfcp_data.config_lock, flags); - write_lock(&adapter->erp_lock); + write_lock_irqsave(&adapter->erp_lock, flags); _zfcp_erp_port_forced_reopen(port, clear, id, ref); - write_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + write_unlock_irqrestore(&adapter->erp_lock, flags); } static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, @@ -377,15 +386,13 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, */ int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref) { - unsigned long flags; int retval; + unsigned 
long flags; struct zfcp_adapter *adapter = port->adapter; - read_lock_irqsave(&zfcp_data.config_lock, flags); - write_lock(&adapter->erp_lock); + write_lock_irqsave(&adapter->erp_lock, flags); retval = _zfcp_erp_port_reopen(port, clear, id, ref); - write_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + write_unlock_irqrestore(&adapter->erp_lock, flags); return retval; } @@ -424,11 +431,9 @@ void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, struct zfcp_port *port = unit->port; struct zfcp_adapter *adapter = port->adapter; - read_lock_irqsave(&zfcp_data.config_lock, flags); - write_lock(&adapter->erp_lock); + write_lock_irqsave(&adapter->erp_lock, flags); _zfcp_erp_unit_reopen(unit, clear, id, ref); - write_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + write_unlock_irqrestore(&adapter->erp_lock, flags); } static int status_change_set(unsigned long mask, atomic_t *status) @@ -540,8 +545,10 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, { struct zfcp_port *port; - list_for_each_entry(port, &adapter->port_list_head, list) + read_lock(&adapter->port_list_lock); + list_for_each_entry(port, &adapter->port_list, list) _zfcp_erp_port_reopen(port, clear, id, ref); + read_unlock(&adapter->port_list_lock); } static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, @@ -549,8 +556,10 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, { struct zfcp_unit *unit; - list_for_each_entry(unit, &port->unit_list_head, list) + read_lock(&port->unit_list_lock); + list_for_each_entry(unit, &port->unit_list, list) _zfcp_erp_unit_reopen(unit, clear, id, ref); + read_unlock(&port->unit_list_lock); } static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) @@ -590,16 +599,14 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter) { unsigned long flags; - read_lock_irqsave(&zfcp_data.config_lock, flags); - read_lock(&adapter->erp_lock); + read_lock_irqsave(&adapter->erp_lock, flags); if (list_empty(&adapter->erp_ready_head) && list_empty(&adapter->erp_running_head)) { atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); wake_up(&adapter->erp_done_wqh); } - read_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + read_unlock_irqrestore(&adapter->erp_lock, flags); } static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) @@ -1214,11 +1221,10 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action) static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) { int retval; - struct zfcp_adapter *adapter = erp_action->adapter; unsigned long flags; + struct zfcp_adapter *adapter = erp_action->adapter; - read_lock_irqsave(&zfcp_data.config_lock, flags); - write_lock(&adapter->erp_lock); + write_lock_irqsave(&adapter->erp_lock, flags); zfcp_erp_strategy_check_fsfreq(erp_action); @@ -1231,11 +1237,9 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) zfcp_erp_action_to_running(erp_action); /* no lock to allow for blocking operations */ - write_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + write_unlock_irqrestore(&adapter->erp_lock, flags); retval = zfcp_erp_strategy_do_action(erp_action); - read_lock_irqsave(&zfcp_data.config_lock, flags); - write_lock(&adapter->erp_lock); + write_lock_irqsave(&adapter->erp_lock, flags); if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) retval = 
ZFCP_ERP_CONTINUES; @@ -1273,8 +1277,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) zfcp_erp_strategy_followup_failed(erp_action); unlock: - write_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + write_unlock_irqrestore(&adapter->erp_lock, flags); if (retval != ZFCP_ERP_CONTINUES) zfcp_erp_action_cleanup(erp_action, retval); @@ -1415,6 +1418,7 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, void *ref, u32 mask, int set_or_clear) { struct zfcp_port *port; + unsigned long flags; u32 common_mask = mask & ZFCP_COMMON_FLAGS; if (set_or_clear == ZFCP_SET) { @@ -1429,10 +1433,13 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, atomic_set(&adapter->erp_counter, 0); } - if (common_mask) - list_for_each_entry(port, &adapter->port_list_head, list) + if (common_mask) { + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) zfcp_erp_modify_port_status(port, id, ref, common_mask, set_or_clear); + read_unlock_irqrestore(&adapter->port_list_lock, flags); + } } /** @@ -1449,6 +1456,7 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, u32 mask, int set_or_clear) { struct zfcp_unit *unit; + unsigned long flags; u32 common_mask = mask & ZFCP_COMMON_FLAGS; if (set_or_clear == ZFCP_SET) { @@ -1463,10 +1471,13 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, atomic_set(&port->erp_counter, 0); } - if (common_mask) - list_for_each_entry(unit, &port->unit_list_head, list) + if (common_mask) { + read_lock_irqsave(&port->unit_list_lock, flags); + list_for_each_entry(unit, &port->unit_list, list) zfcp_erp_modify_unit_status(unit, id, ref, common_mask, set_or_clear); + read_unlock_irqrestore(&port->unit_list_lock, flags); + } } /** @@ -1502,12 +1513,8 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref, */ void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref) { - unsigned long flags; - - read_lock_irqsave(&zfcp_data.config_lock, flags); zfcp_erp_modify_port_status(port, id, ref, ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); } @@ -1535,13 +1542,9 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref) */ void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref) { - unsigned long flags; - - read_lock_irqsave(&zfcp_data.config_lock, flags); zfcp_erp_modify_port_status(port, id, ref, ZFCP_STATUS_COMMON_ERP_FAILED | ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); } /** @@ -1574,12 +1577,15 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id, void *ref) { struct zfcp_unit *unit; + unsigned long flags; int status = atomic_read(&port->status); if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | ZFCP_STATUS_COMMON_ACCESS_BOXED))) { - list_for_each_entry(unit, &port->unit_list_head, list) + read_lock_irqsave(&port->unit_list_lock, flags); + list_for_each_entry(unit, &port->unit_list, list) zfcp_erp_unit_access_changed(unit, id, ref); + read_unlock_irqrestore(&port->unit_list_lock, flags); return; } @@ -1595,14 +1601,14 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id, void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id, void *ref) { - struct zfcp_port *port; unsigned 
long flags; + struct zfcp_port *port; if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) return; - read_lock_irqsave(&zfcp_data.config_lock, flags); - list_for_each_entry(port, &adapter->port_list_head, list) + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) zfcp_erp_port_access_changed(port, id, ref); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + read_unlock_irqrestore(&adapter->port_list_lock, flags); } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index df23bcead23d..c7efdc51df63 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -145,10 +145,11 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, struct fcp_rscn_element *elem) { unsigned long flags; + struct zfcp_adapter *adapter = fsf_req->adapter; struct zfcp_port *port; - read_lock_irqsave(&zfcp_data.config_lock, flags); - list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) { if ((port->d_id & range) == (elem->nport_did & range)) zfcp_fc_test_link(port); if (!port->d_id) @@ -156,8 +157,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, ZFCP_STATUS_COMMON_ERP_FAILED, "fcrscn1", NULL); } - - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + read_unlock_irqrestore(&adapter->port_list_lock, flags); } static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) @@ -187,18 +187,17 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn) { + unsigned long flags; struct zfcp_adapter *adapter = req->adapter; struct zfcp_port *port; - unsigned long flags; - read_lock_irqsave(&zfcp_data.config_lock, flags); - list_for_each_entry(port, &adapter->port_list_head, list) - if (port->wwpn == wwpn) + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) + if (port->wwpn == wwpn) { + zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req); break; - read_unlock_irqrestore(&zfcp_data.config_lock, flags); - - if (port && (port->wwpn == wwpn)) - zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req); + } + read_unlock_irqrestore(&adapter->port_list_lock, flags); } static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req) @@ -579,20 +578,17 @@ static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, static void zfcp_fc_validate_port(struct zfcp_port *port) { - struct zfcp_adapter *adapter = port->adapter; - if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)) return; atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); if ((port->supported_classes != 0) || - !list_empty(&port->unit_list_head)) { + !list_empty(&port->unit_list)) { zfcp_port_put(port); return; } zfcp_erp_port_shutdown(port, 0, "fcpval1", NULL); - zfcp_erp_wait(adapter); zfcp_port_put(port); zfcp_port_dequeue(port); } @@ -605,6 +601,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) struct gpn_ft_resp_acc *acc = sg_virt(sg); struct zfcp_adapter *adapter = ct->wka_port->adapter; struct zfcp_port *port, *tmp; + unsigned long flags; u32 d_id; int ret = 0, x, last = 0; @@ -643,21 +640,20 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) /* skip the adapter's port and known remote ports */ if (acc->wwpn == fc_host_port_name(adapter->scsi_host)) continue; - port = 
zfcp_get_port_by_wwpn(adapter, acc->wwpn); - if (port) - continue; port = zfcp_port_enqueue(adapter, acc->wwpn, ZFCP_STATUS_COMMON_NOESC, d_id); - if (IS_ERR(port)) - ret = PTR_ERR(port); - else + if (!IS_ERR(port)) zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL); + else if (PTR_ERR(port) != -EEXIST) + ret = PTR_ERR(port); } zfcp_erp_wait(adapter); - list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) + write_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry_safe(port, tmp, &adapter->port_list, list) zfcp_fc_validate_port(port); + write_unlock_irqrestore(&adapter->port_list_lock, flags); mutex_unlock(&zfcp_data.config_mutex); return ret; } @@ -760,15 +756,14 @@ int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) els_fc_job->els.adapter = adapter; if (rport) { - read_lock_irq(&zfcp_data.config_lock); port = zfcp_get_port_by_wwpn(adapter, rport->port_name); - if (port) - els_fc_job->els.d_id = port->d_id; - read_unlock_irq(&zfcp_data.config_lock); if (!port) { kfree(els_fc_job); return -EINVAL; } + + els_fc_job->els.d_id = port->d_id; + zfcp_port_put(port); } else { port_did = job->request->rqst_data.h_els.port_id; els_fc_job->els.d_id = (port_did[0] << 16) + diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 4e41baa0c141..9df62f686812 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -122,36 +122,32 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req) static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) { + unsigned long flags; struct fsf_status_read_buffer *sr_buf = req->data; struct zfcp_adapter *adapter = req->adapter; struct zfcp_port *port; int d_id = sr_buf->d_id & ZFCP_DID_MASK; - unsigned long flags; - read_lock_irqsave(&zfcp_data.config_lock, flags); - list_for_each_entry(port, &adapter->port_list_head, list) + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) if (port->d_id == d_id) { - read_unlock_irqrestore(&zfcp_data.config_lock, flags); zfcp_erp_port_reopen(port, 0, "fssrpc1", req); - return; + break; } - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + read_unlock_irqrestore(&adapter->port_list_lock, flags); } static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, struct fsf_link_down_info *link_down) { struct zfcp_adapter *adapter = req->adapter; - unsigned long flags; if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) return; atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); - read_lock_irqsave(&zfcp_data.config_lock, flags); zfcp_scsi_schedule_rports_block(adapter); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); if (!link_down) goto out; @@ -1765,9 +1761,11 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) /* can't use generic zfcp_erp_modify_port_status because * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); - list_for_each_entry(unit, &port->unit_list_head, list) + read_lock(&port->unit_list_lock); + list_for_each_entry(unit, &port->unit_list, list) atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); + read_unlock(&port->unit_list_lock); zfcp_erp_port_boxed(port, "fscpph2", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_RETRY; @@ -1787,9 +1785,11 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 
atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); - list_for_each_entry(unit, &port->unit_list_head, list) + read_lock(&port->unit_list_lock); + list_for_each_entry(unit, &port->unit_list, list) atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); + read_unlock(&port->unit_list_lock); break; } } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index f54655998bd5..6feece3b2e36 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -128,49 +128,44 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, } static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, - int channel, unsigned int id, - unsigned int lun) + unsigned int id, u64 lun) { + unsigned long flags; struct zfcp_port *port; - struct zfcp_unit *unit; - int scsi_lun; + struct zfcp_unit *unit = NULL; - list_for_each_entry(port, &adapter->port_list_head, list) { + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) { if (!port->rport || (id != port->rport->scsi_target_id)) continue; - list_for_each_entry(unit, &port->unit_list_head, list) { - scsi_lun = scsilun_to_int( - (struct scsi_lun *)&unit->fcp_lun); - if (lun == scsi_lun) - return unit; - } + unit = zfcp_get_unit_by_lun(port, lun); + if (unit) + break; } + read_unlock_irqrestore(&adapter->port_list_lock, flags); - return NULL; + return unit; } static int zfcp_scsi_slave_alloc(struct scsi_device *sdp) { struct zfcp_adapter *adapter; struct zfcp_unit *unit; - unsigned long flags; - int retval = -ENXIO; + u64 lun; adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; if (!adapter) goto out; - read_lock_irqsave(&zfcp_data.config_lock, flags); - unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); + int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun); + unit = zfcp_unit_lookup(adapter, sdp->id, lun); if (unit) { sdp->hostdata = unit; unit->device = sdp; - zfcp_unit_get(unit); - retval = 0; + return 0; } - read_unlock_irqrestore(&zfcp_data.config_lock, flags); out: - return retval; + return -ENXIO; } static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) @@ -338,12 +333,12 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) if (!shost) return; - read_lock_irq(&zfcp_data.config_lock); - list_for_each_entry(port, &adapter->port_list_head, list) + read_lock_irq(&adapter->port_list_lock); + list_for_each_entry(port, &adapter->port_list, list) if (port->rport) port->rport = NULL; + read_unlock_irq(&adapter->port_list_lock); - read_unlock_irq(&zfcp_data.config_lock); fc_remove_host(shost); scsi_remove_host(shost); scsi_host_put(shost); @@ -508,7 +503,7 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) * @rport: The FC rport where to teminate I/O * * Abort all pending SCSI commands for a port by closing the - * port. Using a reopen for avoids a conflict with a shutdown + * port. Using a reopen avoiding a conflict with a shutdown * overwriting a reopen. 
*/ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) @@ -518,11 +513,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) struct zfcp_adapter *adapter = (struct zfcp_adapter *)shost->hostdata[0]; - write_lock_irq(&zfcp_data.config_lock); port = zfcp_get_port_by_wwpn(adapter, rport->port_name); - if (port) - zfcp_port_get(port); - write_unlock_irq(&zfcp_data.config_lock); if (port) { zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); @@ -589,10 +580,13 @@ void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) { + unsigned long flags; struct zfcp_port *port; - list_for_each_entry(port, &adapter->port_list_head, list) + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) zfcp_scsi_schedule_rport_block(port); + read_unlock_irqrestore(&adapter->port_list_lock, flags); } void zfcp_scsi_rport_work(struct work_struct *work) diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index d31000886ca8..8430b518357e 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -153,15 +153,14 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, goto out; } - write_lock_irq(&zfcp_data.config_lock); port = zfcp_get_port_by_wwpn(adapter, wwpn); - if (port && (atomic_read(&port->refcount) == 0)) { - zfcp_port_get(port); + if (port && (atomic_read(&port->refcount) == 1)) { atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); + write_lock_irq(&adapter->port_list_lock); list_move(&port->list, &port_remove_lh); + write_unlock_irq(&adapter->port_list_lock); } else port = NULL; - write_unlock_irq(&zfcp_data.config_lock); if (!port) { retval = -ENXIO; @@ -253,35 +252,28 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, goto out; } - write_lock_irq(&zfcp_data.config_lock); unit = zfcp_get_unit_by_lun(port, fcp_lun); - if (unit) { - write_unlock_irq(&zfcp_data.config_lock); - /* wait for possible timeout during SCSI probe */ - flush_work(&unit->scsi_work); - write_lock_irq(&zfcp_data.config_lock); - - if (atomic_read(&unit->refcount) == 0) { - zfcp_unit_get(unit); - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, - &unit->status); - list_move(&unit->list, &unit_remove_lh); - } else { - unit = NULL; - } - } - - write_unlock_irq(&zfcp_data.config_lock); - if (!unit) { - retval = -ENXIO; + retval = -EINVAL; goto out; } - zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); - zfcp_erp_wait(unit->port->adapter); - zfcp_unit_put(unit); - zfcp_unit_dequeue(unit); + /* wait for possible timeout during SCSI probe */ + flush_work(&unit->scsi_work); + + if (atomic_read(&unit->refcount) == 1) { + atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); + + write_lock_irq(&port->unit_list_lock); + list_move(&unit->list, &unit_remove_lh); + write_unlock_irq(&port->unit_list_lock); + + zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); + zfcp_erp_wait(unit->port->adapter); + zfcp_unit_put(unit); + zfcp_unit_dequeue(unit); + } else + zfcp_unit_put(unit); out: mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; -- cgit v1.2.3-59-g8ed1b From f3450c7b917201bb49d67032e9f60d5125675d6a Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 24 Nov 2009 16:53:59 +0100 Subject: [SCSI] zfcp: Replace local reference counting with common kref Replace the local reference counting by already available mechanisms offered by kref. 
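In outline, the driver-private get/put pairs are replaced by references on the embedded struct device where one exists, and by a plain kref for the adapter; a sketch of the conversion (the actual hunks follow):

        /* port and unit: reuse the reference count of the embedded device */
        get_device(&port->sysfs_device);
        /* ... use the port ... */
        put_device(&port->sysfs_device);

        /* adapter: take and drop a kref, releasing through a callback */
        kref_get(&adapter->ref);
        /* ... use the adapter ... */
        kref_put(&adapter->ref, zfcp_adapter_release);
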
Where possible existing device structures were used, including the same functionality. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 231 +++++++++++++++++++---------------------- drivers/s390/scsi/zfcp_ccw.c | 16 +-- drivers/s390/scsi/zfcp_cfdc.c | 5 +- drivers/s390/scsi/zfcp_dbf.c | 2 + drivers/s390/scsi/zfcp_def.h | 53 +--------- drivers/s390/scsi/zfcp_erp.c | 20 ++-- drivers/s390/scsi/zfcp_ext.h | 6 +- drivers/s390/scsi/zfcp_fc.c | 42 +++++--- drivers/s390/scsi/zfcp_fsf.c | 12 +-- drivers/s390/scsi/zfcp_scsi.c | 23 ++-- drivers/s390/scsi/zfcp_sysfs.c | 44 ++++---- 11 files changed, 196 insertions(+), 258 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 883e13948ace..8492ceac1409 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -96,13 +96,12 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) adapter = dev_get_drvdata(&ccwdev->dev); if (!adapter) goto out_unlock; - zfcp_adapter_get(adapter); + kref_get(&adapter->ref); port = zfcp_get_port_by_wwpn(adapter, wwpn); if (!port) goto out_port; - zfcp_port_get(port); unit = zfcp_unit_enqueue(port, lun); if (IS_ERR(unit)) goto out_unit; @@ -113,11 +112,10 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) flush_work(&unit->scsi_work); mutex_lock(&zfcp_data.config_mutex); - zfcp_unit_put(unit); out_unit: - zfcp_port_put(port); + put_device(&port->sysfs_device); out_port: - zfcp_adapter_put(adapter); + kref_put(&adapter->ref, zfcp_adapter_release); out_unlock: mutex_unlock(&zfcp_data.config_mutex); out_ccwdev: @@ -244,7 +242,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun) list_for_each_entry(unit, &port->unit_list, list) if ((unit->fcp_lun == fcp_lun) && !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE)) { - zfcp_unit_get(unit); + get_device(&unit->sysfs_device); read_unlock_irqrestore(&port->unit_list_lock, flags); return unit; } @@ -269,7 +267,7 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, list_for_each_entry(port, &adapter->port_list, list) if ((port->wwpn == wwpn) && !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE)) { - zfcp_port_get(port); + get_device(&port->sysfs_device); read_unlock_irqrestore(&adapter->port_list_lock, flags); return port; } @@ -277,9 +275,20 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, return NULL; } -static void zfcp_sysfs_unit_release(struct device *dev) +/** + * zfcp_unit_release - dequeue unit + * @dev: pointer to device + * + * waits until all work is done on unit and removes it then from the unit->list + * of the associated port. 
+ */ +static void zfcp_unit_release(struct device *dev) { - kfree(container_of(dev, struct zfcp_unit, sysfs_device)); + struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, + sysfs_device); + + put_device(&unit->port->sysfs_device); + kfree(unit); } /** @@ -294,36 +303,39 @@ static void zfcp_sysfs_unit_release(struct device *dev) struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) { struct zfcp_unit *unit; + int retval = -ENOMEM; + + get_device(&port->sysfs_device); unit = zfcp_get_unit_by_lun(port, fcp_lun); if (unit) { - zfcp_unit_put(unit); - return ERR_PTR(-EINVAL); + put_device(&unit->sysfs_device); + retval = -EEXIST; + goto err_out; } unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); if (!unit) - return ERR_PTR(-ENOMEM); - - atomic_set(&unit->refcount, 0); - init_waitqueue_head(&unit->remove_wq); - INIT_WORK(&unit->scsi_work, zfcp_scsi_scan); + goto err_out; unit->port = port; unit->fcp_lun = fcp_lun; + unit->sysfs_device.parent = &port->sysfs_device; + unit->sysfs_device.release = zfcp_unit_release; if (dev_set_name(&unit->sysfs_device, "0x%016llx", (unsigned long long) fcp_lun)) { kfree(unit); - return ERR_PTR(-ENOMEM); + goto err_out; } - unit->sysfs_device.parent = &port->sysfs_device; - unit->sysfs_device.release = zfcp_sysfs_unit_release; dev_set_drvdata(&unit->sysfs_device, unit); + retval = -EINVAL; /* mark unit unusable as long as sysfs registration is not complete */ atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); + INIT_WORK(&unit->scsi_work, zfcp_scsi_scan); + spin_lock_init(&unit->latencies.lock); unit->latencies.write.channel.min = 0xFFFFFFFF; unit->latencies.write.fabric.min = 0xFFFFFFFF; @@ -334,16 +346,12 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) if (device_register(&unit->sysfs_device)) { put_device(&unit->sysfs_device); - return ERR_PTR(-EINVAL); + goto err_out; } if (sysfs_create_group(&unit->sysfs_device.kobj, - &zfcp_sysfs_unit_attrs)) { - device_unregister(&unit->sysfs_device); - return ERR_PTR(-EINVAL); - } - - zfcp_unit_get(unit); + &zfcp_sysfs_unit_attrs)) + goto err_out_put; write_lock_irq(&port->unit_list_lock); list_add_tail(&unit->list, &port->unit_list); @@ -352,27 +360,13 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); - zfcp_port_get(port); - return unit; -} - -/** - * zfcp_unit_dequeue - dequeue unit - * @unit: pointer to zfcp_unit - * - * waits until all work is done on unit and removes it then from the unit->list - * of the associated port. 
- */ -void zfcp_unit_dequeue(struct zfcp_unit *unit) -{ - struct zfcp_port *port = unit->port; - wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0); - list_del(&unit->list); /* no list locking required */ - zfcp_port_put(port); - sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs); +err_out_put: device_unregister(&unit->sysfs_device); +err_out: + put_device(&port->sysfs_device); + return ERR_PTR(retval); } static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) @@ -518,41 +512,44 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) { struct zfcp_adapter *adapter; - /* - * Note: It is safe to release the list_lock, as any list changes - * are protected by the config_mutex, which must be held to get here - */ + if (!get_device(&ccw_device->dev)) + return -ENODEV; adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL); - if (!adapter) + if (!adapter) { + put_device(&ccw_device->dev); return -ENOMEM; + } + + kref_init(&adapter->ref); ccw_device->handler = NULL; adapter->ccw_device = ccw_device; - atomic_set(&adapter->refcount, 0); + + INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); + INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later); if (zfcp_qdio_setup(adapter)) - goto qdio_failed; + goto failed; if (zfcp_allocate_low_mem_buffers(adapter)) - goto low_mem_buffers_failed; + goto failed; if (zfcp_reqlist_alloc(adapter)) - goto low_mem_buffers_failed; + goto failed; if (zfcp_dbf_adapter_register(adapter)) - goto debug_register_failed; + goto failed; if (zfcp_setup_adapter_work_queue(adapter)) - goto work_queue_failed; + goto failed; if (zfcp_fc_gs_setup(adapter)) - goto generic_services_failed; + goto failed; rwlock_init(&adapter->port_list_lock); INIT_LIST_HEAD(&adapter->port_list); - init_waitqueue_head(&adapter->remove_wq); init_waitqueue_head(&adapter->erp_ready_wq); init_waitqueue_head(&adapter->erp_done_wqh); @@ -565,10 +562,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) rwlock_init(&adapter->abort_lock); if (zfcp_erp_thread_setup(adapter)) - goto erp_thread_failed; - - INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); - INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later); + goto failed; adapter->service_level.seq_print = zfcp_print_sl; @@ -579,54 +573,37 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (sysfs_create_group(&ccw_device->dev.kobj, &zfcp_sysfs_adapter_attrs)) - goto sysfs_failed; + goto failed; atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); if (!zfcp_adapter_scsi_register(adapter)) return 0; -sysfs_failed: - zfcp_erp_thread_kill(adapter); -erp_thread_failed: - zfcp_fc_gs_destroy(adapter); -generic_services_failed: - zfcp_destroy_adapter_work_queue(adapter); -work_queue_failed: - zfcp_dbf_adapter_unregister(adapter->dbf); -debug_register_failed: - dev_set_drvdata(&ccw_device->dev, NULL); - kfree(adapter->req_list); -low_mem_buffers_failed: - zfcp_free_low_mem_buffers(adapter); -qdio_failed: - zfcp_qdio_destroy(adapter->qdio); - kfree(adapter); +failed: + kref_put(&adapter->ref, zfcp_adapter_release); return -ENOMEM; } /** - * zfcp_adapter_dequeue - remove the adapter from the resource list - * @adapter: pointer to struct zfcp_adapter which should be removed + * zfcp_adapter_release - remove the adapter from the resource list + * @ref: pointer to struct kref * locks: adapter list write lock is assumed to be held by caller */ -void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) +void zfcp_adapter_release(struct kref *ref) { - int retval = 0; 
- unsigned long flags; + struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter, + ref); + struct ccw_device *ccw_device = adapter->ccw_device; cancel_work_sync(&adapter->stat_work); + zfcp_fc_wka_ports_force_offline(adapter->gs); - sysfs_remove_group(&adapter->ccw_device->dev.kobj, - &zfcp_sysfs_adapter_attrs); - dev_set_drvdata(&adapter->ccw_device->dev, NULL); - /* sanity check: no pending FSF requests */ - spin_lock_irqsave(&adapter->req_list_lock, flags); - retval = zfcp_reqlist_isempty(adapter); - spin_unlock_irqrestore(&adapter->req_list_lock, flags); - if (!retval) - return; + sysfs_remove_group(&ccw_device->dev.kobj, &zfcp_sysfs_adapter_attrs); + + dev_set_drvdata(&ccw_device->dev, NULL); + dev_set_drvdata(&adapter->ccw_device->dev, NULL); zfcp_fc_gs_destroy(adapter); zfcp_erp_thread_kill(adapter); zfcp_destroy_adapter_work_queue(adapter); @@ -637,11 +614,30 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); kfree(adapter); + put_device(&ccw_device->dev); +} + +/** + * zfcp_device_unregister - remove port, unit from system + * @dev: reference to device which is to be removed + * @grp: related reference to attribute group + * + * Helper function to unregister port, unit from system + */ +void zfcp_device_unregister(struct device *dev, + const struct attribute_group *grp) +{ + sysfs_remove_group(&dev->kobj, grp); + device_unregister(dev); } -static void zfcp_sysfs_port_release(struct device *dev) +static void zfcp_port_release(struct device *dev) { - kfree(container_of(dev, struct zfcp_port, sysfs_device)); + struct zfcp_port *port = container_of(dev, struct zfcp_port, + sysfs_device); + + kref_put(&port->adapter->ref, zfcp_adapter_release); + kfree(port); } /** @@ -661,21 +657,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, u32 status, u32 d_id) { struct zfcp_port *port; + int retval = -ENOMEM; + + kref_get(&adapter->ref); port = zfcp_get_port_by_wwpn(adapter, wwpn); if (port) { - zfcp_port_put(port); - return ERR_PTR(-EEXIST); + put_device(&port->sysfs_device); + retval = -EEXIST; + goto err_out; } port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); if (!port) - return ERR_PTR(-ENOMEM); + goto err_out; rwlock_init(&port->unit_list_lock); INIT_LIST_HEAD(&port->unit_list); - init_waitqueue_head(&port->remove_wq); INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); INIT_WORK(&port->rport_work, zfcp_scsi_rport_work); @@ -684,32 +683,28 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, port->d_id = d_id; port->wwpn = wwpn; port->rport_task = RPORT_NONE; + port->sysfs_device.parent = &adapter->ccw_device->dev; + port->sysfs_device.release = zfcp_port_release; /* mark port unusable as long as sysfs registration is not complete */ atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); - atomic_set(&port->refcount, 0); if (dev_set_name(&port->sysfs_device, "0x%016llx", (unsigned long long)wwpn)) { kfree(port); - return ERR_PTR(-ENOMEM); + goto err_out; } - port->sysfs_device.parent = &adapter->ccw_device->dev; - port->sysfs_device.release = zfcp_sysfs_port_release; dev_set_drvdata(&port->sysfs_device, port); + retval = -EINVAL; if (device_register(&port->sysfs_device)) { put_device(&port->sysfs_device); - return ERR_PTR(-EINVAL); + goto err_out; } if (sysfs_create_group(&port->sysfs_device.kobj, - &zfcp_sysfs_port_attrs)) { - device_unregister(&port->sysfs_device); - 
return ERR_PTR(-EINVAL); - } - - zfcp_port_get(port); + &zfcp_sysfs_port_attrs)) + goto err_out_put; write_lock_irq(&adapter->port_list_lock); list_add_tail(&port->list, &adapter->port_list); @@ -718,23 +713,13 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status); - zfcp_adapter_get(adapter); return port; -} -/** - * zfcp_port_dequeue - dequeues a port from the port list of the adapter - * @port: pointer to struct zfcp_port which should be removed - */ -void zfcp_port_dequeue(struct zfcp_port *port) -{ - struct zfcp_adapter *adapter = port->adapter; - - list_del(&port->list); /* no list locking required here */ - wait_event(port->remove_wq, atomic_read(&port->refcount) == 0); - zfcp_adapter_put(adapter); - sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs); +err_out_put: device_unregister(&port->sysfs_device); +err_out: + kref_put(&adapter->ref, zfcp_adapter_release); + return ERR_PTR(retval); } /** diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index aca2047dc2d5..c89dbe250377 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -128,13 +128,15 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) write_unlock_irq(&adapter->port_list_lock); mutex_unlock(&zfcp_data.config_mutex); - list_for_each_entry_safe(port, p, &port_remove_lh, list) { - list_for_each_entry_safe(unit, u, &unit_remove_lh, list) - zfcp_unit_dequeue(unit); - zfcp_port_dequeue(port); - } - wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0); - zfcp_adapter_dequeue(adapter); + list_for_each_entry_safe(unit, u, &unit_remove_lh, list) + zfcp_device_unregister(&unit->sysfs_device, + &zfcp_sysfs_unit_attrs); + + list_for_each_entry_safe(port, p, &port_remove_lh, list) + zfcp_device_unregister(&port->sysfs_device, + &zfcp_sysfs_port_attrs); + + kref_put(&adapter->ref, zfcp_adapter_release); } /** diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index ef681dfed0cc..856f82dbcb1b 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -98,7 +98,7 @@ static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno) if (!adapter) goto out_put; - zfcp_adapter_get(adapter); + kref_get(&adapter->ref); out_put: put_device(&ccwdev->dev); out: @@ -212,7 +212,6 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, retval = -ENXIO; goto free_buffer; } - zfcp_adapter_get(adapter); retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg, data_user->control_file); @@ -245,7 +244,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, free_sg: zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES); adapter_put: - zfcp_adapter_put(adapter); + kref_put(&adapter->ref, zfcp_adapter_release); free_buffer: kfree(data); no_mem_sense: diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 215b70749e95..fe818cd29dc1 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -1067,6 +1067,8 @@ err_out: */ void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf) { + if (!dbf) + return; debug_unregister(dbf->scsi); debug_unregister(dbf->san); debug_unregister(dbf->hba); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index e45a08d6c98e..55dc402c3aec 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -446,9 +446,7 @@ struct zfcp_qdio { }; 
struct zfcp_adapter { - atomic_t refcount; /* reference count */ - wait_queue_head_t remove_wq; /* can be used to wait for - refcount drop to zero */ + struct kref ref; u64 peer_wwnn; /* P2P peer WWNN */ u64 peer_wwpn; /* P2P peer WWPN */ u32 peer_d_id; /* P2P peer D_ID */ @@ -501,9 +499,6 @@ struct zfcp_port { struct device sysfs_device; /* sysfs device */ struct fc_rport *rport; /* rport of fc transport class */ struct list_head list; /* list of remote ports */ - atomic_t refcount; /* reference count */ - wait_queue_head_t remove_wq; /* can be used to wait for - refcount drop to zero */ struct zfcp_adapter *adapter; /* adapter used to access port */ struct list_head unit_list; /* head of logical unit list */ rwlock_t unit_list_lock; /* unit list lock */ @@ -525,9 +520,6 @@ struct zfcp_port { struct zfcp_unit { struct device sysfs_device; /* sysfs device */ struct list_head list; /* list of logical units */ - atomic_t refcount; /* reference count */ - wait_queue_head_t remove_wq; /* can be used to wait for - refcount drop to zero */ struct zfcp_port *port; /* remote port of unit */ atomic_t status; /* status of this logical unit */ u64 fcp_lun; /* own FCP_LUN */ @@ -656,47 +648,4 @@ zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req) return NULL; } -/* - * functions needed for reference/usage counting - */ - -static inline void -zfcp_unit_get(struct zfcp_unit *unit) -{ - atomic_inc(&unit->refcount); -} - -static inline void -zfcp_unit_put(struct zfcp_unit *unit) -{ - if (atomic_dec_return(&unit->refcount) == 0) - wake_up(&unit->remove_wq); -} - -static inline void -zfcp_port_get(struct zfcp_port *port) -{ - atomic_inc(&port->refcount); -} - -static inline void -zfcp_port_put(struct zfcp_port *port) -{ - if (atomic_dec_return(&port->refcount) == 0) - wake_up(&port->remove_wq); -} - -static inline void -zfcp_adapter_get(struct zfcp_adapter *adapter) -{ - atomic_inc(&adapter->refcount); -} - -static inline void -zfcp_adapter_put(struct zfcp_adapter *adapter) -{ - if (atomic_dec_return(&adapter->refcount) == 0) - wake_up(&adapter->remove_wq); -} - #endif /* ZFCP_DEF_H */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 464f0473877a..788fd3a4cd23 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -174,7 +174,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, switch (need) { case ZFCP_ERP_ACTION_REOPEN_UNIT: - zfcp_unit_get(unit); + get_device(&unit->sysfs_device); atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); erp_action = &unit->erp_action; if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) @@ -183,7 +183,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, case ZFCP_ERP_ACTION_REOPEN_PORT: case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - zfcp_port_get(port); + get_device(&port->sysfs_device); zfcp_erp_action_dismiss_port(port); atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); erp_action = &port->erp_action; @@ -192,7 +192,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - zfcp_adapter_get(adapter); + kref_get(&adapter->ref); zfcp_erp_action_dismiss_adapter(adapter); atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); erp_action = &adapter->erp_action; @@ -1177,19 +1177,19 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) switch (act->action) { case ZFCP_ERP_ACTION_REOPEN_UNIT: if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) { - 
zfcp_unit_get(unit); + get_device(&unit->sysfs_device); if (scsi_queue_work(unit->port->adapter->scsi_host, &unit->scsi_work) <= 0) - zfcp_unit_put(unit); + put_device(&unit->sysfs_device); } - zfcp_unit_put(unit); + put_device(&unit->sysfs_device); break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: case ZFCP_ERP_ACTION_REOPEN_PORT: if (result == ZFCP_ERP_SUCCEEDED) zfcp_scsi_schedule_rport_register(port); - zfcp_port_put(port); + put_device(&port->sysfs_device); break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: @@ -1198,7 +1198,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) schedule_work(&adapter->scan_work); } else unregister_service_level(&adapter->service_level); - zfcp_adapter_put(adapter); + kref_put(&adapter->ref, zfcp_adapter_release); break; } } @@ -1224,8 +1224,9 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) unsigned long flags; struct zfcp_adapter *adapter = erp_action->adapter; - write_lock_irqsave(&adapter->erp_lock, flags); + kref_get(&adapter->ref); + write_lock_irqsave(&adapter->erp_lock, flags); zfcp_erp_strategy_check_fsfreq(erp_action); if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) { @@ -1282,6 +1283,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) if (retval != ZFCP_ERP_CONTINUES) zfcp_erp_action_cleanup(erp_action, retval); + kref_put(&adapter->ref, zfcp_adapter_release); return retval; } diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index b3f28deb4505..3106c3be6395 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -15,15 +15,15 @@ extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64); extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); extern int zfcp_adapter_enqueue(struct ccw_device *); -extern void zfcp_adapter_dequeue(struct zfcp_adapter *); extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, u32); -extern void zfcp_port_dequeue(struct zfcp_port *); extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64); -extern void zfcp_unit_dequeue(struct zfcp_unit *); extern int zfcp_reqlist_isempty(struct zfcp_adapter *); extern void zfcp_sg_free_table(struct scatterlist *, int); extern int zfcp_sg_setup_table(struct scatterlist *, int); +extern void zfcp_device_unregister(struct device *, + const struct attribute_group *); +extern void zfcp_adapter_release(struct kref *); /* zfcp_ccw.c */ extern int zfcp_ccw_register(void); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index c7efdc51df63..6fa1bcbec0a9 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -134,6 +134,8 @@ static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) { + if (!gs) + return; zfcp_fc_wka_port_force_offline(&gs->ms); zfcp_fc_wka_port_force_offline(&gs->ts); zfcp_fc_wka_port_force_offline(&gs->ds); @@ -356,7 +358,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work) zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); out: - zfcp_port_put(port); + put_device(&port->sysfs_device); } /** @@ -365,9 +367,9 @@ out: */ void zfcp_fc_trigger_did_lookup(struct zfcp_port *port) { - zfcp_port_get(port); + get_device(&port->sysfs_device); if (!queue_work(port->adapter->work_queue, &port->gid_pn_work)) - zfcp_port_put(port); + put_device(&port->sysfs_device); } /** @@ -426,7 +428,7 @@ static void zfcp_fc_adisc_handler(unsigned long data) zfcp_scsi_schedule_rport_register(port); out: 
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); - zfcp_port_put(port); + put_device(&port->sysfs_device); kfree(adisc); } @@ -468,7 +470,7 @@ void zfcp_fc_link_test_work(struct work_struct *work) container_of(work, struct zfcp_port, test_link_work); int retval; - zfcp_port_get(port); + get_device(&port->sysfs_device); port->rport_task = RPORT_DEL; zfcp_scsi_rport_work(&port->rport_work); @@ -487,7 +489,7 @@ void zfcp_fc_link_test_work(struct work_struct *work) zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); out: - zfcp_port_put(port); + put_device(&port->sysfs_device); } /** @@ -500,9 +502,9 @@ out: */ void zfcp_fc_test_link(struct zfcp_port *port) { - zfcp_port_get(port); + get_device(&port->sysfs_device); if (!queue_work(port->adapter->work_queue, &port->test_link_work)) - zfcp_port_put(port); + put_device(&port->sysfs_device); } static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num) @@ -576,7 +578,7 @@ static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, return ret; } -static void zfcp_fc_validate_port(struct zfcp_port *port) +static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh) { if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)) return; @@ -584,13 +586,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port) atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); if ((port->supported_classes != 0) || - !list_empty(&port->unit_list)) { - zfcp_port_put(port); + !list_empty(&port->unit_list)) return; - } - zfcp_erp_port_shutdown(port, 0, "fcpval1", NULL); - zfcp_port_put(port); - zfcp_port_dequeue(port); + + atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); + list_move_tail(&port->list, lh); } static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) @@ -602,6 +602,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) struct zfcp_adapter *adapter = ct->wka_port->adapter; struct zfcp_port *port, *tmp; unsigned long flags; + LIST_HEAD(remove_lh); u32 d_id; int ret = 0, x, last = 0; @@ -652,9 +653,16 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) zfcp_erp_wait(adapter); write_lock_irqsave(&adapter->port_list_lock, flags); list_for_each_entry_safe(port, tmp, &adapter->port_list, list) - zfcp_fc_validate_port(port); + zfcp_fc_validate_port(port, &remove_lh); write_unlock_irqrestore(&adapter->port_list_lock, flags); mutex_unlock(&zfcp_data.config_mutex); + + list_for_each_entry_safe(port, tmp, &remove_lh, list) { + zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL); + zfcp_device_unregister(&port->sysfs_device, + &zfcp_sysfs_port_attrs); + } + return ret; } @@ -763,7 +771,7 @@ int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) } els_fc_job->els.d_id = port->d_id; - zfcp_port_put(port); + put_device(&port->sysfs_device); } else { port_did = job->request->rqst_data.h_els.port_id; els_fc_job->els.d_id = (port_did[0] << 16) + diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 9df62f686812..3aad70916289 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1492,7 +1492,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) } out: - zfcp_port_put(port); + put_device(&port->sysfs_device); } /** @@ -1530,14 +1530,14 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) req->data = port; req->erp_action = erp_action; erp_action->fsf_req = req; - zfcp_port_get(port); + get_device(&port->sysfs_device); zfcp_fsf_start_erp_timer(req); retval = 
zfcp_fsf_req_send(req); if (retval) { zfcp_fsf_req_free(req); erp_action->fsf_req = NULL; - zfcp_port_put(port); + put_device(&port->sysfs_device); } out: spin_unlock_bh(&qdio->req_q_lock); @@ -2335,7 +2335,7 @@ skip_fsfstatus: else { zfcp_fsf_send_fcp_command_task_handler(req); req->unit = NULL; - zfcp_unit_put(unit); + put_device(&unit->sysfs_device); } } @@ -2387,7 +2387,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - zfcp_unit_get(unit); + get_device(&unit->sysfs_device); req->unit = unit; req->data = scsi_cmnd; req->handler = zfcp_fsf_send_fcp_command_handler; @@ -2463,7 +2463,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, goto out; failed_scsi_cmnd: - zfcp_unit_put(unit); + put_device(&unit->sysfs_device); zfcp_fsf_req_free(req); scsi_cmnd->host_scribble = NULL; out: diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 6feece3b2e36..39a621d729e9 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -52,7 +52,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) { struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; unit->device = NULL; - zfcp_unit_put(unit); + put_device(&unit->sysfs_device); } static int zfcp_scsi_slave_configure(struct scsi_device *sdp) @@ -335,8 +335,7 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) read_lock_irq(&adapter->port_list_lock); list_for_each_entry(port, &adapter->port_list, list) - if (port->rport) - port->rport = NULL; + port->rport = NULL; read_unlock_irq(&adapter->port_list_lock); fc_remove_host(shost); @@ -356,7 +355,7 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter) fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL); if (!fc_stats) return NULL; - adapter->fc_stats = fc_stats; /* freed in adater_dequeue */ + adapter->fc_stats = fc_stats; /* freed in adapter_release */ } memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats)); return adapter->fc_stats; @@ -472,7 +471,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost) adapter->stats_reset = jiffies/HZ; kfree(adapter->stats_reset_data); adapter->stats_reset_data = data; /* finally freed in - adapter_dequeue */ + adapter_release */ } } @@ -517,7 +516,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) if (port) { zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); - zfcp_port_put(port); + put_device(&port->sysfs_device); } } @@ -559,23 +558,23 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port) void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) { - zfcp_port_get(port); + get_device(&port->sysfs_device); port->rport_task = RPORT_ADD; if (!queue_work(port->adapter->work_queue, &port->rport_work)) - zfcp_port_put(port); + put_device(&port->sysfs_device); } void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) { - zfcp_port_get(port); + get_device(&port->sysfs_device); port->rport_task = RPORT_DEL; if (port->rport && queue_work(port->adapter->work_queue, &port->rport_work)) return; - zfcp_port_put(port); + put_device(&port->sysfs_device); } void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) @@ -604,7 +603,7 @@ void zfcp_scsi_rport_work(struct work_struct *work) } } - zfcp_port_put(port); + put_device(&port->sysfs_device); } @@ -622,7 +621,7 @@ void zfcp_scsi_scan(struct work_struct *work) scsilun_to_int((struct scsi_lun *) &unit->fcp_lun), 0); - zfcp_unit_put(unit); + put_device(&unit->sysfs_device); } static int zfcp_execute_fc_job(struct fc_bsg_job 
*job) diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 8430b518357e..b4a7e17932c5 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -3,7 +3,7 @@ * * sysfs attributes. * - * Copyright IBM Corporation 2008 + * Copyright IBM Corporation 2008, 2009 */ #define KMSG_COMPONENT "zfcp" @@ -140,7 +140,6 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, struct zfcp_port *port; u64 wwpn; int retval = 0; - LIST_HEAD(port_remove_lh); mutex_lock(&zfcp_data.config_mutex); if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { @@ -154,23 +153,21 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, } port = zfcp_get_port_by_wwpn(adapter, wwpn); - if (port && (atomic_read(&port->refcount) == 1)) { - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); - write_lock_irq(&adapter->port_list_lock); - list_move(&port->list, &port_remove_lh); - write_unlock_irq(&adapter->port_list_lock); - } else - port = NULL; - if (!port) { retval = -ENXIO; goto out; } + atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); + + write_lock_irq(&adapter->port_list_lock); + list_del(&port->list); + write_unlock_irq(&adapter->port_list_lock); + + put_device(&port->sysfs_device); + zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL); - zfcp_erp_wait(adapter); - zfcp_port_put(port); - zfcp_port_dequeue(port); + zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs); out: mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; @@ -224,7 +221,6 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); zfcp_erp_wait(unit->port->adapter); flush_work(&unit->scsi_work); - zfcp_unit_put(unit); out: mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; @@ -239,7 +235,6 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, struct zfcp_unit *unit; u64 fcp_lun; int retval = 0; - LIST_HEAD(unit_remove_lh); mutex_lock(&zfcp_data.config_mutex); if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { @@ -261,19 +256,16 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, /* wait for possible timeout during SCSI probe */ flush_work(&unit->scsi_work); - if (atomic_read(&unit->refcount) == 1) { - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); + atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); + + write_lock_irq(&port->unit_list_lock); + list_del(&unit->list); + write_unlock_irq(&port->unit_list_lock); - write_lock_irq(&port->unit_list_lock); - list_move(&unit->list, &unit_remove_lh); - write_unlock_irq(&port->unit_list_lock); + put_device(&unit->sysfs_device); - zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); - zfcp_erp_wait(unit->port->adapter); - zfcp_unit_put(unit); - zfcp_unit_dequeue(unit); - } else - zfcp_unit_put(unit); + zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); + zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs); out: mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; -- cgit v1.2.3-59-g8ed1b From de3dc57214a1466034ecc4d4ffb10331d34c09a3 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 24 Nov 2009 16:54:00 +0100 Subject: [SCSI] zfcp: Remove global config_mutex The global config_mutex was required for the serialization of a configuration change within the zfcp driver. This global locking is now obsolete and can be removed. 
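Concretely, callers switch to taking a counted adapter reference through the new zfcp_ccw_adapter_by_cdev()/zfcp_ccw_adapter_put() helpers introduced by this patch; a minimal usage sketch follows (illustrative only, the function name is invented and the zfcp driver headers are assumed):

/*
 * Usage sketch: instead of holding the global config_mutex across the
 * operation, take a counted reference to the adapter, work on it, and
 * drop the reference when done.
 */
static int example_op(struct ccw_device *cdev)
{
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);

	if (!adapter)
		return -ENODEV;

	/* ... operate on the adapter without any global lock held ... */

	zfcp_ccw_adapter_put(adapter);
	return 0;
}
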
The requirement of serializing the access to a zfcp_adapter reference via a ccw_device is realized wth a static spinlock. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 86 ++++++++++++------------- drivers/s390/scsi/zfcp_ccw.c | 138 +++++++++++++++++++++++------------------ drivers/s390/scsi/zfcp_cfdc.c | 25 +++----- drivers/s390/scsi/zfcp_def.h | 1 - drivers/s390/scsi/zfcp_ext.h | 5 +- drivers/s390/scsi/zfcp_fc.c | 3 - drivers/s390/scsi/zfcp_sysfs.c | 132 ++++++++++++++++++++++++++++++--------- 7 files changed, 232 insertions(+), 158 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 8492ceac1409..ed31bd0ff3fb 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -80,23 +80,21 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) { - struct ccw_device *ccwdev; + struct ccw_device *cdev; struct zfcp_adapter *adapter; struct zfcp_port *port; struct zfcp_unit *unit; - ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); - if (!ccwdev) + cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); + if (!cdev) return; - if (ccw_device_set_online(ccwdev)) - goto out_ccwdev; + if (ccw_device_set_online(cdev)) + goto out_ccw_device; - mutex_lock(&zfcp_data.config_mutex); - adapter = dev_get_drvdata(&ccwdev->dev); + adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) - goto out_unlock; - kref_get(&adapter->ref); + goto out_ccw_device; port = zfcp_get_port_by_wwpn(adapter, wwpn); if (!port) @@ -105,21 +103,17 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) unit = zfcp_unit_enqueue(port, lun); if (IS_ERR(unit)) goto out_unit; - mutex_unlock(&zfcp_data.config_mutex); zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL); zfcp_erp_wait(adapter); flush_work(&unit->scsi_work); - mutex_lock(&zfcp_data.config_mutex); out_unit: put_device(&port->sysfs_device); out_port: - kref_put(&adapter->ref, zfcp_adapter_release); -out_unlock: - mutex_unlock(&zfcp_data.config_mutex); -out_ccwdev: - put_device(&ccwdev->dev); + zfcp_ccw_adapter_put(adapter); +out_ccw_device: + put_device(&cdev->dev); return; } @@ -184,8 +178,6 @@ static int __init zfcp_module_init(void) if (!zfcp_data.gid_pn_cache) goto out_gid_cache; - mutex_init(&zfcp_data.config_mutex); - zfcp_data.scsi_transport_template = fc_attach_transport(&zfcp_transport_functions); if (!zfcp_data.scsi_transport_template) @@ -296,7 +288,6 @@ static void zfcp_unit_release(struct device *dev) * @port: pointer to port where unit is added * @fcp_lun: FCP LUN of unit to be enqueued * Returns: pointer to enqueued unit on success, ERR_PTR on error - * Locks: config_mutex must be held to serialize changes to the unit list * * Sets up some unit internal structures and creates sysfs entry. 
*/ @@ -371,7 +362,6 @@ err_out: static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) { - /* must only be called with zfcp_data.config_mutex taken */ adapter->pool.erp_req = mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.erp_req) @@ -419,7 +409,6 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) { - /* zfcp_data.config_mutex must be held */ if (adapter->pool.erp_req) mempool_destroy(adapter->pool.erp_req); if (adapter->pool.scsi_req) @@ -501,24 +490,22 @@ static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter) * zfcp_adapter_enqueue - enqueue a new adapter to the list * @ccw_device: pointer to the struct cc_device * - * Returns: 0 if a new adapter was successfully enqueued - * -ENOMEM if alloc failed + * Returns: struct zfcp_adapter* * Enqueues an adapter at the end of the adapter list in the driver data. * All adapter internal structures are set up. * Proc-fs entries are also created. - * locks: config_mutex must be held to serialize changes to the adapter list */ -int zfcp_adapter_enqueue(struct ccw_device *ccw_device) +struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) { struct zfcp_adapter *adapter; if (!get_device(&ccw_device->dev)) - return -ENODEV; + return ERR_PTR(-ENODEV); adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL); if (!adapter) { put_device(&ccw_device->dev); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } kref_init(&adapter->ref); @@ -578,11 +565,30 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); if (!zfcp_adapter_scsi_register(adapter)) - return 0; + return adapter; failed: - kref_put(&adapter->ref, zfcp_adapter_release); - return -ENOMEM; + zfcp_adapter_unregister(adapter); + return ERR_PTR(-ENOMEM); +} + +void zfcp_adapter_unregister(struct zfcp_adapter *adapter) +{ + struct ccw_device *cdev = adapter->ccw_device; + + cancel_work_sync(&adapter->scan_work); + cancel_work_sync(&adapter->stat_work); + zfcp_destroy_adapter_work_queue(adapter); + + zfcp_fc_wka_ports_force_offline(adapter->gs); + zfcp_adapter_scsi_unregister(adapter); + sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs); + + zfcp_erp_thread_kill(adapter); + zfcp_dbf_adapter_unregister(adapter->dbf); + zfcp_qdio_destroy(adapter->qdio); + + zfcp_ccw_adapter_put(adapter); /* final put to release */ } /** @@ -594,27 +600,16 @@ void zfcp_adapter_release(struct kref *ref) { struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter, ref); - struct ccw_device *ccw_device = adapter->ccw_device; - - cancel_work_sync(&adapter->stat_work); - - zfcp_fc_wka_ports_force_offline(adapter->gs); - sysfs_remove_group(&ccw_device->dev.kobj, &zfcp_sysfs_adapter_attrs); - - dev_set_drvdata(&ccw_device->dev, NULL); + struct ccw_device *cdev = adapter->ccw_device; dev_set_drvdata(&adapter->ccw_device->dev, NULL); zfcp_fc_gs_destroy(adapter); - zfcp_erp_thread_kill(adapter); - zfcp_destroy_adapter_work_queue(adapter); - zfcp_dbf_adapter_unregister(adapter->dbf); zfcp_free_low_mem_buffers(adapter); - zfcp_qdio_destroy(adapter->qdio); kfree(adapter->req_list); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); kfree(adapter); - put_device(&ccw_device->dev); + put_device(&cdev->dev); } /** @@ -636,7 +631,7 @@ static void zfcp_port_release(struct device *dev) struct zfcp_port *port = container_of(dev, struct zfcp_port, sysfs_device); - 
kref_put(&port->adapter->ref, zfcp_adapter_release); + zfcp_ccw_adapter_put(port->adapter); kfree(port); } @@ -647,7 +642,6 @@ static void zfcp_port_release(struct device *dev) * @status: initial status for the port * @d_id: destination id of the remote port to be enqueued * Returns: pointer to enqueued port on success, ERR_PTR on error - * Locks: config_mutex must be held to serialize changes to the port list * * All port internal structures are set up and the sysfs entry is generated. * d_id is used to enqueue ports with a well known address like the Directory @@ -718,7 +712,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, err_out_put: device_unregister(&port->sysfs_device); err_out: - kref_put(&adapter->ref, zfcp_adapter_release); + zfcp_ccw_adapter_put(adapter); return ERR_PTR(retval); } diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index c89dbe250377..2433eaced20c 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -13,20 +13,42 @@ #define ZFCP_MODEL_PRIV 0x4 +static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock); + +struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev) +{ + struct zfcp_adapter *adapter; + unsigned long flags; + + spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags); + adapter = dev_get_drvdata(&cdev->dev); + if (adapter) + kref_get(&adapter->ref); + spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); + return adapter; +} + +void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags); + kref_put(&adapter->ref, zfcp_adapter_release); + spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); +} + static int zfcp_ccw_suspend(struct ccw_device *cdev) { - struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; - mutex_lock(&zfcp_data.config_mutex); - zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); zfcp_erp_wait(adapter); - mutex_unlock(&zfcp_data.config_mutex); + zfcp_ccw_adapter_put(adapter); return 0; } @@ -34,7 +56,7 @@ static int zfcp_ccw_suspend(struct ccw_device *cdev) static int zfcp_ccw_activate(struct ccw_device *cdev) { - struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; @@ -46,6 +68,8 @@ static int zfcp_ccw_activate(struct ccw_device *cdev) zfcp_erp_wait(adapter); flush_work(&adapter->scan_work); + zfcp_ccw_adapter_put(adapter); + return 0; } @@ -67,28 +91,28 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter) /** * zfcp_ccw_probe - probe function of zfcp driver - * @ccw_device: pointer to belonging ccw device + * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer for each FCP * device found on the current system. This is only a stub to make cio * work: To only allocate adapter resources for devices actually used, * the allocation is deferred to the first call to ccw_set_online. */ -static int zfcp_ccw_probe(struct ccw_device *ccw_device) +static int zfcp_ccw_probe(struct ccw_device *cdev) { return 0; } /** * zfcp_ccw_remove - remove function of zfcp driver - * @ccw_device: pointer to belonging ccw device + * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer and removes an adapter * from the system. Task of this function is to get rid of all units and * ports that belong to this adapter. 
And in addition all resources of this * adapter will be freed too. */ -static void zfcp_ccw_remove(struct ccw_device *ccw_device) +static void zfcp_ccw_remove(struct ccw_device *cdev) { struct zfcp_adapter *adapter; struct zfcp_port *port, *p; @@ -96,22 +120,12 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) LIST_HEAD(unit_remove_lh); LIST_HEAD(port_remove_lh); - ccw_device_set_offline(ccw_device); - - mutex_lock(&zfcp_data.config_mutex); - adapter = dev_get_drvdata(&ccw_device->dev); - mutex_unlock(&zfcp_data.config_mutex); + ccw_device_set_offline(cdev); + adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return; - cancel_work_sync(&adapter->scan_work); - - mutex_lock(&zfcp_data.config_mutex); - - /* this also removes the scsi devices, so call it first */ - zfcp_adapter_scsi_unregister(adapter); - write_lock_irq(&adapter->port_list_lock); list_for_each_entry_safe(port, p, &adapter->port_list, list) { write_lock(&port->unit_list_lock); @@ -126,7 +140,7 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) } atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); write_unlock_irq(&adapter->port_list_lock); - mutex_unlock(&zfcp_data.config_mutex); + zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */ list_for_each_entry_safe(unit, u, &unit_remove_lh, list) zfcp_device_unregister(&unit->sysfs_device, @@ -136,12 +150,12 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs); - kref_put(&adapter->ref, zfcp_adapter_release); + zfcp_adapter_unregister(adapter); } /** * zfcp_ccw_set_online - set_online function of zfcp driver - * @ccw_device: pointer to belonging ccw device + * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer and sets an * adapter into state online. The first call will allocate all @@ -152,23 +166,20 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) * the SCSI stack, that the QDIO queues will be set up and that the * adapter will be opened. */ -static int zfcp_ccw_set_online(struct ccw_device *ccw_device) +static int zfcp_ccw_set_online(struct ccw_device *cdev) { - struct zfcp_adapter *adapter; - int ret = 0; - - mutex_lock(&zfcp_data.config_mutex); - adapter = dev_get_drvdata(&ccw_device->dev); + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) { - ret = zfcp_adapter_enqueue(ccw_device); - if (ret) { - dev_err(&ccw_device->dev, + adapter = zfcp_adapter_enqueue(cdev); + + if (IS_ERR(adapter)) { + dev_err(&cdev->dev, "Setting up data structures for the " "FCP adapter failed\n"); - goto out; + return PTR_ERR(adapter); } - adapter = dev_get_drvdata(&ccw_device->dev); + kref_get(&adapter->ref); } /* initialize request counter */ @@ -180,58 +191,61 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device) zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "ccsonl2", NULL); zfcp_erp_wait(adapter); -out: - mutex_unlock(&zfcp_data.config_mutex); - if (!ret) - flush_work(&adapter->scan_work); - return ret; + + flush_work(&adapter->scan_work); + + zfcp_ccw_adapter_put(adapter); + return 0; } /** * zfcp_ccw_set_offline - set_offline function of zfcp driver - * @ccw_device: pointer to belonging ccw device + * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer and sets an adapter * into state offline. 
*/ -static int zfcp_ccw_set_offline(struct ccw_device *ccw_device) +static int zfcp_ccw_set_offline(struct ccw_device *cdev) { - struct zfcp_adapter *adapter; + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); + + if (!adapter) + return 0; - mutex_lock(&zfcp_data.config_mutex); - adapter = dev_get_drvdata(&ccw_device->dev); zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); zfcp_erp_wait(adapter); - mutex_unlock(&zfcp_data.config_mutex); + + zfcp_ccw_adapter_put(adapter); return 0; } /** * zfcp_ccw_notify - ccw notify function - * @ccw_device: pointer to belonging ccw device + * @cdev: pointer to belonging ccw device * @event: indicates if adapter was detached or attached * * This function gets called by the common i/o layer if an adapter has gone * or reappeared. */ -static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event) +static int zfcp_ccw_notify(struct ccw_device *cdev, int event) { - struct zfcp_adapter *adapter = dev_get_drvdata(&ccw_device->dev); + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); + + if (!adapter) + return 1; switch (event) { case CIO_GONE: - dev_warn(&adapter->ccw_device->dev, - "The FCP device has been detached\n"); + dev_warn(&cdev->dev, "The FCP device has been detached\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL); break; case CIO_NO_PATH: - dev_warn(&adapter->ccw_device->dev, + dev_warn(&cdev->dev, "The CHPID for the FCP device is offline\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL); break; case CIO_OPER: - dev_info(&adapter->ccw_device->dev, - "The FCP device is operational again\n"); + dev_info(&cdev->dev, "The FCP device is operational again\n"); zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); @@ -239,11 +253,13 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event) "ccnoti4", NULL); break; case CIO_BOXED: - dev_warn(&adapter->ccw_device->dev, "The FCP device " - "did not respond within the specified time\n"); + dev_warn(&cdev->dev, "The FCP device did not respond within " + "the specified time\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); break; } + + zfcp_ccw_adapter_put(adapter); return 1; } @@ -253,18 +269,16 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event) */ static void zfcp_ccw_shutdown(struct ccw_device *cdev) { - struct zfcp_adapter *adapter; + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); - mutex_lock(&zfcp_data.config_mutex); - adapter = dev_get_drvdata(&cdev->dev); if (!adapter) - goto out; + return; zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); zfcp_erp_wait(adapter); zfcp_erp_thread_kill(adapter); -out: - mutex_unlock(&zfcp_data.config_mutex); + + zfcp_ccw_adapter_put(adapter); } struct ccw_driver zfcp_ccw_driver = { diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index 856f82dbcb1b..f932400e980a 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -86,22 +86,17 @@ static int zfcp_cfdc_copy_to_user(void __user *user_buffer, static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno) { char busid[9]; - struct ccw_device *ccwdev; - struct zfcp_adapter *adapter = NULL; + struct ccw_device *cdev; + struct zfcp_adapter *adapter; snprintf(busid, sizeof(busid), "0.0.%04x", devno); - ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); - if (!ccwdev) - goto out; - - adapter = dev_get_drvdata(&ccwdev->dev); - if (!adapter) - goto out_put; - - kref_get(&adapter->ref); -out_put: - 
put_device(&ccwdev->dev); -out: + cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); + if (!cdev) + return NULL; + + adapter = zfcp_ccw_adapter_by_cdev(cdev); + + put_device(&cdev->dev); return adapter; } @@ -244,7 +239,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, free_sg: zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES); adapter_put: - kref_put(&adapter->ref, zfcp_adapter_release); + zfcp_ccw_adapter_put(adapter); free_buffer: kfree(data); no_mem_sense: diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 55dc402c3aec..7e84e1624d16 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -595,7 +595,6 @@ struct zfcp_fsf_req { struct zfcp_data { struct scsi_host_template scsi_host_template; struct scsi_transport_template *scsi_transport_template; - struct mutex config_mutex; struct kmem_cache *gpn_ft_cache; struct kmem_cache *qtcb_cache; struct kmem_cache *sr_buffer_cache; diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 3106c3be6395..1e3ec708505b 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -14,7 +14,7 @@ /* zfcp_aux.c */ extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64); extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); -extern int zfcp_adapter_enqueue(struct ccw_device *); +extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *); extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, u32); extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64); @@ -24,11 +24,14 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int); extern void zfcp_device_unregister(struct device *, const struct attribute_group *); extern void zfcp_adapter_release(struct kref *); +extern void zfcp_adapter_unregister(struct zfcp_adapter *); /* zfcp_ccw.c */ extern int zfcp_ccw_register(void); extern int zfcp_ccw_priv_sch(struct zfcp_adapter *); extern struct ccw_driver zfcp_ccw_driver; +extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *); +extern void zfcp_ccw_adapter_put(struct zfcp_adapter *); /* zfcp_cfdc.c */ extern struct miscdevice zfcp_cfdc_misc; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 6fa1bcbec0a9..3e3e72cc724b 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -622,8 +622,6 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) return -E2BIG; } - mutex_lock(&zfcp_data.config_mutex); - /* first entry is the header */ for (x = 1; x < max_entries && !last; x++) { if (x % (ZFCP_GPN_FT_ENTRIES + 1)) @@ -655,7 +653,6 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) list_for_each_entry_safe(port, tmp, &adapter->port_list, list) zfcp_fc_validate_port(port, &remove_lh); write_unlock_irqrestore(&adapter->port_list_lock, flags); - mutex_unlock(&zfcp_data.config_mutex); list_for_each_entry_safe(port, tmp, &remove_lh, list) { zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL); diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index b4a7e17932c5..181bea0f10fb 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -26,23 +26,36 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ zfcp_sysfs_##_feat##_##_name##_show, NULL); -ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n", - atomic_read(&adapter->status)); 
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n", - (unsigned long long) adapter->peer_wwnn); -ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n", - (unsigned long long) adapter->peer_wwpn); -ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n", - adapter->peer_d_id); -ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n", - adapter->hydra_version); -ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n", - adapter->fsf_lic_version); -ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n", - adapter->hardware_version); -ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n", - (atomic_read(&adapter->status) & - ZFCP_STATUS_COMMON_ERP_INUSE) != 0); +#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ +static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ + struct device_attribute *at,\ + char *buf) \ +{ \ + struct ccw_device *cdev = to_ccwdev(dev); \ + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \ + int i; \ + \ + if (!adapter) \ + return -ENODEV; \ + \ + i = sprintf(buf, _format, _value); \ + zfcp_ccw_adapter_put(adapter); \ + return i; \ +} \ +static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \ + zfcp_sysfs_adapter_##_name##_show, NULL); + +ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status)); +ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n", + (unsigned long long) adapter->peer_wwnn); +ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n", + (unsigned long long) adapter->peer_wwpn); +ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); +ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version); +ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); +ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version); +ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) & + ZFCP_STATUS_COMMON_ERP_INUSE) != 0); ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n", atomic_read(&port->status)); @@ -88,7 +101,6 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ unsigned long val; \ int retval = 0; \ \ - mutex_lock(&zfcp_data.config_mutex); \ if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ retval = -EBUSY; \ goto out; \ @@ -105,28 +117,89 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ _reopen_id, NULL); \ zfcp_erp_wait(_adapter); \ out: \ - mutex_unlock(&zfcp_data.config_mutex); \ return retval ? 
retval : (ssize_t) count; \ } \ static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ zfcp_sysfs_##_feat##_failed_show, \ zfcp_sysfs_##_feat##_failed_store); -ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, "syafai1", "syafai2"); ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2"); ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2"); +static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); + int i; + + if (!adapter) + return -ENODEV; + + if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + i = sprintf(buf, "1\n"); + else + i = sprintf(buf, "0\n"); + + zfcp_ccw_adapter_put(adapter); + return i; +} + +static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); + unsigned long val; + int retval = 0; + + if (!adapter) + return -ENODEV; + + if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { + retval = -EBUSY; + goto out; + } + + if (strict_strtoul(buf, 0, &val) || val != 0) { + retval = -EINVAL; + goto out; + } + + zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL, + ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, + "syafai2", NULL); + zfcp_erp_wait(adapter); +out: + zfcp_ccw_adapter_put(adapter); + return retval ? retval : (ssize_t) count; +} +static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO, + zfcp_sysfs_adapter_failed_show, + zfcp_sysfs_adapter_failed_store); + static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct zfcp_adapter *adapter = dev_get_drvdata(dev); + struct ccw_device *cdev = to_ccwdev(dev); + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); int ret; - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) - return -EBUSY; + if (!adapter) + return -ENODEV; + + if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { + ret = -EBUSY; + goto out; + } ret = zfcp_fc_scan_ports(adapter); +out: + zfcp_ccw_adapter_put(adapter); return ret ? ret : (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, @@ -136,12 +209,15 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct zfcp_adapter *adapter = dev_get_drvdata(dev); + struct ccw_device *cdev = to_ccwdev(dev); + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); struct zfcp_port *port; u64 wwpn; int retval = 0; - mutex_lock(&zfcp_data.config_mutex); + if (!adapter) + return -ENODEV; + if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { retval = -EBUSY; goto out; @@ -169,7 +245,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL); zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs); out: - mutex_unlock(&zfcp_data.config_mutex); + zfcp_ccw_adapter_put(adapter); return retval ? 
retval : (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, @@ -203,7 +279,6 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, u64 fcp_lun; int retval = -EINVAL; - mutex_lock(&zfcp_data.config_mutex); if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { retval = -EBUSY; goto out; @@ -222,7 +297,6 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, zfcp_erp_wait(unit->port->adapter); flush_work(&unit->scsi_work); out: - mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; } static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); @@ -236,7 +310,6 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, u64 fcp_lun; int retval = 0; - mutex_lock(&zfcp_data.config_mutex); if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { retval = -EBUSY; goto out; @@ -267,7 +340,6 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs); out: - mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; } static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); -- cgit v1.2.3-59-g8ed1b From b42aeceb35c59484056b0eea81203a0911ebb50d Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 24 Nov 2009 16:54:01 +0100 Subject: [SCSI] zfcp: Remove suspend callback The callback for suspend is not required because it contains exactly the same functionality as the _set_offline routine does. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_ccw.c | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 2433eaced20c..ca8dffcd1e02 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -37,22 +37,6 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter) spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); } -static int zfcp_ccw_suspend(struct ccw_device *cdev) - -{ - struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); - - if (!adapter) - return 0; - - zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); - zfcp_erp_wait(adapter); - - zfcp_ccw_adapter_put(adapter); - - return 0; -} - static int zfcp_ccw_activate(struct ccw_device *cdev) { @@ -291,7 +275,7 @@ struct ccw_driver zfcp_ccw_driver = { .set_offline = zfcp_ccw_set_offline, .notify = zfcp_ccw_notify, .shutdown = zfcp_ccw_shutdown, - .freeze = zfcp_ccw_suspend, + .freeze = zfcp_ccw_set_offline, .thaw = zfcp_ccw_activate, .restore = zfcp_ccw_activate, }; -- cgit v1.2.3-59-g8ed1b From 25458eb791acf0e5e65183c5adb3918d8d71d756 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:02 +0100 Subject: [SCSI] zfcp: Access ports and units with container_of in sysfs code When accessing port and unit attributes, use container_of instead of dev_get_drvdata. This eliminates some code checker warnings about aliased access of data structures. 
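As a rough sketch, the pattern this patch applies throughout the sysfs code looks like the following (the attribute body is illustrative only; the structure and member names are the real zfcp ones touched in the hunks below):

static ssize_t example_status_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	/* recover the enclosing zfcp_port from its embedded struct device;
	 * no dev_set_drvdata()/dev_get_drvdata() pairing is needed */
	struct zfcp_port *port = container_of(dev, struct zfcp_port,
					      sysfs_device);

	return sprintf(buf, "0x%08x\n", atomic_read(&port->status));
}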
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 2 -- drivers/s390/scsi/zfcp_sysfs.c | 15 ++++++++++----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index ed31bd0ff3fb..baef2ec7482f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -319,7 +319,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) kfree(unit); goto err_out; } - dev_set_drvdata(&unit->sysfs_device, unit); retval = -EINVAL; /* mark unit unusable as long as sysfs registration is not complete */ @@ -688,7 +687,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, kfree(port); goto err_out; } - dev_set_drvdata(&port->sysfs_device, port); retval = -EINVAL; if (device_register(&port->sysfs_device)) { diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 181bea0f10fb..901cc9a6ed20 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -19,7 +19,8 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ struct device_attribute *at,\ char *buf) \ { \ - struct _feat_def *_feat = dev_get_drvdata(dev); \ + struct _feat_def *_feat = container_of(dev, struct _feat_def, \ + sysfs_device); \ \ return sprintf(buf, _format, _value); \ } \ @@ -86,7 +87,8 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ - struct _feat_def *_feat = dev_get_drvdata(dev); \ + struct _feat_def *_feat = container_of(dev, struct _feat_def, \ + sysfs_device); \ \ if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ return sprintf(buf, "1\n"); \ @@ -97,7 +99,8 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ struct device_attribute *attr,\ const char *buf, size_t count)\ { \ - struct _feat_def *_feat = dev_get_drvdata(dev); \ + struct _feat_def *_feat = container_of(dev, struct _feat_def, \ + sysfs_device); \ unsigned long val; \ int retval = 0; \ \ @@ -274,7 +277,8 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct zfcp_port *port = dev_get_drvdata(dev); + struct zfcp_port *port = container_of(dev, struct zfcp_port, + sysfs_device); struct zfcp_unit *unit; u64 fcp_lun; int retval = -EINVAL; @@ -305,7 +309,8 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct zfcp_port *port = dev_get_drvdata(dev); + struct zfcp_port *port = container_of(dev, struct zfcp_port, + sysfs_device); struct zfcp_unit *unit; u64 fcp_lun; int retval = 0; -- cgit v1.2.3-59-g8ed1b From d9742b42b5c76e2a3a39de0d187fac4f6852134e Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:03 +0100 Subject: [SCSI] zfcp: Merge trace code for fsf requests in one function The latencies traced per fsf request are traced for sysfs output and for blktrace, each in one function. Simplify the tracing code by merging both tracing functions into one. 
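In outline, the merged helper has the shape sketched below (condensed from the hunk that follows; only the per-unit latency bookkeeping is trimmed):

static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
	struct fsf_qual_latency_info *lat_in =
		&req->qtcb->prefix.prot_status_qual.latency_info;
	int ticks = req->adapter->timer_ticks;
	struct zfcp_blk_drv_data blktrc;

	blktrc.flags = 0;
	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
	blktrc.inb_usage = req->queue_req.qdio_inb_usage;
	blktrc.outb_usage = req->queue_req.qdio_outb_usage;

	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
		blktrc.flags |= ZFCP_BLK_LAT_VALID;
		blktrc.channel_lat = lat_in->channel_lat * ticks;
		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
		/* the per-unit latency counters are updated here as well,
		 * under unit->latencies.lock, as before */
	}

	/* one function now feeds both the sysfs latency statistics
	 * and blktrace */
	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
			    sizeof(blktrc));
}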
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 93 +++++++++++++++++++------------------------- 1 file changed, 39 insertions(+), 54 deletions(-) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 3aad70916289..5eb96052941a 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -2109,64 +2109,52 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat) lat_rec->max = max(lat_rec->max, lat); } -static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req) +static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) { - struct fsf_qual_latency_info *lat_inf; - struct latency_cont *lat; + struct fsf_qual_latency_info *lat_in; + struct latency_cont *lat = NULL; struct zfcp_unit *unit = req->unit; + struct zfcp_blk_drv_data blktrc; + int ticks = req->adapter->timer_ticks; - lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info; + lat_in = &req->qtcb->prefix.prot_status_qual.latency_info; - switch (req->qtcb->bottom.io.data_direction) { - case FSF_DATADIR_READ: - lat = &unit->latencies.read; - break; - case FSF_DATADIR_WRITE: - lat = &unit->latencies.write; - break; - case FSF_DATADIR_CMND: - lat = &unit->latencies.cmd; - break; - default: - return; - } - - spin_lock(&unit->latencies.lock); - zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat); - zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat); - lat->counter++; - spin_unlock(&unit->latencies.lock); -} - -#ifdef CONFIG_BLK_DEV_IO_TRACE -static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req) -{ - struct fsf_qual_latency_info *lat_inf; - struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; - struct request *req = scsi_cmnd->request; - struct zfcp_blk_drv_data trace; - int ticks = fsf_req->adapter->timer_ticks; + blktrc.flags = 0; + blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) + blktrc.flags |= ZFCP_BLK_REQ_ERROR; + blktrc.inb_usage = req->queue_req.qdio_inb_usage; + blktrc.outb_usage = req->queue_req.qdio_outb_usage; + + if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { + blktrc.flags |= ZFCP_BLK_LAT_VALID; + blktrc.channel_lat = lat_in->channel_lat * ticks; + blktrc.fabric_lat = lat_in->fabric_lat * ticks; + + switch (req->qtcb->bottom.io.data_direction) { + case FSF_DATADIR_READ: + lat = &unit->latencies.read; + break; + case FSF_DATADIR_WRITE: + lat = &unit->latencies.write; + break; + case FSF_DATADIR_CMND: + lat = &unit->latencies.cmd; + break; + } - trace.flags = 0; - trace.magic = ZFCP_BLK_DRV_DATA_MAGIC; - if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { - trace.flags |= ZFCP_BLK_LAT_VALID; - lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info; - trace.channel_lat = lat_inf->channel_lat * ticks; - trace.fabric_lat = lat_inf->fabric_lat * ticks; + if (lat) { + spin_lock(&unit->latencies.lock); + zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat); + zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat); + lat->counter++; + spin_unlock(&unit->latencies.lock); + } } - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) - trace.flags |= ZFCP_BLK_REQ_ERROR; - trace.inb_usage = fsf_req->queue_req.qdio_inb_usage; - trace.outb_usage = fsf_req->queue_req.qdio_outb_usage; - blk_add_driver_data(req->q, req, &trace, sizeof(trace)); -} -#else -static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req) -{ + 
blk_add_driver_data(scsi->request->q, scsi->request, &blktrc, + sizeof(blktrc)); } -#endif static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) { @@ -2199,10 +2187,7 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) scpnt->result |= fcp_rsp_iu->scsi_status; - if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) - zfcp_fsf_req_latency(req); - - zfcp_fsf_trace_latency(req); + zfcp_fsf_req_trace(req, scpnt); if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) { if (fcp_rsp_info[3] == RSP_CODE_GOOD) -- cgit v1.2.3-59-g8ed1b From c1fad4176464281e776022dee7d029144afbeb13 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:04 +0100 Subject: [SCSI] zfcp: Implement module unloading With the reference counting for zfcp data structures, it is now possible to implement module unloading again. Module unloading requires to free all data structures in the module exit function. This is done by unregistering zfcp from s390 cio and the SCSI midlayer first in the module exit function. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 15 ++++++++++++++- drivers/s390/scsi/zfcp_ccw.c | 11 ----------- drivers/s390/scsi/zfcp_ext.h | 1 - 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index baef2ec7482f..12de1ce9a92d 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -189,7 +189,7 @@ static int __init zfcp_module_init(void) goto out_misc; } - retval = zfcp_ccw_register(); + retval = ccw_driver_register(&zfcp_ccw_driver); if (retval) { pr_err("The zfcp device driver could not register with " "the common I/O layer\n"); @@ -218,6 +218,19 @@ out: module_init(zfcp_module_init); +static void __exit zfcp_module_exit(void) +{ + ccw_driver_unregister(&zfcp_ccw_driver); + misc_deregister(&zfcp_cfdc_misc); + fc_release_transport(zfcp_data.scsi_transport_template); + kmem_cache_destroy(zfcp_data.gid_pn_cache); + kmem_cache_destroy(zfcp_data.sr_buffer_cache); + kmem_cache_destroy(zfcp_data.qtcb_cache); + kmem_cache_destroy(zfcp_data.gpn_ft_cache); +} + +module_exit(zfcp_module_exit); + /** * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN * @port: pointer to port to search for unit diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index ca8dffcd1e02..4d35902a0cc5 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -279,14 +279,3 @@ struct ccw_driver zfcp_ccw_driver = { .thaw = zfcp_ccw_activate, .restore = zfcp_ccw_activate, }; - -/** - * zfcp_ccw_register - ccw register function - * - * Registers the driver at the common i/o layer. This function will be called - * at module load time/system start. 
- */ -int __init zfcp_ccw_register(void) -{ - return ccw_driver_register(&zfcp_ccw_driver); -} diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 1e3ec708505b..5f205f85e6f9 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -27,7 +27,6 @@ extern void zfcp_adapter_release(struct kref *); extern void zfcp_adapter_unregister(struct zfcp_adapter *); /* zfcp_ccw.c */ -extern int zfcp_ccw_register(void); extern int zfcp_ccw_priv_sch(struct zfcp_adapter *); extern struct ccw_driver zfcp_ccw_driver; extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *); -- cgit v1.2.3-59-g8ed1b From 6b183334c23969d52d4d9f775da554480d05ca4d Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 24 Nov 2009 16:54:05 +0100 Subject: [SCSI] zfcp: Remove STATUS_COMMON_REMOVE flag as it is not required anymore The flag ZFCP_STATUS_COMMON_REMOVE was used to indicate that a resource is not ready to be used or about to be removed from the system. This is now better done by an improved list handling and therefore the additional indicator is not required anymore. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 27 +++++----------- drivers/s390/scsi/zfcp_ccw.c | 7 +---- drivers/s390/scsi/zfcp_def.h | 1 - drivers/s390/scsi/zfcp_erp.c | 6 ++-- drivers/s390/scsi/zfcp_fc.c | 1 - drivers/s390/scsi/zfcp_sysfs.c | 70 +++++++++++++----------------------------- 6 files changed, 34 insertions(+), 78 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 12de1ce9a92d..6b94f8d0609c 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -245,9 +245,9 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun) read_lock_irqsave(&port->unit_list_lock, flags); list_for_each_entry(unit, &port->unit_list, list) - if ((unit->fcp_lun == fcp_lun) && - !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE)) { - get_device(&unit->sysfs_device); + if (unit->fcp_lun == fcp_lun) { + if (!get_device(&unit->sysfs_device)) + unit = NULL; read_unlock_irqrestore(&port->unit_list_lock, flags); return unit; } @@ -270,9 +270,9 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, read_lock_irqsave(&adapter->port_list_lock, flags); list_for_each_entry(port, &adapter->port_list, list) - if ((port->wwpn == wwpn) && - !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE)) { - get_device(&port->sysfs_device); + if (port->wwpn == wwpn) { + if (!get_device(&port->sysfs_device)) + port = NULL; read_unlock_irqrestore(&adapter->port_list_lock, flags); return port; } @@ -334,9 +334,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) } retval = -EINVAL; - /* mark unit unusable as long as sysfs registration is not complete */ - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); - INIT_WORK(&unit->scsi_work, zfcp_scsi_scan); spin_lock_init(&unit->latencies.lock); @@ -360,7 +357,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) list_add_tail(&unit->list, &port->unit_list); write_unlock_irq(&port->unit_list_lock); - atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); return unit; @@ -565,17 +561,12 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) adapter->service_level.seq_print = zfcp_print_sl; - /* mark adapter unusable as long as sysfs registration is not 
complete */ - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); - dev_set_drvdata(&ccw_device->dev, adapter); if (sysfs_create_group(&ccw_device->dev.kobj, &zfcp_sysfs_adapter_attrs)) goto failed; - atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); - if (!zfcp_adapter_scsi_register(adapter)) return adapter; @@ -692,9 +683,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, port->sysfs_device.parent = &adapter->ccw_device->dev; port->sysfs_device.release = zfcp_port_release; - /* mark port unusable as long as sysfs registration is not complete */ - atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); - if (dev_set_name(&port->sysfs_device, "0x%016llx", (unsigned long long)wwpn)) { kfree(port); @@ -715,8 +703,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, list_add_tail(&port->list, &adapter->port_list); write_unlock_irq(&adapter->port_list_lock); - atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); - atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status); + atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status); return port; diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 4d35902a0cc5..c22cb72a5ae8 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -113,16 +113,11 @@ static void zfcp_ccw_remove(struct ccw_device *cdev) write_lock_irq(&adapter->port_list_lock); list_for_each_entry_safe(port, p, &adapter->port_list, list) { write_lock(&port->unit_list_lock); - list_for_each_entry_safe(unit, u, &port->unit_list, list) { - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, - &unit->status); + list_for_each_entry_safe(unit, u, &port->unit_list, list) list_move(&unit->list, &unit_remove_lh); - } write_unlock(&port->unit_list_lock); - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); list_move(&port->list, &port_remove_lh); } - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); write_unlock_irq(&adapter->port_list_lock); zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */ diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 7e84e1624d16..08fa31302f75 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -205,7 +205,6 @@ struct zfcp_ls_adisc { #define ZFCP_COMMON_FLAGS 0xfff00000 /* common status bits */ -#define ZFCP_STATUS_COMMON_REMOVE 0x80000000 #define ZFCP_STATUS_COMMON_RUNNING 0x40000000 #define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000 #define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000 diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 788fd3a4cd23..3454c2a3b6b1 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -174,7 +174,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, switch (need) { case ZFCP_ERP_ACTION_REOPEN_UNIT: - get_device(&unit->sysfs_device); + if (!get_device(&unit->sysfs_device)) + return NULL; atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); erp_action = &unit->erp_action; if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) @@ -183,7 +184,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, case ZFCP_ERP_ACTION_REOPEN_PORT: case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - get_device(&port->sysfs_device); + if (!get_device(&port->sysfs_device)) + return NULL; zfcp_erp_action_dismiss_port(port); atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); erp_action = &port->erp_action; diff --git 
a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 3e3e72cc724b..9252b65a13a5 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -589,7 +589,6 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh) !list_empty(&port->unit_list)) return; - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); list_move_tail(&port->list, lh); } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 901cc9a6ed20..35e920b4fd8a 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -104,10 +104,8 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ unsigned long val; \ int retval = 0; \ \ - if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ - retval = -EBUSY; \ - goto out; \ - } \ + if (!(_feat && get_device(&_feat->sysfs_device))) \ + return -EBUSY; \ \ if (strict_strtoul(buf, 0, &val) || val != 0) { \ retval = -EINVAL; \ @@ -120,6 +118,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ _reopen_id, NULL); \ zfcp_erp_wait(_adapter); \ out: \ + put_device(&_feat->sysfs_device); \ return retval ? retval : (ssize_t) count; \ } \ static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ @@ -161,11 +160,6 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev, if (!adapter) return -ENODEV; - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { - retval = -EBUSY; - goto out; - } - if (strict_strtoul(buf, 0, &val) || val != 0) { retval = -EINVAL; goto out; @@ -195,14 +189,9 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, if (!adapter) return -ENODEV; - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { - ret = -EBUSY; - goto out; - } - ret = zfcp_fc_scan_ports(adapter); -out: zfcp_ccw_adapter_put(adapter); + return ret ? ret : (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, @@ -216,28 +205,19 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); struct zfcp_port *port; u64 wwpn; - int retval = 0; + int retval = -EINVAL; if (!adapter) return -ENODEV; - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { - retval = -EBUSY; + if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) goto out; - } - - if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) { - retval = -EINVAL; - goto out; - } port = zfcp_get_port_by_wwpn(adapter, wwpn); - if (!port) { - retval = -ENXIO; + if (!port) goto out; - } - - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); + else + retval = 0; write_lock_irq(&adapter->port_list_lock); list_del(&port->list); @@ -283,10 +263,8 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, u64 fcp_lun; int retval = -EINVAL; - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { - retval = -EBUSY; - goto out; - } + if (!(port && get_device(&port->sysfs_device))) + return -EBUSY; if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) goto out; @@ -294,13 +272,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, unit = zfcp_unit_enqueue(port, fcp_lun); if (IS_ERR(unit)) goto out; - - retval = 0; + else + retval = 0; zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); zfcp_erp_wait(unit->port->adapter); flush_work(&unit->scsi_work); out: + put_device(&port->sysfs_device); return retval ? 
retval : (ssize_t) count; } static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); @@ -313,29 +292,23 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, sysfs_device); struct zfcp_unit *unit; u64 fcp_lun; - int retval = 0; + int retval = -EINVAL; - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { - retval = -EBUSY; - goto out; - } + if (!(port && get_device(&port->sysfs_device))) + return -EBUSY; - if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) { - retval = -EINVAL; + if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) goto out; - } unit = zfcp_get_unit_by_lun(port, fcp_lun); - if (!unit) { - retval = -EINVAL; + if (!unit) goto out; - } + else + retval = 0; /* wait for possible timeout during SCSI probe */ flush_work(&unit->scsi_work); - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); - write_lock_irq(&port->unit_list_lock); list_del(&unit->list); write_unlock_irq(&port->unit_list_lock); @@ -345,6 +318,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs); out: + put_device(&port->sysfs_device); return retval ? retval : (ssize_t) count; } static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); -- cgit v1.2.3-59-g8ed1b From 9eae07ef6bb5988163d8bb82cd952905db47b721 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 24 Nov 2009 16:54:06 +0100 Subject: [SCSI] zfcp: Assign scheduled work to driver queue The port_scan work was scheduled on the work_queue provided by the kernel. On SMP systems this made it likely that more than one scan_work instance was processed in parallel. This is not required and opens the possibility of race conditions between the removal of invalid ports and the enqueue of just scanned ports. This patch synchronizes the scan_work tasks by scheduling them on the adapter-local work_queue.
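The resulting call pattern is small; roughly (a sketch only, relying on the adapter-local queue to serialize its work items as described above):

/* kernel-initiated scan, e.g. from ERP or an incoming RSCN */
queue_work(adapter->work_queue, &adapter->scan_work);

/* user-triggered rescan via sysfs: queue on the same adapter-local
 * queue and wait, so it cannot run concurrently with the above */
queue_work(adapter->work_queue, &adapter->scan_work);
flush_work(&adapter->scan_work);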
Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 2 +- drivers/s390/scsi/zfcp_erp.c | 2 +- drivers/s390/scsi/zfcp_ext.h | 3 +-- drivers/s390/scsi/zfcp_fc.c | 25 +++++++++---------------- drivers/s390/scsi/zfcp_fsf.c | 2 +- drivers/s390/scsi/zfcp_sysfs.c | 7 ++++--- 6 files changed, 17 insertions(+), 24 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 6b94f8d0609c..107d3f2b6e94 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -522,7 +522,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) adapter->ccw_device = ccw_device; INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); - INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later); + INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports); if (zfcp_qdio_setup(adapter)) goto failed; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 3454c2a3b6b1..b51a11a82e63 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -1197,7 +1197,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) case ZFCP_ERP_ACTION_REOPEN_ADAPTER: if (result == ZFCP_ERP_SUCCEEDED) { register_service_level(&adapter->service_level); - schedule_work(&adapter->scan_work); + queue_work(adapter->work_queue, &adapter->scan_work); } else unregister_service_level(&adapter->service_level); kref_put(&adapter->ref, zfcp_adapter_release); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 5f205f85e6f9..d372146af38d 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -94,8 +94,7 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, extern void zfcp_erp_timeout_handler(unsigned long); /* zfcp_fc.c */ -extern int zfcp_fc_scan_ports(struct zfcp_adapter *); -extern void _zfcp_fc_scan_ports_later(struct work_struct *); +extern void zfcp_fc_scan_ports(struct work_struct *); extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); extern void zfcp_fc_port_did_lookup(struct work_struct *); extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 9252b65a13a5..7d6b3cadfb73 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -184,7 +184,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) range_mask = rscn_range_mask[fcp_rscn_element->addr_format]; _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element); } - schedule_work(&fsf_req->adapter->scan_work); + queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work); } static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn) @@ -664,10 +664,12 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) /** * zfcp_fc_scan_ports - scan remote ports and attach new ports - * @adapter: pointer to struct zfcp_adapter + * @work: reference to scheduled work */ -int zfcp_fc_scan_ports(struct zfcp_adapter *adapter) +void zfcp_fc_scan_ports(struct work_struct *work) { + struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, + scan_work); int ret, i; struct zfcp_gpn_ft *gpn_ft; int chain, max_entries, buf_num, max_bytes; @@ -679,17 +681,14 @@ int zfcp_fc_scan_ports(struct zfcp_adapter *adapter) if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT && fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) - return 0; + return; - 
ret = zfcp_fc_wka_port_get(&adapter->gs->ds); - if (ret) - return ret; + if (zfcp_fc_wka_port_get(&adapter->gs->ds)) + return; gpn_ft = zfcp_alloc_sg_env(buf_num); - if (!gpn_ft) { - ret = -ENOMEM; + if (!gpn_ft) goto out; - } for (i = 0; i < 3; i++) { ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); @@ -704,15 +703,9 @@ int zfcp_fc_scan_ports(struct zfcp_adapter *adapter) zfcp_free_sg_env(gpn_ft, buf_num); out: zfcp_fc_wka_port_put(&adapter->gs->ds); - return ret; } -void _zfcp_fc_scan_ports_later(struct work_struct *work) -{ - zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); -} - struct zfcp_els_fc_job { struct zfcp_send_els els; struct fc_bsg_job *job; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 5eb96052941a..b6f12c826b79 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -287,7 +287,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) zfcp_erp_adapter_access_changed(adapter, "fssrh_3", req); if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) - schedule_work(&adapter->scan_work); + queue_work(adapter->work_queue, &adapter->scan_work); break; case FSF_STATUS_READ_CFDC_UPDATED: zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req); diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 35e920b4fd8a..f539e006683c 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -184,15 +184,16 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, { struct ccw_device *cdev = to_ccwdev(dev); struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); - int ret; if (!adapter) return -ENODEV; - ret = zfcp_fc_scan_ports(adapter); + /* sync the user-space- with the kernel-invocation of scan_work */ + queue_work(adapter->work_queue, &adapter->scan_work); + flush_work(&adapter->scan_work); zfcp_ccw_adapter_put(adapter); - return ret ? ret : (ssize_t) count; + return (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, zfcp_sysfs_port_rescan_store); -- cgit v1.2.3-59-g8ed1b From 8830271c4819d86d8e87202a1fe8da0bb58912a2 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:07 +0100 Subject: [SCSI] zfcp: Dont fail SCSI commands when transitioning to blocked fc_rport If an error occurs that triggers the call to fc_remote_port_delete, ideally this call would happen before any I/O is passed back to the SCSI midlayer through scsi_done. The SCSI midlayer will retry the commands and fc_remote_port_chkready will return the correct status code. But with the delay between calling scsi_done in softirq context and the call to fc_remote_port_delete from the workqueue, there is a window where zfcp returns DID_ERROR. This leads to SCSI error recovery which then leads to offline SCSI devices since all recovery actions will fail with the rport now being blocked. In this window, zfcp has to return DID_IMM_RETRY just as the FC transport class would do in fc_remote_port_chkready for the blocked fc_rport. As soon as the fc_rport is BLOCKED, fc_remote_port_chkready will do the right thing. Additionally, there are two more cases to catch in zfcp_scsi_queuecommand: - After the port has been opened, the unit has to be opened. During this period I/O has to be retried. This can also be handled with DID_IMM_RETRY. - If the access to the unit fails, but the port is good, then this single unit cannot be accessed and I/O to this unit has to fail without involving the FC transport class. 
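Condensed, the check added to zfcp_scsi_queuecommand has this shape (a sketch of the hunk below, using the same status bits and helpers):

status = atomic_read(&unit->status);
if ((status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
    !(atomic_read(&unit->port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)) {
	/* only this unit has failed while the port is fine: a final
	 * error, the FC transport class cannot help here */
	zfcp_scsi_command_fail(scpnt, DID_ERROR);
	return 0;
}
if (!(status & ZFCP_STATUS_COMMON_UNBLOCKED)) {
	/* open unit still pending, or rport deletion not yet done:
	 * retry, as fc_remote_port_chkready will do once the rport
	 * is marked BLOCKED */
	zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
	return 0;
}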
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_scsi.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 39a621d729e9..0ecec9c1b490 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -112,12 +112,26 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, } status = atomic_read(&unit->status); - if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) || - !(status & ZFCP_STATUS_COMMON_RUNNING))) { + if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) && + !(atomic_read(&unit->port->status) & + ZFCP_STATUS_COMMON_ERP_FAILED)) { + /* only unit access denied, but port is good + * not covered by FC transport, have to fail here */ zfcp_scsi_command_fail(scpnt, DID_ERROR); return 0; } + if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { + /* This could be either + * open unit pending: this is temporary, will result in + * open unit or ERP_FAILED, so retry command + * call to rport_delete pending: mimic retry from + * fc_remote_port_chkready until rport is BLOCKED + */ + zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY); + return 0; + } + ret = zfcp_fsf_send_fcp_command_task(unit, scpnt); if (unlikely(ret == -EBUSY)) return SCSI_MLQUEUE_DEVICE_BUSY; -- cgit v1.2.3-59-g8ed1b From 4318e08c84e4916ac463002ffb7f9901ddb3c385 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:08 +0100 Subject: [SCSI] zfcp: Update FCP protocol related code Use common data structures for FCP CMND, FCP RSP and related definitions and remove zfcp private definitions. Split the FCP CMND setup and FCP RSP evaluation code into separate functions. Use inline functions to not negatively impact the I/O path.
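A minimal sketch of the new FCP_CMND setup using the common struct fcp_cmnd and the FCP_CFL_* flags from scsi/fc/fc_fcp.h (the function name here is illustrative only; task attributes and tagged queueing are left out, the complete version is the inline zfcp_fc_scsi_to_fcp added in zfcp_fc.h below):

static inline void fill_fcp_cmnd(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
{
	int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);

	if (scsi->sc_data_direction == DMA_FROM_DEVICE)
		fcp->fc_flags |= FCP_CFL_RDDATA;
	if (scsi->sc_data_direction == DMA_TO_DEVICE)
		fcp->fc_flags |= FCP_CFL_WRDATA;

	memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
	fcp->fc_dl = scsi_bufflen(scsi);
}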
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 35 +++++++------ drivers/s390/scsi/zfcp_dbf.h | 3 +- drivers/s390/scsi/zfcp_def.h | 57 --------------------- drivers/s390/scsi/zfcp_ext.h | 1 - drivers/s390/scsi/zfcp_fc.h | 112 ++++++++++++++++++++++++++++++++++++++++++ drivers/s390/scsi/zfcp_fsf.c | 97 +++++++----------------------------- drivers/s390/scsi/zfcp_scsi.c | 20 ++------ 7 files changed, 156 insertions(+), 169 deletions(-) create mode 100644 drivers/s390/scsi/zfcp_fc.h diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index fe818cd29dc1..21e5316e5003 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -870,8 +870,9 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf; struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; unsigned long flags; - struct fcp_rsp_iu *fcp_rsp; - char *fcp_rsp_info = NULL, *fcp_sns_info = NULL; + struct fcp_resp_with_ext *fcp_rsp; + struct fcp_resp_rsp_info *fcp_rsp_info = NULL; + char *fcp_sns_info = NULL; int offset = 0, buflen = 0; spin_lock_irqsave(&dbf->scsi_lock, flags); @@ -895,20 +896,22 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, rec->scsi_allowed = scsi_cmnd->allowed; } if (fsf_req != NULL) { - fcp_rsp = (struct fcp_rsp_iu *) - &(fsf_req->qtcb->bottom.io.fcp_rsp); - fcp_rsp_info = (unsigned char *) &fcp_rsp[1]; - fcp_sns_info = - zfcp_get_fcp_sns_info_ptr(fcp_rsp); - - rec->rsp_validity = fcp_rsp->validity.value; - rec->rsp_scsi_status = fcp_rsp->scsi_status; - rec->rsp_resid = fcp_rsp->fcp_resid; - if (fcp_rsp->validity.bits.fcp_rsp_len_valid) - rec->rsp_code = *(fcp_rsp_info + 3); - if (fcp_rsp->validity.bits.fcp_sns_len_valid) { - buflen = min((int)fcp_rsp->fcp_sns_len, - ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO); + fcp_rsp = (struct fcp_resp_with_ext *) + &(fsf_req->qtcb->bottom.io.fcp_rsp); + fcp_rsp_info = (struct fcp_resp_rsp_info *) + &fcp_rsp[1]; + fcp_sns_info = (char *) &fcp_rsp[1]; + if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) + fcp_sns_info += fcp_rsp->ext.fr_sns_len; + + rec->rsp_validity = fcp_rsp->resp.fr_flags; + rec->rsp_scsi_status = fcp_rsp->resp.fr_status; + rec->rsp_resid = fcp_rsp->ext.fr_resid; + if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) + rec->rsp_code = fcp_rsp_info->rsp_code; + if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) { + buflen = min(fcp_rsp->ext.fr_sns_len, + (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO); rec->sns_info_len = buflen; memcpy(rec->sns_info, fcp_sns_info, min(buflen, diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 6b1461e8f847..c3e25702df5b 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -22,6 +22,7 @@ #ifndef ZFCP_DBF_H #define ZFCP_DBF_H +#include #include "zfcp_ext.h" #include "zfcp_fsf.h" #include "zfcp_def.h" @@ -343,7 +344,7 @@ static inline void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, struct scsi_cmnd *scsi_cmnd) { - zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, + zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? 
"trst" : "lrst", tag, 1, unit->port->adapter->dbf, scsi_cmnd, NULL, 0); } diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 08fa31302f75..0317e7f20850 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -73,65 +73,8 @@ /*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ -/* task attribute values in FCP-2 FCP_CMND IU */ -#define SIMPLE_Q 0 -#define HEAD_OF_Q 1 -#define ORDERED_Q 2 -#define ACA_Q 4 -#define UNTAGGED 5 - -/* task management flags in FCP-2 FCP_CMND IU */ -#define FCP_CLEAR_ACA 0x40 -#define FCP_TARGET_RESET 0x20 -#define FCP_LOGICAL_UNIT_RESET 0x10 -#define FCP_CLEAR_TASK_SET 0x04 -#define FCP_ABORT_TASK_SET 0x02 - -#define FCP_CDB_LENGTH 16 - #define ZFCP_DID_MASK 0x00FFFFFF -/* FCP(-2) FCP_CMND IU */ -struct fcp_cmnd_iu { - u64 fcp_lun; /* FCP logical unit number */ - u8 crn; /* command reference number */ - u8 reserved0:5; /* reserved */ - u8 task_attribute:3; /* task attribute */ - u8 task_management_flags; /* task management flags */ - u8 add_fcp_cdb_length:6; /* additional FCP_CDB length */ - u8 rddata:1; /* read data */ - u8 wddata:1; /* write data */ - u8 fcp_cdb[FCP_CDB_LENGTH]; -} __attribute__((packed)); - -/* FCP(-2) FCP_RSP IU */ -struct fcp_rsp_iu { - u8 reserved0[10]; - union { - struct { - u8 reserved1:3; - u8 fcp_conf_req:1; - u8 fcp_resid_under:1; - u8 fcp_resid_over:1; - u8 fcp_sns_len_valid:1; - u8 fcp_rsp_len_valid:1; - } bits; - u8 value; - } validity; - u8 scsi_status; - u32 fcp_resid; - u32 fcp_sns_len; - u32 fcp_rsp_len; -} __attribute__((packed)); - - -#define RSP_CODE_GOOD 0 -#define RSP_CODE_LENGTH_MISMATCH 1 -#define RSP_CODE_FIELD_INVALID 2 -#define RSP_CODE_RO_MISMATCH 3 -#define RSP_CODE_TASKMAN_UNSUPP 4 -#define RSP_CODE_TASKMAN_FAILED 5 - /* see fc-fs */ #define LS_RSCN 0x61 #define LS_LOGO 0x05 diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index d372146af38d..3832fe0ae2e4 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -154,7 +154,6 @@ extern void zfcp_qdio_close(struct zfcp_qdio *); extern struct zfcp_data zfcp_data; extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); -extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); extern struct fc_function_template zfcp_transport_functions; extern void zfcp_scsi_rport_work(struct work_struct *); extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h new file mode 100644 index 000000000000..814fc2d2525a --- /dev/null +++ b/drivers/s390/scsi/zfcp_fc.h @@ -0,0 +1,112 @@ +/* + * zfcp device driver + * + * Fibre Channel related definitions and inline functions for the zfcp + * device driver + * + * Copyright IBM Corporation 2009 + */ + +#ifndef ZFCP_FC_H +#define ZFCP_FC_H + +#include +#include +#include + +/** + * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd + * @fcp: fcp_cmnd to setup + * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB + */ +static inline +void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) +{ + char tag[2]; + + int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun); + + if (scsi_populate_tag_msg(scsi, tag)) { + switch (tag[0]) { + case MSG_ORDERED_TAG: + fcp->fc_pri_ta |= FCP_PTA_ORDERED; + break; + case MSG_SIMPLE_TAG: + fcp->fc_pri_ta |= FCP_PTA_SIMPLE; + break; + }; + } else + fcp->fc_pri_ta = FCP_PTA_SIMPLE; + + 
if (scsi->sc_data_direction == DMA_FROM_DEVICE) + fcp->fc_flags |= FCP_CFL_RDDATA; + if (scsi->sc_data_direction == DMA_TO_DEVICE) + fcp->fc_flags |= FCP_CFL_WRDATA; + + memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len); + + fcp->fc_dl = scsi_bufflen(scsi); +} + +/** + * zfcp_fc_fcp_tm - setup FCP command as task management command + * @fcp: fcp_cmnd to setup + * @dev: scsi_device where to send the task management command + * @tm: task management flags to setup tm command + */ +static inline +void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags) +{ + int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun); + fcp->fc_tm_flags |= tm_flags; +} + +/** + * zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly + * @fcp_rsp: FCP RSP IU to evaluate + * @scsi: SCSI command where to update status and sense buffer + */ +static inline +void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp, + struct scsi_cmnd *scsi) +{ + struct fcp_resp_rsp_info *rsp_info; + char *sense; + u32 sense_len, resid; + u8 rsp_flags; + + set_msg_byte(scsi, COMMAND_COMPLETE); + scsi->result |= fcp_rsp->resp.fr_status; + + rsp_flags = fcp_rsp->resp.fr_flags; + + if (unlikely(rsp_flags & FCP_RSP_LEN_VAL)) { + rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; + if (rsp_info->rsp_code == FCP_TMF_CMPL) + set_host_byte(scsi, DID_OK); + else { + set_host_byte(scsi, DID_ERROR); + return; + } + } + + if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) { + sense = (char *) &fcp_rsp[1]; + if (rsp_flags & FCP_RSP_LEN_VAL) + sense += fcp_rsp->ext.fr_sns_len; + sense_len = min(fcp_rsp->ext.fr_sns_len, + (u32) SCSI_SENSE_BUFFERSIZE); + memcpy(scsi->sense_buffer, sense, sense_len); + } + + if (unlikely(rsp_flags & FCP_RESID_UNDER)) { + resid = fcp_rsp->ext.fr_resid; + scsi_set_resid(scsi, resid); + if (scsi_bufflen(scsi) - resid < scsi->underflow && + !(rsp_flags & FCP_SNS_LEN_VAL) && + fcp_rsp->resp.fr_status == SAM_STAT_GOOD) + set_host_byte(scsi, DID_ERROR); + } +} + +#endif diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index b6f12c826b79..5f4cd03797e9 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -11,6 +11,7 @@ #include #include "zfcp_ext.h" +#include "zfcp_fc.h" #include "zfcp_dbf.h" static void zfcp_fsf_request_timeout_handler(unsigned long data) @@ -2159,10 +2160,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) { struct scsi_cmnd *scpnt; - struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) - &(req->qtcb->bottom.io.fcp_rsp); - u32 sns_len; - char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1]; + struct fcp_resp_with_ext *fcp_rsp; unsigned long flags; read_lock_irqsave(&req->adapter->abort_lock, flags); @@ -2183,37 +2181,11 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) goto skip_fsfstatus; } - set_msg_byte(scpnt, COMMAND_COMPLETE); - - scpnt->result |= fcp_rsp_iu->scsi_status; + fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; + zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); zfcp_fsf_req_trace(req, scpnt); - if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) { - if (fcp_rsp_info[3] == RSP_CODE_GOOD) - set_host_byte(scpnt, DID_OK); - else { - set_host_byte(scpnt, DID_ERROR); - goto skip_fsfstatus; - } - } - - if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) { - sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) + - fcp_rsp_iu->fcp_rsp_len; - 
sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE); - sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len); - - memcpy(scpnt->sense_buffer, - zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len); - } - - if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) { - scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid); - if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) < - scpnt->underflow) - set_host_byte(scpnt, DID_ERROR); - } skip_fsfstatus: if (scpnt->result != 0) zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req); @@ -2235,11 +2207,13 @@ skip_fsfstatus: static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req) { - struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) - &(req->qtcb->bottom.io.fcp_rsp); - char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1]; + struct fcp_resp_with_ext *fcp_rsp; + struct fcp_resp_rsp_info *rsp_info; - if ((fcp_rsp_info[3] != RSP_CODE_GOOD) || + fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; + rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; + + if ((rsp_info->rsp_code != FCP_TMF_CMPL) || (req->status & ZFCP_STATUS_FSFREQ_ERROR)) req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; } @@ -2324,20 +2298,6 @@ skip_fsfstatus: } } -static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl) -{ - u32 *fcp_dl_ptr; - - /* - * fcp_dl_addr = start address of fcp_cmnd structure + - * size of fixed part + size of dynamically sized add_dcp_cdb field - * SEE FCP-2 documentation - */ - fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] + - (fcp_cmd->add_fcp_cdb_length << 2)); - *fcp_dl_ptr = fcp_dl; -} - /** * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) * @unit: unit where command is sent to @@ -2347,7 +2307,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, struct scsi_cmnd *scsi_cmnd) { struct zfcp_fsf_req *req; - struct fcp_cmnd_iu *fcp_cmnd_iu; + struct fcp_cmnd *fcp_cmnd; unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; int real_bytes, retval = -EIO; struct zfcp_adapter *adapter = unit->port->adapter; @@ -2379,16 +2339,14 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, req->qtcb->header.lun_handle = unit->handle; req->qtcb->header.port_handle = unit->port->handle; req->qtcb->bottom.io.service_class = FSF_CLASS_3; + req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; scsi_cmnd->host_scribble = (unsigned char *) req->req_id; - fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd); - fcp_cmnd_iu->fcp_lun = unit->fcp_lun; /* * set depending on data direction: * data direction bits in SBALE (SB Type) * data direction bits in QTCB - * data direction bits in FCP_CMND IU */ switch (scsi_cmnd->sc_data_direction) { case DMA_NONE: @@ -2396,32 +2354,17 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, break; case DMA_FROM_DEVICE: req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; - fcp_cmnd_iu->rddata = 1; break; case DMA_TO_DEVICE: req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE; sbtype = SBAL_FLAGS0_TYPE_WRITE; - fcp_cmnd_iu->wddata = 1; break; case DMA_BIDIRECTIONAL: goto failed_scsi_cmnd; } - if (likely((scsi_cmnd->device->simple_tags) || - ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) && - (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED)))) - fcp_cmnd_iu->task_attribute = SIMPLE_Q; - else - fcp_cmnd_iu->task_attribute = UNTAGGED; - - if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH)) - fcp_cmnd_iu->add_fcp_cdb_length = - (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2; - - memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, 
scsi_cmnd->cmd_len); - - req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + - fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32); + fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; + zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype, scsi_sglist(scsi_cmnd), @@ -2439,8 +2382,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, goto failed_scsi_cmnd; } - zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes); - retval = zfcp_fsf_req_send(req); if (unlikely(retval)) goto failed_scsi_cmnd; @@ -2466,7 +2407,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) { struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; - struct fcp_cmnd_iu *fcp_cmnd_iu; + struct fcp_cmnd *fcp_cmnd; struct zfcp_qdio *qdio = unit->port->adapter->qdio; if (unlikely(!(atomic_read(&unit->status) & @@ -2492,16 +2433,14 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) req->qtcb->header.port_handle = unit->port->handle; req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; req->qtcb->bottom.io.service_class = FSF_CLASS_3; - req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + - sizeof(u32); + req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd; - fcp_cmnd_iu->fcp_lun = unit->fcp_lun; - fcp_cmnd_iu->task_management_flags = tm_flags; + fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; + zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags); zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); if (!zfcp_fsf_req_send(req)) diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 0ecec9c1b490..3d168410036b 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -9,6 +9,8 @@ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include +#include #include #include "zfcp_ext.h" #include "zfcp_dbf.h" @@ -17,18 +19,6 @@ static unsigned int default_depth = 32; module_param_named(queue_depth, default_depth, uint, 0600); MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); -/* Find start of Sense Information in FCP response unit*/ -char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) -{ - char *fcp_sns_info_ptr; - - fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1]; - if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid) - fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len; - - return fcp_sns_info_ptr; -} - static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) { @@ -283,12 +273,12 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) { - return zfcp_task_mgmt_function(scpnt, FCP_LOGICAL_UNIT_RESET); + return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET); } static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) { - return zfcp_task_mgmt_function(scpnt, FCP_TARGET_RESET); + return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET); } static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) @@ -325,7 +315,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) adapter->scsi_host->max_lun = 1; adapter->scsi_host->max_channel = 0; adapter->scsi_host->unique_id = dev_id.devno; - 
adapter->scsi_host->max_cmd_len = 255; + adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */ adapter->scsi_host->transportt = zfcp_data.scsi_transport_template; adapter->scsi_host->hostdata[0] = (unsigned long) adapter; -- cgit v1.2.3-59-g8ed1b From 9d05ce2c0a6704ff84df02cbb3baef94fcac4f5d Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:09 +0100 Subject: [SCSI] zfcp: Use common code definitions for FC ELS structs Use common code definitions for FC plogi, logo, rscn and adisc structs instead of inventing private ones. Move the private struct for issuing ELS ADISC inside zfcp to zfcp_fc header file. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 45 --------------- drivers/s390/scsi/zfcp_ext.h | 4 +- drivers/s390/scsi/zfcp_fc.c | 131 ++++++++++++++++++++++--------------------- drivers/s390/scsi/zfcp_fc.h | 17 ++++++ drivers/s390/scsi/zfcp_fsf.c | 45 +++++++-------- drivers/s390/scsi/zfcp_fsf.h | 20 +------ 6 files changed, 108 insertions(+), 154 deletions(-) diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 0317e7f20850..fae8f2ebd43f 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -75,51 +75,6 @@ #define ZFCP_DID_MASK 0x00FFFFFF -/* see fc-fs */ -#define LS_RSCN 0x61 -#define LS_LOGO 0x05 -#define LS_PLOGI 0x03 - -struct fcp_rscn_head { - u8 command; - u8 page_length; /* always 0x04 */ - u16 payload_len; -} __attribute__((packed)); - -struct fcp_rscn_element { - u8 reserved:2; - u8 event_qual:4; - u8 addr_format:2; - u32 nport_did:24; -} __attribute__((packed)); - -/* see fc-ph */ -struct fcp_logo { - u32 command; - u32 nport_did; - u64 nport_wwpn; -} __attribute__((packed)); - -/* - * FC-FS stuff - */ -#define R_A_TOV 10 /* seconds */ - -#define ZFCP_LS_RLS 0x0f -#define ZFCP_LS_ADISC 0x52 -#define ZFCP_LS_RPS 0x56 -#define ZFCP_LS_RSCN 0x61 -#define ZFCP_LS_RNID 0x78 - -struct zfcp_ls_adisc { - u8 code; - u8 field[3]; - u32 hard_nport_id; - u64 wwpn; - u64 wwnn; - u32 nport_id; -} __attribute__ ((packed)); - /* * FC-GS-2 stuff */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 3832fe0ae2e4..c2b23b5a3d0a 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -9,6 +9,8 @@ #ifndef ZFCP_EXT_H #define ZFCP_EXT_H +#include +#include #include "zfcp_def.h" /* zfcp_aux.c */ @@ -98,7 +100,7 @@ extern void zfcp_fc_scan_ports(struct work_struct *); extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); extern void zfcp_fc_port_did_lookup(struct work_struct *); extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *); -extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); +extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *); extern void zfcp_fc_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 7d6b3cadfb73..e03410043cd7 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -9,20 +9,17 @@ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include +#include +#include #include "zfcp_ext.h" +#include "zfcp_fc.h" -enum rscn_address_format { - RSCN_PORT_ADDRESS = 0x0, - RSCN_AREA_ADDRESS = 0x1, - RSCN_DOMAIN_ADDRESS = 0x2, - RSCN_FABRIC_ADDRESS = 0x3, -}; - -static u32 rscn_range_mask[] = { - 
[RSCN_PORT_ADDRESS] = 0xFFFFFF, - [RSCN_AREA_ADDRESS] = 0xFFFF00, - [RSCN_DOMAIN_ADDRESS] = 0xFF0000, - [RSCN_FABRIC_ADDRESS] = 0x000000, +static u32 zfcp_fc_rscn_range_mask[] = { + [ELS_ADDR_FMT_PORT] = 0xFFFFFF, + [ELS_ADDR_FMT_AREA] = 0xFFFF00, + [ELS_ADDR_FMT_DOM] = 0xFF0000, + [ELS_ADDR_FMT_FAB] = 0x000000, }; struct gpn_ft_resp_acc { @@ -144,7 +141,7 @@ void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) } static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, - struct fcp_rscn_element *elem) + struct fc_els_rscn_page *page) { unsigned long flags; struct zfcp_adapter *adapter = fsf_req->adapter; @@ -152,7 +149,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, read_lock_irqsave(&adapter->port_list_lock, flags); list_for_each_entry(port, &adapter->port_list, list) { - if ((port->d_id & range) == (elem->nport_did & range)) + if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range)) zfcp_fc_test_link(port); if (!port->d_id) zfcp_erp_port_reopen(port, @@ -165,24 +162,24 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) { struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; - struct fcp_rscn_head *fcp_rscn_head; - struct fcp_rscn_element *fcp_rscn_element; + struct fc_els_rscn *head; + struct fc_els_rscn_page *page; u16 i; u16 no_entries; - u32 range_mask; + unsigned int afmt; - fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data; - fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head; + head = (struct fc_els_rscn *) status_buffer->payload.data; + page = (struct fc_els_rscn_page *) head; /* see FC-FS */ - no_entries = fcp_rscn_head->payload_len / - sizeof(struct fcp_rscn_element); + no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page); for (i = 1; i < no_entries; i++) { /* skip head and start with 1st element */ - fcp_rscn_element++; - range_mask = rscn_range_mask[fcp_rscn_element->addr_format]; - _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element); + page++; + afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK; + _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt], + page); } queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work); } @@ -204,22 +201,22 @@ static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn) static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req) { - struct fsf_status_read_buffer *status_buffer = - (struct fsf_status_read_buffer *)req->data; - struct fsf_plogi *els_plogi = - (struct fsf_plogi *) status_buffer->payload.data; + struct fsf_status_read_buffer *status_buffer; + struct fc_els_flogi *plogi; - zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn); + status_buffer = (struct fsf_status_read_buffer *) req->data; + plogi = (struct fc_els_flogi *) status_buffer->payload.data; + zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn); } static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req) { struct fsf_status_read_buffer *status_buffer = (struct fsf_status_read_buffer *)req->data; - struct fcp_logo *els_logo = - (struct fcp_logo *) status_buffer->payload.data; + struct fc_els_logo *logo = + (struct fc_els_logo *) status_buffer->payload.data; - zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn); + zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn); } /** @@ -233,11 +230,11 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) unsigned int els_type = status_buffer->payload.data[0]; 
zfcp_dbf_san_incoming_els(fsf_req); - if (els_type == LS_PLOGI) + if (els_type == ELS_PLOGI) zfcp_fc_incoming_plogi(fsf_req); - else if (els_type == LS_LOGO) + else if (els_type == ELS_LOGO) zfcp_fc_incoming_logo(fsf_req); - else if (els_type == LS_RSCN) + else if (els_type == ELS_RSCN) zfcp_fc_incoming_rscn(fsf_req); } @@ -379,33 +376,36 @@ void zfcp_fc_trigger_did_lookup(struct zfcp_port *port) * * Evaluate PLOGI playload and copy important fields into zfcp_port structure */ -void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi) -{ - port->maxframe_size = plogi->serv_param.common_serv_param[7] | - ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8); - if (plogi->serv_param.class1_serv_param[0] & 0x80) +void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi) +{ + if (plogi->fl_wwpn != port->wwpn) { + port->d_id = 0; + dev_warn(&port->adapter->ccw_device->dev, + "A port opened with WWPN 0x%016Lx returned data that " + "identifies it as WWPN 0x%016Lx\n", + (unsigned long long) port->wwpn, + (unsigned long long) plogi->fl_wwpn); + return; + } + + port->wwnn = plogi->fl_wwnn; + port->maxframe_size = plogi->fl_csp.sp_bb_data; + + if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID) port->supported_classes |= FC_COS_CLASS1; - if (plogi->serv_param.class2_serv_param[0] & 0x80) + if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID) port->supported_classes |= FC_COS_CLASS2; - if (plogi->serv_param.class3_serv_param[0] & 0x80) + if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID) port->supported_classes |= FC_COS_CLASS3; - if (plogi->serv_param.class4_serv_param[0] & 0x80) + if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID) port->supported_classes |= FC_COS_CLASS4; } -struct zfcp_els_adisc { - struct zfcp_send_els els; - struct scatterlist req; - struct scatterlist resp; - struct zfcp_ls_adisc ls_adisc; - struct zfcp_ls_adisc ls_adisc_acc; -}; - static void zfcp_fc_adisc_handler(unsigned long data) { - struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data; + struct zfcp_fc_els_adisc *adisc = (struct zfcp_fc_els_adisc *) data; struct zfcp_port *port = adisc->els.port; - struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc; + struct fc_els_adisc *adisc_resp = &adisc->adisc_resp; if (adisc->els.status) { /* request rejected or timed out */ @@ -415,9 +415,9 @@ static void zfcp_fc_adisc_handler(unsigned long data) } if (!port->wwnn) - port->wwnn = ls_adisc->wwnn; + port->wwnn = adisc_resp->adisc_wwnn; - if ((port->wwpn != ls_adisc->wwpn) || + if ((port->wwpn != adisc_resp->adisc_wwpn) || !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) { zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "fcadh_2", NULL); @@ -434,32 +434,33 @@ static void zfcp_fc_adisc_handler(unsigned long data) static int zfcp_fc_adisc(struct zfcp_port *port) { - struct zfcp_els_adisc *adisc; + struct zfcp_fc_els_adisc *adisc; struct zfcp_adapter *adapter = port->adapter; - adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC); + adisc = kzalloc(sizeof(struct zfcp_fc_els_adisc), GFP_ATOMIC); if (!adisc) return -ENOMEM; adisc->els.req = &adisc->req; adisc->els.resp = &adisc->resp; - sg_init_one(adisc->els.req, &adisc->ls_adisc, - sizeof(struct zfcp_ls_adisc)); - sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc, - sizeof(struct zfcp_ls_adisc)); + sg_init_one(adisc->els.req, &adisc->adisc_req, + sizeof(struct fc_els_adisc)); + sg_init_one(adisc->els.resp, &adisc->adisc_resp, + sizeof(struct fc_els_adisc)); adisc->els.adapter = adapter; adisc->els.port = port; adisc->els.d_id = 
port->d_id; adisc->els.handler = zfcp_fc_adisc_handler; adisc->els.handler_data = (unsigned long) adisc; - adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC; + adisc->els.ls_code = adisc->adisc_req.adisc_cmd = ELS_ADISC; /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports without FC-AL-2 capability, so we don't set it */ - adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host); - adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host); - adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host); + adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host); + adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host); + hton24(adisc->adisc_req.adisc_port_id, + fc_host_port_id(adapter->scsi_host)); return zfcp_fsf_send_els(&adisc->els); } diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 814fc2d2525a..231e231b7fd7 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -10,10 +10,27 @@ #ifndef ZFCP_FC_H #define ZFCP_FC_H +#include #include #include #include +/** + * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC + * @els: data required for issuing els fsf command + * @req: scatterlist entry for ELS ADISC request + * @resp: scatterlist entry for ELS ADISC response + * @adisc_req: ELS ADISC request data + * @adisc_resp: ELS ADISC response data + */ +struct zfcp_fc_els_adisc { + struct zfcp_send_els els; + struct scatterlist req; + struct scatterlist resp; + struct fc_els_adisc adisc_req; + struct fc_els_adisc adisc_resp; +}; + /** * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd * @fcp: fcp_cmnd to setup diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 5f4cd03797e9..9d7bf965d398 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include +#include #include "zfcp_ext.h" #include "zfcp_fc.h" #include "zfcp_dbf.h" @@ -477,17 +478,22 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) { - struct fsf_qtcb_bottom_config *bottom; + struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config; struct zfcp_adapter *adapter = req->adapter; struct Scsi_Host *shost = adapter->scsi_host; + struct fc_els_flogi *nsp, *plogi; - bottom = &req->qtcb->bottom.config; + /* adjust pointers for missing command code */ + nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param + - sizeof(u32)); + plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload + - sizeof(u32)); if (req->data) memcpy(req->data, bottom, sizeof(*bottom)); - fc_host_node_name(shost) = bottom->nport_serv_param.wwnn; - fc_host_port_name(shost) = bottom->nport_serv_param.wwpn; + fc_host_port_name(shost) = nsp->fl_wwpn; + fc_host_node_name(shost) = nsp->fl_wwnn; fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK; fc_host_speed(shost) = bottom->fc_link_speed; fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; @@ -501,8 +507,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) switch (bottom->fc_topology) { case FSF_TOPO_P2P: adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK; - adapter->peer_wwpn = bottom->plogi_payload.wwpn; - adapter->peer_wwnn = bottom->plogi_payload.wwnn; + adapter->peer_wwpn = plogi->fl_wwpn; + adapter->peer_wwnn = plogi->fl_wwnn; fc_host_port_type(shost) = FC_PORTTYPE_PTP; break; case FSF_TOPO_FABRIC: @@ -1068,15 
+1074,17 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, int max_sbals) { int ret; + unsigned int fcp_chan_timeout; ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); if (ret) return ret; /* common settings for ct/gs and els requests */ + fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000; req->qtcb->bottom.support.service_class = FSF_CLASS_3; - req->qtcb->bottom.support.timeout = 2 * R_A_TOV; - zfcp_fsf_start_timer(req, (2 * R_A_TOV + 10) * HZ); + req->qtcb->bottom.support.timeout = fcp_chan_timeout; + zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ); return 0; } @@ -1151,7 +1159,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]){ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - if (port && (send_els->ls_code != ZFCP_LS_ADISC)) + if (port && (send_els->ls_code != ELS_ADISC)) zfcp_fc_test_link(port); /*fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: @@ -1419,7 +1427,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) { struct zfcp_port *port = req->data; struct fsf_qtcb_header *header = &req->qtcb->header; - struct fsf_plogi *plogi; + struct fc_els_flogi *plogi; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) goto out; @@ -1469,23 +1477,10 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) * another GID_PN straight after a port has been opened. * Alternately, an ADISC/PDISC ELS should suffice, as well. */ - plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; + plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els; if (req->qtcb->bottom.support.els1_length >= - FSF_PLOGI_MIN_LEN) { - if (plogi->serv_param.wwpn != port->wwpn) { - port->d_id = 0; - dev_warn(&port->adapter->ccw_device->dev, - "A port opened with WWPN 0x%016Lx " - "returned data that identifies it as " - "WWPN 0x%016Lx\n", - (unsigned long long) port->wwpn, - (unsigned long long) - plogi->serv_param.wwpn); - } else { - port->wwnn = plogi->serv_param.wwnn; + FSF_PLOGI_MIN_LEN) zfcp_fc_plogi_evaluate(port, plogi); - } - } break; case FSF_UNKNOWN_OP_SUBTYPE: req->status |= ZFCP_STATUS_FSFREQ_ERROR; diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index dcc7c1dbcf58..402e0235a357 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -309,22 +309,7 @@ struct fsf_qtcb_header { u8 res4[16]; } __attribute__ ((packed)); -struct fsf_nport_serv_param { - u8 common_serv_param[16]; - u64 wwpn; - u64 wwnn; - u8 class1_serv_param[16]; - u8 class2_serv_param[16]; - u8 class3_serv_param[16]; - u8 class4_serv_param[16]; - u8 vendor_version_level[16]; -} __attribute__ ((packed)); - #define FSF_PLOGI_MIN_LEN 112 -struct fsf_plogi { - u32 code; - struct fsf_nport_serv_param serv_param; -} __attribute__ ((packed)); #define FSF_FCP_CMND_SIZE 288 #define FSF_FCP_RSP_SIZE 128 @@ -377,13 +362,12 @@ struct fsf_qtcb_bottom_config { u16 timer_interval; u8 res2[8]; u32 s_id; - struct fsf_nport_serv_param nport_serv_param; - u8 reserved_nport_serv_param[16]; + u8 nport_serv_param[128]; u8 res3[8]; u32 adapter_ports; u32 hardware_version; u8 serial_number[32]; - struct fsf_nport_serv_param plogi_payload; + u8 plogi_payload[112]; struct fsf_statistics_info stat_info; u8 res4[112]; } __attribute__ ((packed)); -- cgit v1.2.3-59-g8ed1b From dbf5dfe9dbcecf159139eec25ad256738cbc3715 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:10 +0100 Subject: [SCSI] zfcp: Use common code definitions for FC CT structs Use common 
code definitions for FC GPN_FT and GID_PN instead of inventing private ones. Move the private structs still required inside zfcp to zfcp_fc header file. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 13 ++-- drivers/s390/scsi/zfcp_dbf.c | 36 +++++------ drivers/s390/scsi/zfcp_def.h | 74 +---------------------- drivers/s390/scsi/zfcp_fc.c | 138 +++++++++++++++++-------------------------- drivers/s390/scsi/zfcp_fc.h | 80 +++++++++++++++++++++++++ 5 files changed, 161 insertions(+), 180 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 107d3f2b6e94..58bb17732f56 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -31,6 +31,7 @@ #include #include #include "zfcp_ext.h" +#include "zfcp_fc.h" #define ZFCP_BUS_ID_SIZE 20 @@ -159,7 +160,7 @@ static int __init zfcp_module_init(void) int retval = -ENOMEM; zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", - sizeof(struct ct_iu_gpn_ft_req)); + sizeof(struct zfcp_fc_gpn_ft_req)); if (!zfcp_data.gpn_ft_cache) goto out; @@ -174,7 +175,7 @@ static int __init zfcp_module_init(void) goto out_sr_cache; zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", - sizeof(struct zfcp_gid_pn_data)); + sizeof(struct zfcp_fc_gid_pn)); if (!zfcp_data.gid_pn_cache) goto out_gid_cache; @@ -407,9 +408,9 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) if (!adapter->pool.status_read_data) return -ENOMEM; - adapter->pool.gid_pn_data = + adapter->pool.gid_pn = mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); - if (!adapter->pool.gid_pn_data) + if (!adapter->pool.gid_pn) return -ENOMEM; return 0; @@ -429,8 +430,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) mempool_destroy(adapter->pool.status_read_req); if (adapter->pool.status_read_data) mempool_destroy(adapter->pool.status_read_data); - if (adapter->pool.gid_pn_data) - mempool_destroy(adapter->pool.gid_pn_data); + if (adapter->pool.gid_pn) + mempool_destroy(adapter->pool.gid_pn); } /** diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 21e5316e5003..d7a550af4a25 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -684,7 +684,7 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; struct zfcp_dbf *dbf = adapter->dbf; - struct ct_hdr *hdr = sg_virt(ct->req); + struct fc_ct_hdr *hdr = sg_virt(ct->req); struct zfcp_dbf_san_record *r = &dbf->san_buf; struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req; int level = 3; @@ -697,17 +697,17 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) r->fsf_seqno = fsf_req->seq_no; r->s_id = fc_host_port_id(adapter->scsi_host); r->d_id = wka_port->d_id; - oct->cmd_req_code = hdr->cmd_rsp_code; - oct->revision = hdr->revision; - oct->gs_type = hdr->gs_type; - oct->gs_subtype = hdr->gs_subtype; - oct->options = hdr->options; - oct->max_res_size = hdr->max_res_size; - oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr), + oct->cmd_req_code = hdr->ct_cmd; + oct->revision = hdr->ct_rev; + oct->gs_type = hdr->ct_fs_type; + oct->gs_subtype = hdr->ct_fs_subtype; + oct->options = hdr->ct_options; + oct->max_res_size = hdr->ct_mr_size; + oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr), ZFCP_DBF_SAN_MAX_PAYLOAD); debug_event(dbf->san, level, r, sizeof(*r)); zfcp_dbf_hexdump(dbf->san, r, 
sizeof(*r), level, - (void *)hdr + sizeof(struct ct_hdr), oct->len); + (void *)hdr + sizeof(struct fc_ct_hdr), oct->len); spin_unlock_irqrestore(&dbf->san_lock, flags); } @@ -720,7 +720,7 @@ void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; - struct ct_hdr *hdr = sg_virt(ct->resp); + struct fc_ct_hdr *hdr = sg_virt(ct->resp); struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_dbf_san_record *r = &dbf->san_buf; struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp; @@ -734,17 +734,17 @@ void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) r->fsf_seqno = fsf_req->seq_no; r->s_id = wka_port->d_id; r->d_id = fc_host_port_id(adapter->scsi_host); - rct->cmd_rsp_code = hdr->cmd_rsp_code; - rct->revision = hdr->revision; - rct->reason_code = hdr->reason_code; - rct->expl = hdr->reason_code_expl; - rct->vendor_unique = hdr->vendor_unique; - rct->max_res_size = hdr->max_res_size; - rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr), + rct->cmd_rsp_code = hdr->ct_cmd; + rct->revision = hdr->ct_rev; + rct->reason_code = hdr->ct_reason; + rct->expl = hdr->ct_explan; + rct->vendor_unique = hdr->ct_vendor; + rct->max_res_size = hdr->ct_mr_size; + rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr), ZFCP_DBF_SAN_MAX_PAYLOAD); debug_event(dbf->san, level, r, sizeof(*r)); zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, - (void *)hdr + sizeof(struct ct_hdr), rct->len); + (void *)hdr + sizeof(struct fc_ct_hdr), rct->len); spin_unlock_irqrestore(&dbf->san_lock, flags); } diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index fae8f2ebd43f..c64821145475 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -75,25 +75,6 @@ #define ZFCP_DID_MASK 0x00FFFFFF -/* - * FC-GS-2 stuff - */ -#define ZFCP_CT_REVISION 0x01 -#define ZFCP_CT_DIRECTORY_SERVICE 0xFC -#define ZFCP_CT_NAME_SERVER 0x02 -#define ZFCP_CT_SYNCHRONOUS 0x00 -#define ZFCP_CT_SCSI_FCP 0x08 -#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09 -#define ZFCP_CT_GID_PN 0x0121 -#define ZFCP_CT_GPN_FT 0x0172 -#define ZFCP_CT_ACCEPT 0x8002 -#define ZFCP_CT_REJECT 0x8001 - -/* - * FC-GS-4 stuff - */ -#define ZFCP_CT_TIMEOUT (3 * R_A_TOV) - /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/ /* @@ -119,9 +100,6 @@ #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 -/* FC-PH/FC-GS well-known address identifiers for generic services */ -#define ZFCP_DID_WKA 0xFFFFF0 - /* remote port status */ #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 #define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 @@ -162,50 +140,10 @@ struct zfcp_adapter_mempool { mempool_t *scsi_abort; mempool_t *status_read_req; mempool_t *status_read_data; - mempool_t *gid_pn_data; + mempool_t *gid_pn; mempool_t *qtcb_pool; }; -/* - * header for CT_IU - */ -struct ct_hdr { - u8 revision; // 0x01 - u8 in_id[3]; // 0x00 - u8 gs_type; // 0xFC Directory Service - u8 gs_subtype; // 0x02 Name Server - u8 options; // 0x00 single bidirectional exchange - u8 reserved0; - u16 cmd_rsp_code; // 0x0121 GID_PN, or 0x0100 GA_NXT - u16 max_res_size; // <= (4096 - 16) / 4 - u8 reserved1; - u8 reason_code; - u8 reason_code_expl; - u8 vendor_unique; -} __attribute__ ((packed)); - -/* nameserver request CT_IU -- for requests where - * a port name is required */ -struct ct_iu_gid_pn_req { - struct 
ct_hdr header; - u64 wwpn; -} __attribute__ ((packed)); - -/* FS_ACC IU and data unit for GID_PN nameserver request */ -struct ct_iu_gid_pn_resp { - struct ct_hdr header; - u32 d_id; -} __attribute__ ((packed)); - -struct ct_iu_gpn_ft_req { - struct ct_hdr header; - u8 flags; - u8 domain_id_scope; - u8 area_id_scope; - u8 fc4_type; -} __attribute__ ((packed)); - - /** * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct * @wka_port: port where the request is sent to @@ -226,16 +164,6 @@ struct zfcp_send_ct { int status; }; -/* used for name server requests in error recovery */ -struct zfcp_gid_pn_data { - struct zfcp_send_ct ct; - struct scatterlist req; - struct scatterlist resp; - struct ct_iu_gid_pn_req ct_iu_req; - struct ct_iu_gid_pn_resp ct_iu_resp; - struct zfcp_port *port; -}; - /** * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els * @adapter: adapter where request is sent from diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index e03410043cd7..7c306a5ef4dd 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -22,32 +22,6 @@ static u32 zfcp_fc_rscn_range_mask[] = { [ELS_ADDR_FMT_FAB] = 0x000000, }; -struct gpn_ft_resp_acc { - u8 control; - u8 port_id[3]; - u8 reserved[4]; - u64 wwpn; -} __attribute__ ((packed)); - -#define ZFCP_CT_SIZE_ONE_PAGE (PAGE_SIZE - sizeof(struct ct_hdr)) -#define ZFCP_GPN_FT_ENTRIES (ZFCP_CT_SIZE_ONE_PAGE \ - / sizeof(struct gpn_ft_resp_acc)) -#define ZFCP_GPN_FT_BUFFERS 4 -#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \ - - sizeof(struct ct_hdr)) -#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1) - -struct ct_iu_gpn_ft_resp { - struct ct_hdr header; - struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES]; -} __attribute__ ((packed)); - -struct zfcp_gpn_ft { - struct zfcp_send_ct ct; - struct scatterlist sg_req; - struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS]; -}; - struct zfcp_fc_ns_handler_data { struct completion done; void (*handler)(unsigned long); @@ -251,26 +225,26 @@ static void zfcp_fc_ns_handler(unsigned long data) static void zfcp_fc_ns_gid_pn_eval(unsigned long data) { - struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data; + struct zfcp_fc_gid_pn *gid_pn = (struct zfcp_fc_gid_pn *) data; struct zfcp_send_ct *ct = &gid_pn->ct; - struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req); - struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp); + struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req); + struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp); struct zfcp_port *port = gid_pn->port; if (ct->status) return; - if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) + if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC) return; /* paranoia */ - if (ct_iu_req->wwpn != port->wwpn) + if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn) return; /* looks like a valid d_id */ - port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; + port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid); } static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, - struct zfcp_gid_pn_data *gid_pn) + struct zfcp_fc_gid_pn *gid_pn) { struct zfcp_adapter *adapter = port->adapter; struct zfcp_fc_ns_handler_data compl_rec; @@ -281,21 +255,21 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, gid_pn->ct.wka_port = &adapter->gs->ds; gid_pn->ct.handler = zfcp_fc_ns_handler; gid_pn->ct.handler_data = (unsigned long) &compl_rec; - gid_pn->ct.req = &gid_pn->req; - gid_pn->ct.resp = &gid_pn->resp; - 
sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, - sizeof(struct ct_iu_gid_pn_req)); - sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp, - sizeof(struct ct_iu_gid_pn_resp)); + gid_pn->ct.req = &gid_pn->sg_req; + gid_pn->ct.resp = &gid_pn->sg_resp; + sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req, + sizeof(struct zfcp_fc_gid_pn_req)); + sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp, + sizeof(struct zfcp_fc_gid_pn_resp)); /* setup nameserver request */ - gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION; - gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; - gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER; - gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; - gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; - gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4; - gid_pn->ct_iu_req.wwpn = port->wwpn; + gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV; + gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR; + gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE; + gid_pn->gid_pn_req.ct_hdr.ct_options = 0; + gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN; + gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4; + gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; init_completion(&compl_rec.done); compl_rec.handler = zfcp_fc_ns_gid_pn_eval; @@ -314,10 +288,10 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) { int ret; - struct zfcp_gid_pn_data *gid_pn; + struct zfcp_fc_gid_pn *gid_pn; struct zfcp_adapter *adapter = port->adapter; - gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC); + gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC); if (!gid_pn) return -ENOMEM; @@ -331,7 +305,7 @@ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) zfcp_fc_wka_port_put(&adapter->gs->ds); out: - mempool_free(gid_pn, adapter->pool.gid_pn_data); + mempool_free(gid_pn, adapter->pool.gid_pn); return ret; } @@ -508,7 +482,7 @@ void zfcp_fc_test_link(struct zfcp_port *port) put_device(&port->sysfs_device); } -static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num) +static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num) { struct scatterlist *sg = &gpn_ft->sg_req; @@ -518,10 +492,10 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num) kfree(gpn_ft); } -static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num) +static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num) { - struct zfcp_gpn_ft *gpn_ft; - struct ct_iu_gpn_ft_req *req; + struct zfcp_fc_gpn_ft *gpn_ft; + struct zfcp_fc_gpn_ft_req *req; gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL); if (!gpn_ft) @@ -544,25 +518,24 @@ out: } -static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, +static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, struct zfcp_adapter *adapter, int max_bytes) { struct zfcp_send_ct *ct = &gpn_ft->ct; - struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); + struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); struct zfcp_fc_ns_handler_data compl_rec; int ret; /* prepare CT IU for GPN_FT */ - req->header.revision = ZFCP_CT_REVISION; - req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; - req->header.gs_subtype = ZFCP_CT_NAME_SERVER; - req->header.options = ZFCP_CT_SYNCHRONOUS; - req->header.cmd_rsp_code = ZFCP_CT_GPN_FT; - req->header.max_res_size = max_bytes / 4; - req->flags = 0; - req->domain_id_scope = 0; - req->area_id_scope = 0; - req->fc4_type = ZFCP_CT_SCSI_FCP; + req->ct_hdr.ct_rev = FC_CT_REV; + req->ct_hdr.ct_fs_type = 
FC_FST_DIR; + req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE; + req->ct_hdr.ct_options = 0; + req->ct_hdr.ct_cmd = FC_NS_GPN_FT; + req->ct_hdr.ct_mr_size = max_bytes / 4; + req->gpn_ft.fn_domain_id_scope = 0; + req->gpn_ft.fn_area_id_scope = 0; + req->gpn_ft.fn_fc4_type = FC_TYPE_FCP; /* prepare zfcp_send_ct */ ct->wka_port = &adapter->gs->ds; @@ -593,12 +566,12 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh) list_move_tail(&port->list, lh); } -static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) +static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, int max_entries) { struct zfcp_send_ct *ct = &gpn_ft->ct; struct scatterlist *sg = gpn_ft->sg_resp; - struct ct_hdr *hdr = sg_virt(sg); - struct gpn_ft_resp_acc *acc = sg_virt(sg); + struct fc_ct_hdr *hdr = sg_virt(sg); + struct fc_gpn_ft_resp *acc = sg_virt(sg); struct zfcp_adapter *adapter = ct->wka_port->adapter; struct zfcp_port *port, *tmp; unsigned long flags; @@ -609,38 +582,37 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) if (ct->status) return -EIO; - if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) { - if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD) + if (hdr->ct_cmd != FC_FS_ACC) { + if (hdr->ct_reason == FC_BA_RJT_UNABLE) return -EAGAIN; /* might be a temporary condition */ return -EIO; } - if (hdr->max_res_size) { + if (hdr->ct_mr_size) { dev_warn(&adapter->ccw_device->dev, "The name server reported %d words residual data\n", - hdr->max_res_size); + hdr->ct_mr_size); return -E2BIG; } /* first entry is the header */ for (x = 1; x < max_entries && !last; x++) { - if (x % (ZFCP_GPN_FT_ENTRIES + 1)) + if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) acc++; else acc = sg_virt(++sg); - last = acc->control & 0x80; - d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 | - acc->port_id[2]; + last = acc->fp_flags & FC_NS_FID_LAST; + d_id = ntoh24(acc->fp_fid); /* don't attach ports with a well known address */ - if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA) + if (d_id >= FC_FID_WELL_KNOWN_BASE) continue; /* skip the adapter's port and known remote ports */ - if (acc->wwpn == fc_host_port_name(adapter->scsi_host)) + if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host)) continue; - port = zfcp_port_enqueue(adapter, acc->wwpn, + port = zfcp_port_enqueue(adapter, acc->fp_wwpn, ZFCP_STATUS_COMMON_NOESC, d_id); if (!IS_ERR(port)) zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL); @@ -672,13 +644,13 @@ void zfcp_fc_scan_ports(struct work_struct *work) struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, scan_work); int ret, i; - struct zfcp_gpn_ft *gpn_ft; + struct zfcp_fc_gpn_ft *gpn_ft; int chain, max_entries, buf_num, max_bytes; chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS; - buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1; - max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES; - max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE; + buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1; + max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE; + max_bytes = chain ? 
ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE; if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT && fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 231e231b7fd7..12fc6ebbc244 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -12,9 +12,89 @@ #include #include +#include #include #include +#define ZFCP_FC_CT_SIZE_PAGE (PAGE_SIZE - sizeof(struct fc_ct_hdr)) +#define ZFCP_FC_GPN_FT_ENT_PAGE (ZFCP_FC_CT_SIZE_PAGE \ + / sizeof(struct fc_gpn_ft_resp)) +#define ZFCP_FC_GPN_FT_NUM_BUFS 4 /* memory pages */ + +#define ZFCP_FC_GPN_FT_MAX_SIZE (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \ + - sizeof(struct fc_ct_hdr)) +#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \ + (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) + +/** + * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request + * @ct_hdr: FC GS common transport header + * @gid_pn: GID_PN request + */ +struct zfcp_fc_gid_pn_req { + struct fc_ct_hdr ct_hdr; + struct fc_ns_gid_pn gid_pn; +} __packed; + +/** + * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response + * @ct_hdr: FC GS common transport header + * @gid_pn: GID_PN response + */ +struct zfcp_fc_gid_pn_resp { + struct fc_ct_hdr ct_hdr; + struct fc_gid_pn_resp gid_pn; +} __packed; + +/** + * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request + * @ct: data passed to zfcp_fsf for issuing fsf request + * @sg_req: scatterlist entry for request data + * @sg_resp: scatterlist entry for response data + * @gid_pn_req: GID_PN request data + * @gid_pn_resp: GID_PN response data + */ +struct zfcp_fc_gid_pn { + struct zfcp_send_ct ct; + struct scatterlist sg_req; + struct scatterlist sg_resp; + struct zfcp_fc_gid_pn_req gid_pn_req; + struct zfcp_fc_gid_pn_resp gid_pn_resp; + struct zfcp_port *port; +}; + +/** + * struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request + * @ct_hdr: FC GS common transport header + * @gpn_ft: GPN_FT request + */ +struct zfcp_fc_gpn_ft_req { + struct fc_ct_hdr ct_hdr; + struct fc_ns_gid_ft gpn_ft; +} __packed; + +/** + * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response + * @ct_hdr: FC GS common transport header + * @gpn_ft: Array of gpn_ft response data to fill one memory page + */ +struct zfcp_fc_gpn_ft_resp { + struct fc_ct_hdr ct_hdr; + struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE]; +} __packed; + +/** + * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request + * @ct: data passed to zfcp_fsf for issuing fsf request + * @sg_req: scatter list entry for gpn_ft request + * @sg_resp: scatter list entries for gpn_ft responses (per memory page) + */ +struct zfcp_fc_gpn_ft { + struct zfcp_send_ct ct; + struct scatterlist sg_req; + struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS]; +}; + /** * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC * @els: data required for issuing els fsf command -- cgit v1.2.3-59-g8ed1b From bd0072ecc449fb2ea8f6a2c9f6ff308f3ae0b078 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:11 +0100 Subject: [SCSI] zfcp: Move WKA port to zfcp FC code The well-known-address (WKA) port handling code is part of the FC code in zfcp. Move everything WKA related to the zfcp_fc files and use the common zfcp_fc prefix for structs and functions. Drop the unused key management service while renaming the struct, no request could ever reach this service in zfcp and it is obsolete anyway. 
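The WKA port handling being moved in this patch follows an open-on-demand pattern: the first user opens the well-known-address port and takes a reference, later users only bump the refcount, and when the last reference is dropped the close is deferred through delayed work so back-to-back name-server requests can reuse the already open port. A rough sketch of that pattern follows; it is illustrative only, with example_ names invented here (example_open_port stands in for the FSF open), and the zfcp_fc_wka_port_get()/_put()/_offline() hunks further down are the authoritative implementation.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_wka_port {
	struct mutex mutex;
	atomic_t refcount;
	int online;			/* stands in for enum zfcp_fc_wka_status */
	struct delayed_work work;	/* deferred close */
};

/* hypothetical: issues the open request, returns 0 on success */
static int example_open_port(struct example_wka_port *wka);

static int example_wka_get(struct example_wka_port *wka)
{
	mutex_lock(&wka->mutex);
	if (!wka->online)
		wka->online = !example_open_port(wka);
	mutex_unlock(&wka->mutex);

	if (!wka->online)
		return -EIO;
	atomic_inc(&wka->refcount);
	return 0;
}

static void example_wka_put(struct example_wka_port *wka)
{
	/* last user gone: close later, so a follow-up request can reuse it */
	if (atomic_dec_return(&wka->refcount) == 0)
		schedule_delayed_work(&wka->work, HZ / 100);
}

The real code additionally waits for the asynchronous open to complete and tracks the intermediate OPENING/CLOSING states, which is why it keeps an explicit status enum rather than the boolean used in this sketch.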
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 5 +++-- drivers/s390/scsi/zfcp_def.h | 31 ++------------------------- drivers/s390/scsi/zfcp_ext.h | 6 +++--- drivers/s390/scsi/zfcp_fc.c | 44 +++++++++++++++++++------------------- drivers/s390/scsi/zfcp_fc.h | 50 ++++++++++++++++++++++++++++++++++++++++++++ drivers/s390/scsi/zfcp_fsf.c | 22 +++++++++---------- 6 files changed, 90 insertions(+), 68 deletions(-) diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index d7a550af4a25..e945344ff711 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -13,6 +13,7 @@ #include #include "zfcp_dbf.h" #include "zfcp_ext.h" +#include "zfcp_fc.h" static u32 dbfsize = 4; @@ -681,7 +682,7 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action) void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; - struct zfcp_wka_port *wka_port = ct->wka_port; + struct zfcp_fc_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; struct zfcp_dbf *dbf = adapter->dbf; struct fc_ct_hdr *hdr = sg_virt(ct->req); @@ -718,7 +719,7 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; - struct zfcp_wka_port *wka_port = ct->wka_port; + struct zfcp_fc_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; struct fc_ct_hdr *hdr = sg_virt(ct->resp); struct zfcp_dbf *dbf = adapter->dbf; diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index c64821145475..c00aa2b174a1 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -104,14 +104,6 @@ #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 #define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 -/* well known address (WKA) port status*/ -enum zfcp_wka_status { - ZFCP_WKA_PORT_OFFLINE, - ZFCP_WKA_PORT_CLOSING, - ZFCP_WKA_PORT_OPENING, - ZFCP_WKA_PORT_ONLINE, -}; - /* logical unit status */ #define ZFCP_STATUS_UNIT_SHARED 0x00000004 #define ZFCP_STATUS_UNIT_READONLY 0x00000008 @@ -155,7 +147,7 @@ struct zfcp_adapter_mempool { * @status: used to pass error status to calling function */ struct zfcp_send_ct { - struct zfcp_wka_port *wka_port; + struct zfcp_fc_wka_port *wka_port; struct scatterlist *req; struct scatterlist *resp; void (*handler)(unsigned long); @@ -190,25 +182,6 @@ struct zfcp_send_els { int status; }; -struct zfcp_wka_port { - struct zfcp_adapter *adapter; - wait_queue_head_t completion_wq; - enum zfcp_wka_status status; - atomic_t refcount; - u32 d_id; - u32 handle; - struct mutex mutex; - struct delayed_work work; -}; - -struct zfcp_wka_ports { - struct zfcp_wka_port ms; /* management service */ - struct zfcp_wka_port ts; /* time service */ - struct zfcp_wka_port ds; /* directory service */ - struct zfcp_wka_port as; /* alias service */ - struct zfcp_wka_port ks; /* key distribution service */ -}; - struct zfcp_qdio_queue { struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; u8 first; /* index of next free bfr in queue */ @@ -309,7 +282,7 @@ struct zfcp_adapter { u32 erp_low_mem_count; /* nr of erp actions waiting for memory */ struct task_struct *erp_thread; - struct zfcp_wka_ports *gs; /* generic services */ + struct zfcp_fc_wka_ports *gs; /* generic services */ struct zfcp_dbf *dbf; /* debug traces */ struct zfcp_adapter_mempool pool; 
/* Adapter memory pools */ struct fc_host_statistics *fc_stats; diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index c2b23b5a3d0a..6a2d6e390b68 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -103,7 +103,7 @@ extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *); extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *); extern void zfcp_fc_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); -extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); +extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *); extern int zfcp_fc_gs_setup(struct zfcp_adapter *); extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); @@ -111,8 +111,8 @@ extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); /* zfcp_fsf.c */ extern int zfcp_fsf_open_port(struct zfcp_erp_action *); -extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *); -extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *); +extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *); +extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *); extern int zfcp_fsf_close_port(struct zfcp_erp_action *); extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 7c306a5ef4dd..d6d1e78ba0f9 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -28,25 +28,25 @@ struct zfcp_fc_ns_handler_data { unsigned long handler_data; }; -static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port) +static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port) { if (mutex_lock_interruptible(&wka_port->mutex)) return -ERESTARTSYS; - if (wka_port->status == ZFCP_WKA_PORT_OFFLINE || - wka_port->status == ZFCP_WKA_PORT_CLOSING) { - wka_port->status = ZFCP_WKA_PORT_OPENING; + if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE || + wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) { + wka_port->status = ZFCP_FC_WKA_PORT_OPENING; if (zfcp_fsf_open_wka_port(wka_port)) - wka_port->status = ZFCP_WKA_PORT_OFFLINE; + wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; } mutex_unlock(&wka_port->mutex); wait_event(wka_port->completion_wq, - wka_port->status == ZFCP_WKA_PORT_ONLINE || - wka_port->status == ZFCP_WKA_PORT_OFFLINE); + wka_port->status == ZFCP_FC_WKA_PORT_ONLINE || + wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE); - if (wka_port->status == ZFCP_WKA_PORT_ONLINE) { + if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) { atomic_inc(&wka_port->refcount); return 0; } @@ -56,24 +56,24 @@ static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port) static void zfcp_fc_wka_port_offline(struct work_struct *work) { struct delayed_work *dw = to_delayed_work(work); - struct zfcp_wka_port *wka_port = - container_of(dw, struct zfcp_wka_port, work); + struct zfcp_fc_wka_port *wka_port = + container_of(dw, struct zfcp_fc_wka_port, work); mutex_lock(&wka_port->mutex); if ((atomic_read(&wka_port->refcount) != 0) || - (wka_port->status != ZFCP_WKA_PORT_ONLINE)) + (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE)) goto out; - wka_port->status = ZFCP_WKA_PORT_CLOSING; + wka_port->status = ZFCP_FC_WKA_PORT_CLOSING; if (zfcp_fsf_close_wka_port(wka_port)) { - wka_port->status = ZFCP_WKA_PORT_OFFLINE; + wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; wake_up(&wka_port->completion_wq); } out: 
mutex_unlock(&wka_port->mutex); } -static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port) +static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port) { if (atomic_dec_return(&wka_port->refcount) != 0) return; @@ -81,7 +81,7 @@ static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port) schedule_delayed_work(&wka_port->work, HZ / 100); } -static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, +static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id, struct zfcp_adapter *adapter) { init_waitqueue_head(&wka_port->completion_wq); @@ -89,21 +89,21 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, wka_port->adapter = adapter; wka_port->d_id = d_id; - wka_port->status = ZFCP_WKA_PORT_OFFLINE; + wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; atomic_set(&wka_port->refcount, 0); mutex_init(&wka_port->mutex); INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline); } -static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) +static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka) { cancel_delayed_work_sync(&wka->work); mutex_lock(&wka->mutex); - wka->status = ZFCP_WKA_PORT_OFFLINE; + wka->status = ZFCP_FC_WKA_PORT_OFFLINE; mutex_unlock(&wka->mutex); } -void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) +void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs) { if (!gs) return; @@ -111,7 +111,6 @@ void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) zfcp_fc_wka_port_force_offline(&gs->ts); zfcp_fc_wka_port_force_offline(&gs->ds); zfcp_fc_wka_port_force_offline(&gs->as); - zfcp_fc_wka_port_force_offline(&gs->ks); } static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, @@ -834,9 +833,9 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) { - struct zfcp_wka_ports *wka_ports; + struct zfcp_fc_wka_ports *wka_ports; - wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); + wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL); if (!wka_ports) return -ENOMEM; @@ -845,7 +844,6 @@ int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter); zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter); zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter); - zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter); return 0; } diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 12fc6ebbc244..9c787e043ff8 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -111,6 +111,56 @@ struct zfcp_fc_els_adisc { struct fc_els_adisc adisc_resp; }; +/** + * enum zfcp_fc_wka_status - FC WKA port status in zfcp + * @ZFCP_FC_WKA_PORT_OFFLINE: Port is closed and not in use + * @ZFCP_FC_WKA_PORT_CLOSING: The FSF "close port" request is pending + * @ZFCP_FC_WKA_PORT_OPENING: The FSF "open port" request is pending + * @ZFCP_FC_WKA_PORT_ONLINE: The port is open and the port handle is valid + */ +enum zfcp_fc_wka_status { + ZFCP_FC_WKA_PORT_OFFLINE, + ZFCP_FC_WKA_PORT_CLOSING, + ZFCP_FC_WKA_PORT_OPENING, + ZFCP_FC_WKA_PORT_ONLINE, +}; + +/** + * struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port + * @adapter: Pointer to adapter structure this WKA port belongs to + * @completion_wq: Wait for completion of open/close command + * @status: Current status of WKA port + * @refcount: Reference count to keep port open as long as it is 
in use + * @d_id: FC destination id or well-known-address + * @handle: FSF handle for the open WKA port + * @mutex: Mutex used during opening/closing state changes + * @work: For delaying the closing of the WKA port + */ +struct zfcp_fc_wka_port { + struct zfcp_adapter *adapter; + wait_queue_head_t completion_wq; + enum zfcp_fc_wka_status status; + atomic_t refcount; + u32 d_id; + u32 handle; + struct mutex mutex; + struct delayed_work work; +}; + +/** + * struct zfcp_fc_wka_ports - Data structures for FC generic services + * @ms: FC Management service + * @ts: FC time service + * @ds: FC directory service + * @as: FC alias service + */ +struct zfcp_fc_wka_ports { + struct zfcp_fc_wka_port ms; + struct zfcp_fc_wka_port ts; + struct zfcp_fc_wka_port ds; + struct zfcp_fc_wka_port as; +}; + /** * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd * @fcp: fcp_cmnd to setup diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 9d7bf965d398..9ada555ca5a8 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1096,7 +1096,7 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, */ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) { - struct zfcp_wka_port *wka_port = ct->wka_port; + struct zfcp_fc_wka_port *wka_port = ct->wka_port; struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; int ret = -EIO; @@ -1610,11 +1610,11 @@ out: static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) { - struct zfcp_wka_port *wka_port = req->data; + struct zfcp_fc_wka_port *wka_port = req->data; struct fsf_qtcb_header *header = &req->qtcb->header; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { - wka_port->status = ZFCP_WKA_PORT_OFFLINE; + wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; goto out; } @@ -1627,13 +1627,13 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ERROR; /* fall through */ case FSF_ACCESS_DENIED: - wka_port->status = ZFCP_WKA_PORT_OFFLINE; + wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; break; case FSF_GOOD: wka_port->handle = header->port_handle; /* fall through */ case FSF_PORT_ALREADY_OPEN: - wka_port->status = ZFCP_WKA_PORT_ONLINE; + wka_port->status = ZFCP_FC_WKA_PORT_ONLINE; } out: wake_up(&wka_port->completion_wq); @@ -1641,10 +1641,10 @@ out: /** * zfcp_fsf_open_wka_port - create and send open wka-port request - * @wka_port: pointer to struct zfcp_wka_port + * @wka_port: pointer to struct zfcp_fc_wka_port * Returns: 0 on success, error otherwise */ -int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) +int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) { struct qdio_buffer_element *sbale; struct zfcp_qdio *qdio = wka_port->adapter->qdio; @@ -1683,23 +1683,23 @@ out: static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) { - struct zfcp_wka_port *wka_port = req->data; + struct zfcp_fc_wka_port *wka_port = req->data; if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req); } - wka_port->status = ZFCP_WKA_PORT_OFFLINE; + wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; wake_up(&wka_port->completion_wq); } /** * zfcp_fsf_close_wka_port - create and send close wka port request - * @erp_action: pointer to struct zfcp_erp_action + * @wka_port: WKA port to open * Returns: 0 on success, error otherwise */ -int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) +int 
zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) { struct qdio_buffer_element *sbale; struct zfcp_qdio *qdio = wka_port->adapter->qdio; -- cgit v1.2.3-59-g8ed1b From 800c0cad962dcf630cabf3efdc5983619e73d4c9 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:12 +0100 Subject: [SCSI] zfcp: Remove ZFCP_DID_MASK Instead of assigning 4 bytes with the highest byte masked out, use a 3 byte array with the ntoh24 and h24ton helper functions, thus eliminating the need for the ZFCP_DID_MASK. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 4 ++-- drivers/s390/scsi/zfcp_def.h | 4 ---- drivers/s390/scsi/zfcp_fsf.c | 12 ++++++------ drivers/s390/scsi/zfcp_fsf.h | 15 +++++++++------ 4 files changed, 17 insertions(+), 18 deletions(-) diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index e945344ff711..517f196b4c52 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -178,7 +178,7 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, case FSF_QTCB_SEND_ELS: send_els = (struct zfcp_send_els *)fsf_req->data; - response->u.els.d_id = qtcb->bottom.support.d_id; + response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id); response->u.els.ls_code = send_els->ls_code >> 24; break; @@ -812,7 +812,7 @@ void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) int length = (int)buf->length - (int)((void *)&buf->payload - (void *)buf); - zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id, + zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id), fc_host_port_id(adapter->scsi_host), buf->payload.data[0], (void *)buf->payload.data, length); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index c00aa2b174a1..ea11b4e45cdc 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -71,10 +71,6 @@ /* timeout value for "default timer" for fsf requests */ #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ) -/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ - -#define ZFCP_DID_MASK 0x00FFFFFF - /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/ /* diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 9ada555ca5a8..057c93777f92 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -128,7 +128,7 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) struct fsf_status_read_buffer *sr_buf = req->data; struct zfcp_adapter *adapter = req->adapter; struct zfcp_port *port; - int d_id = sr_buf->d_id & ZFCP_DID_MASK; + int d_id = ntoh24(sr_buf->d_id); read_lock_irqsave(&adapter->port_list_lock, flags); list_for_each_entry(port, &adapter->port_list, list) @@ -494,7 +494,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) fc_host_port_name(shost) = nsp->fl_wwpn; fc_host_node_name(shost) = nsp->fl_wwnn; - fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK; + fc_host_port_id(shost) = ntoh24(bottom->s_id); fc_host_speed(shost) = bottom->fc_link_speed; fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; @@ -506,7 +506,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) switch (bottom->fc_topology) { case FSF_TOPO_P2P: - adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK; + adapter->peer_d_id = ntoh24(bottom->peer_d_id); adapter->peer_wwpn = plogi->fl_wwpn; adapter->peer_wwnn = plogi->fl_wwnn; fc_host_port_type(shost) = FC_PORTTYPE_PTP; @@ -1216,7 
+1216,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) if (ret) goto failed_send; - req->qtcb->bottom.support.d_id = els->d_id; + hton24(req->qtcb->bottom.support.d_id, els->d_id); req->handler = zfcp_fsf_send_els_handler; req->data = els; @@ -1522,7 +1522,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; req->handler = zfcp_fsf_open_port_handler; - req->qtcb->bottom.support.d_id = port->d_id; + hton24(req->qtcb->bottom.support.d_id, port->d_id); req->data = port; req->erp_action = erp_action; erp_action->fsf_req = req; @@ -1669,7 +1669,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; req->handler = zfcp_fsf_open_wka_port_handler; - req->qtcb->bottom.support.d_id = wka_port->d_id; + hton24(req->qtcb->bottom.support.d_id, wka_port->d_id); req->data = wka_port; zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 402e0235a357..206b7eaff5a0 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -11,6 +11,7 @@ #include #include +#include #define FSF_QTCB_CURRENT_VERSION 0x00000001 @@ -228,7 +229,8 @@ struct fsf_status_read_buffer { u32 length; u32 res1; struct fsf_queue_designator queue_designator; - u32 d_id; + u8 res2; + u8 d_id[3]; u32 class; u64 fcp_lun; u8 res3[24]; @@ -327,8 +329,8 @@ struct fsf_qtcb_bottom_io { struct fsf_qtcb_bottom_support { u32 operation_subtype; - u8 res1[12]; - u32 d_id; + u8 res1[13]; + u8 d_id[3]; u32 option; u64 fcp_lun; u64 res2; @@ -357,11 +359,12 @@ struct fsf_qtcb_bottom_config { u32 fc_topology; u32 fc_link_speed; u32 adapter_type; - u32 peer_d_id; + u8 res0; + u8 peer_d_id[3]; u8 res1[2]; u16 timer_interval; - u8 res2[8]; - u32 s_id; + u8 res2[9]; + u8 s_id[3]; u8 nport_serv_param[128]; u8 res3[8]; u32 adapter_ports; -- cgit v1.2.3-59-g8ed1b From 7c7dc196814b9e1d5cc254dc579a5fa78ae524f7 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:13 +0100 Subject: [SCSI] zfcp: Simplify handling of ct and els requests Remove some redundancies in FC related code and trace: - drop redundant data from SAN trace (local s_id that only changes during link down, ls_code that is already part of payload, d_id in ct response trace that is always the same as in ct request trace) - use one common fsf struct to hold zfcp data for ct and els requests - leverage common fsf struct for FC passthrough job data, allocate it with dd_bsg_data for passthrough requests and unify common code for ct and els passthrough request - simplify callback handling in zfcp_fc Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 56 ++++------ drivers/s390/scsi/zfcp_dbf.h | 7 +- drivers/s390/scsi/zfcp_def.h | 46 -------- drivers/s390/scsi/zfcp_ext.h | 12 +- drivers/s390/scsi/zfcp_fc.c | 253 +++++++++++++++--------------------------- drivers/s390/scsi/zfcp_fc.h | 7 +- drivers/s390/scsi/zfcp_fsf.c | 28 +++-- drivers/s390/scsi/zfcp_fsf.h | 18 +++ drivers/s390/scsi/zfcp_scsi.c | 18 +-- 9 files changed, 160 insertions(+), 285 deletions(-) diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 517f196b4c52..84450955ae11 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -179,7 +179,6 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, case FSF_QTCB_SEND_ELS: send_els = (struct zfcp_send_els *)fsf_req->data; response->u.els.d_id = 
ntoh24(qtcb->bottom.support.d_id); - response->u.els.ls_code = send_els->ls_code >> 24; break; case FSF_QTCB_ABORT_FCP_CMND: @@ -349,7 +348,6 @@ static void zfcp_dbf_hba_view_response(char **p, case FSF_QTCB_SEND_ELS: zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id); - zfcp_dbf_out(p, "ls_code", "0x%02x", r->u.els.ls_code); break; case FSF_QTCB_ABORT_FCP_CMND: @@ -678,12 +676,12 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action) /** * zfcp_dbf_san_ct_request - trace event for issued CT request * @fsf_req: request containing issued CT data + * @d_id: destination id where ct request is sent to */ -void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id) { - struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; - struct zfcp_fc_wka_port *wka_port = ct->wka_port; - struct zfcp_adapter *adapter = wka_port->adapter; + struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data; + struct zfcp_adapter *adapter = fsf_req->adapter; struct zfcp_dbf *dbf = adapter->dbf; struct fc_ct_hdr *hdr = sg_virt(ct->req); struct zfcp_dbf_san_record *r = &dbf->san_buf; @@ -696,8 +694,7 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); r->fsf_reqid = fsf_req->req_id; r->fsf_seqno = fsf_req->seq_no; - r->s_id = fc_host_port_id(adapter->scsi_host); - r->d_id = wka_port->d_id; + oct->d_id = d_id; oct->cmd_req_code = hdr->ct_cmd; oct->revision = hdr->ct_rev; oct->gs_type = hdr->ct_fs_type; @@ -718,9 +715,8 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) */ void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) { - struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; - struct zfcp_fc_wka_port *wka_port = ct->wka_port; - struct zfcp_adapter *adapter = wka_port->adapter; + struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data; + struct zfcp_adapter *adapter = fsf_req->adapter; struct fc_ct_hdr *hdr = sg_virt(ct->resp); struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_dbf_san_record *r = &dbf->san_buf; @@ -733,8 +729,6 @@ void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); r->fsf_reqid = fsf_req->req_id; r->fsf_seqno = fsf_req->seq_no; - r->s_id = wka_port->d_id; - r->d_id = fc_host_port_id(adapter->scsi_host); rct->cmd_rsp_code = hdr->ct_cmd; rct->revision = hdr->ct_rev; rct->reason_code = hdr->ct_reason; @@ -750,8 +744,8 @@ void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) } static void zfcp_dbf_san_els(const char *tag, int level, - struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id, - u8 ls_code, void *buffer, int buflen) + struct zfcp_fsf_req *fsf_req, u32 d_id, + void *buffer, int buflen) { struct zfcp_adapter *adapter = fsf_req->adapter; struct zfcp_dbf *dbf = adapter->dbf; @@ -763,9 +757,7 @@ static void zfcp_dbf_san_els(const char *tag, int level, strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); rec->fsf_reqid = fsf_req->req_id; rec->fsf_seqno = fsf_req->seq_no; - rec->s_id = s_id; - rec->d_id = d_id; - rec->u.els.ls_code = ls_code; + rec->u.els.d_id = d_id; debug_event(dbf->san, level, rec, sizeof(*rec)); zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level, buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); @@ -778,12 +770,11 @@ static void zfcp_dbf_san_els(const char *tag, int level, */ void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) { - struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; + struct zfcp_fsf_ct_els *els = 
(struct zfcp_fsf_ct_els *)fsf_req->data; + u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id); - zfcp_dbf_san_els("oels", 2, fsf_req, - fc_host_port_id(els->adapter->scsi_host), - els->d_id, *(u8 *) sg_virt(els->req), - sg_virt(els->req), els->req->length); + zfcp_dbf_san_els("oels", 2, fsf_req, d_id, + sg_virt(els->req), els->req->length); } /** @@ -792,12 +783,11 @@ void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) */ void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) { - struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; + struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data; + u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id); - zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id, - fc_host_port_id(els->adapter->scsi_host), - *(u8 *)sg_virt(els->req), sg_virt(els->resp), - els->resp->length); + zfcp_dbf_san_els("rels", 2, fsf_req, d_id, + sg_virt(els->resp), els->resp->length); } /** @@ -806,16 +796,13 @@ void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) */ void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) { - struct zfcp_adapter *adapter = fsf_req->adapter; struct fsf_status_read_buffer *buf = (struct fsf_status_read_buffer *)fsf_req->data; int length = (int)buf->length - (int)((void *)&buf->payload - (void *)buf); zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id), - fc_host_port_id(adapter->scsi_host), - buf->payload.data[0], (void *)buf->payload.data, - length); + (void *)buf->payload.data, length); } static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view, @@ -830,11 +817,10 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_tag(&p, "tag", r->tag); zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); - zfcp_dbf_out(&p, "s_id", "0x%06x", r->s_id); - zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id); if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req; + zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id); zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); @@ -853,7 +839,7 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view, strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { struct zfcp_dbf_san_record_els *els = &r->u.els; - zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code); + zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id); } return p - out_buf; } diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index c3e25702df5b..8b7fd9a1033e 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -123,7 +123,6 @@ struct zfcp_dbf_hba_record_response { } unit; struct { u32 d_id; - u8 ls_code; } els; } u; } __attribute__ ((packed)); @@ -167,6 +166,7 @@ struct zfcp_dbf_san_record_ct_request { u8 options; u16 max_res_size; u32 len; + u32 d_id; } __attribute__ ((packed)); struct zfcp_dbf_san_record_ct_response { @@ -180,16 +180,13 @@ struct zfcp_dbf_san_record_ct_response { } __attribute__ ((packed)); struct zfcp_dbf_san_record_els { - u8 ls_code; - u32 len; + u32 d_id; } __attribute__ ((packed)); struct zfcp_dbf_san_record { u8 tag[ZFCP_DBF_TAG_SIZE]; u64 fsf_reqid; u32 fsf_seqno; - u32 s_id; - u32 d_id; union { struct zfcp_dbf_san_record_ct_request ct_req; struct zfcp_dbf_san_record_ct_response ct_resp; diff --git 
a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index ea11b4e45cdc..21b29804a7a6 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -132,52 +132,6 @@ struct zfcp_adapter_mempool { mempool_t *qtcb_pool; }; -/** - * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct - * @wka_port: port where the request is sent to - * @req: scatter-gather list for request - * @resp: scatter-gather list for response - * @handler: handler function (called for response to the request) - * @handler_data: data passed to handler function - * @completion: completion for synchronization purposes - * @status: used to pass error status to calling function - */ -struct zfcp_send_ct { - struct zfcp_fc_wka_port *wka_port; - struct scatterlist *req; - struct scatterlist *resp; - void (*handler)(unsigned long); - unsigned long handler_data; - struct completion *completion; - int status; -}; - -/** - * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els - * @adapter: adapter where request is sent from - * @port: port where ELS is destinated (port reference count has to be increased) - * @d_id: destiniation id of port where request is sent to - * @req: scatter-gather list for request - * @resp: scatter-gather list for response - * @handler: handler function (called for response to the request) - * @handler_data: data passed to handler function - * @completion: completion for synchronization purposes - * @ls_code: hex code of ELS command - * @status: used to pass error status to calling function - */ -struct zfcp_send_els { - struct zfcp_adapter *adapter; - struct zfcp_port *port; - u32 d_id; - struct scatterlist *req; - struct scatterlist *resp; - void (*handler)(unsigned long); - unsigned long handler_data; - struct completion *completion; - int ls_code; - int status; -}; - struct zfcp_qdio_queue { struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; u8 first; /* index of next free bfr in queue */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 6a2d6e390b68..03dec832b465 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -12,6 +12,7 @@ #include #include #include "zfcp_def.h" +#include "zfcp_fc.h" /* zfcp_aux.c */ extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64); @@ -55,7 +56,7 @@ extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *, struct fsf_status_read_buffer *); extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int); extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); -extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *); +extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32); extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); @@ -106,8 +107,7 @@ extern void zfcp_fc_link_test_work(struct work_struct *); extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *); extern int zfcp_fc_gs_setup(struct zfcp_adapter *); extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); -extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); -extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); +extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); /* zfcp_fsf.c */ extern int zfcp_fsf_open_port(struct zfcp_erp_action *); @@ -128,8 +128,10 @@ extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *, extern void 
zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); extern int zfcp_fsf_status_read(struct zfcp_qdio *); extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); -extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *); -extern int zfcp_fsf_send_els(struct zfcp_send_els *); +extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, + mempool_t *); +extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, + struct zfcp_fsf_ct_els *); extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, struct scsi_cmnd *); extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index d6d1e78ba0f9..6d5ccc053e3a 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -22,12 +22,6 @@ static u32 zfcp_fc_rscn_range_mask[] = { [ELS_ADDR_FMT_FAB] = 0x000000, }; -struct zfcp_fc_ns_handler_data { - struct completion done; - void (*handler)(unsigned long); - unsigned long handler_data; -}; - static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port) { if (mutex_lock_interruptible(&wka_port->mutex)) @@ -211,21 +205,10 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) zfcp_fc_incoming_rscn(fsf_req); } -static void zfcp_fc_ns_handler(unsigned long data) -{ - struct zfcp_fc_ns_handler_data *compl_rec = - (struct zfcp_fc_ns_handler_data *) data; - - if (compl_rec->handler) - compl_rec->handler(compl_rec->handler_data); - - complete(&compl_rec->done); -} - -static void zfcp_fc_ns_gid_pn_eval(unsigned long data) +static void zfcp_fc_ns_gid_pn_eval(void *data) { - struct zfcp_fc_gid_pn *gid_pn = (struct zfcp_fc_gid_pn *) data; - struct zfcp_send_ct *ct = &gid_pn->ct; + struct zfcp_fc_gid_pn *gid_pn = data; + struct zfcp_fsf_ct_els *ct = &gid_pn->ct; struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req); struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp); struct zfcp_port *port = gid_pn->port; @@ -242,18 +225,22 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data) port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid); } +static void zfcp_fc_complete(void *data) +{ + complete(data); +} + static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, struct zfcp_fc_gid_pn *gid_pn) { struct zfcp_adapter *adapter = port->adapter; - struct zfcp_fc_ns_handler_data compl_rec; + DECLARE_COMPLETION_ONSTACK(completion); int ret; /* setup parameters for send generic command */ gid_pn->port = port; - gid_pn->ct.wka_port = &adapter->gs->ds; - gid_pn->ct.handler = zfcp_fc_ns_handler; - gid_pn->ct.handler_data = (unsigned long) &compl_rec; + gid_pn->ct.handler = zfcp_fc_complete; + gid_pn->ct.handler_data = &completion; gid_pn->ct.req = &gid_pn->sg_req; gid_pn->ct.resp = &gid_pn->sg_resp; sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req, @@ -270,12 +257,12 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4; gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; - init_completion(&compl_rec.done); - compl_rec.handler = zfcp_fc_ns_gid_pn_eval; - compl_rec.handler_data = (unsigned long) gid_pn; - ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req); - if (!ret) - wait_for_completion(&compl_rec.done); + ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, + adapter->pool.gid_pn_req); + if (!ret) { + wait_for_completion(&completion); + zfcp_fc_ns_gid_pn_eval(gid_pn); + } return ret; } @@ -374,9 +361,9 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi) port->supported_classes 
|= FC_COS_CLASS4; } -static void zfcp_fc_adisc_handler(unsigned long data) +static void zfcp_fc_adisc_handler(void *data) { - struct zfcp_fc_els_adisc *adisc = (struct zfcp_fc_els_adisc *) data; + struct zfcp_fc_els_adisc *adisc = data; struct zfcp_port *port = adisc->els.port; struct fc_els_adisc *adisc_resp = &adisc->adisc_resp; @@ -414,6 +401,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port) if (!adisc) return -ENOMEM; + adisc->els.port = port; adisc->els.req = &adisc->req; adisc->els.resp = &adisc->resp; sg_init_one(adisc->els.req, &adisc->adisc_req, @@ -421,21 +409,18 @@ static int zfcp_fc_adisc(struct zfcp_port *port) sg_init_one(adisc->els.resp, &adisc->adisc_resp, sizeof(struct fc_els_adisc)); - adisc->els.adapter = adapter; - adisc->els.port = port; - adisc->els.d_id = port->d_id; adisc->els.handler = zfcp_fc_adisc_handler; - adisc->els.handler_data = (unsigned long) adisc; - adisc->els.ls_code = adisc->adisc_req.adisc_cmd = ELS_ADISC; + adisc->els.handler_data = adisc; /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports without FC-AL-2 capability, so we don't set it */ adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host); adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host); + adisc->adisc_req.adisc_cmd = ELS_ADISC; hton24(adisc->adisc_req.adisc_port_id, fc_host_port_id(adapter->scsi_host)); - return zfcp_fsf_send_els(&adisc->els); + return zfcp_fsf_send_els(adapter, port->d_id, &adisc->els); } void zfcp_fc_link_test_work(struct work_struct *work) @@ -520,9 +505,9 @@ out: static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, struct zfcp_adapter *adapter, int max_bytes) { - struct zfcp_send_ct *ct = &gpn_ft->ct; + struct zfcp_fsf_ct_els *ct = &gpn_ft->ct; struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); - struct zfcp_fc_ns_handler_data compl_rec; + DECLARE_COMPLETION_ONSTACK(completion); int ret; /* prepare CT IU for GPN_FT */ @@ -537,17 +522,14 @@ static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, req->gpn_ft.fn_fc4_type = FC_TYPE_FCP; /* prepare zfcp_send_ct */ - ct->wka_port = &adapter->gs->ds; - ct->handler = zfcp_fc_ns_handler; - ct->handler_data = (unsigned long)&compl_rec; + ct->handler = zfcp_fc_complete; + ct->handler_data = &completion; ct->req = &gpn_ft->sg_req; ct->resp = gpn_ft->sg_resp; - init_completion(&compl_rec.done); - compl_rec.handler = NULL; - ret = zfcp_fsf_send_ct(ct, NULL); + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL); if (!ret) - wait_for_completion(&compl_rec.done); + wait_for_completion(&completion); return ret; } @@ -565,13 +547,13 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh) list_move_tail(&port->list, lh); } -static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, int max_entries) +static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, + struct zfcp_adapter *adapter, int max_entries) { - struct zfcp_send_ct *ct = &gpn_ft->ct; + struct zfcp_fsf_ct_els *ct = &gpn_ft->ct; struct scatterlist *sg = gpn_ft->sg_resp; struct fc_ct_hdr *hdr = sg_virt(sg); struct fc_gpn_ft_resp *acc = sg_virt(sg); - struct zfcp_adapter *adapter = ct->wka_port->adapter; struct zfcp_port *port, *tmp; unsigned long flags; LIST_HEAD(remove_lh); @@ -665,7 +647,7 @@ void zfcp_fc_scan_ports(struct work_struct *work) for (i = 0; i < 3; i++) { ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); if (!ret) { - ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries); + ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries); if (ret == -EAGAIN) ssleep(1); 
else @@ -677,160 +659,109 @@ out: zfcp_fc_wka_port_put(&adapter->gs->ds); } - -struct zfcp_els_fc_job { - struct zfcp_send_els els; - struct fc_bsg_job *job; -}; - -static void zfcp_fc_generic_els_handler(unsigned long data) +static void zfcp_fc_ct_els_job_handler(void *data) { - struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data; - struct fc_bsg_job *job = els_fc_job->job; - struct fc_bsg_reply *reply = job->reply; + struct fc_bsg_job *job = data; + struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; + int status = zfcp_ct_els->status; + int reply_status; - if (els_fc_job->els.status) { - /* request rejected or timed out */ - reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT; - goto out; - } - - reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; - reply->reply_payload_rcv_len = job->reply_payload.payload_len; - -out: - job->state_flags = FC_RQST_STATE_DONE; + reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; + job->reply->reply_data.ctels_reply.status = reply_status; + job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; job->job_done(job); - kfree(els_fc_job); } -int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) +static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, + struct zfcp_adapter *adapter) { - struct zfcp_els_fc_job *els_fc_job; + struct zfcp_fsf_ct_els *els = job->dd_data; struct fc_rport *rport = job->rport; - struct Scsi_Host *shost; - struct zfcp_adapter *adapter; struct zfcp_port *port; - u8 *port_did; - - shost = rport ? rport_to_shost(rport) : job->shost; - adapter = (struct zfcp_adapter *)shost->hostdata[0]; - - if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) - return -EINVAL; - - els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL); - if (!els_fc_job) - return -ENOMEM; + u32 d_id; - els_fc_job->els.adapter = adapter; if (rport) { port = zfcp_get_port_by_wwpn(adapter, rport->port_name); - if (!port) { - kfree(els_fc_job); + if (!port) return -EINVAL; - } - els_fc_job->els.d_id = port->d_id; + d_id = port->d_id; put_device(&port->sysfs_device); - } else { - port_did = job->request->rqst_data.h_els.port_id; - els_fc_job->els.d_id = (port_did[0] << 16) + - (port_did[1] << 8) + port_did[2]; - } - - els_fc_job->els.req = job->request_payload.sg_list; - els_fc_job->els.resp = job->reply_payload.sg_list; - els_fc_job->els.handler = zfcp_fc_generic_els_handler; - els_fc_job->els.handler_data = (unsigned long) els_fc_job; - els_fc_job->job = job; + } else + d_id = ntoh24(job->request->rqst_data.h_els.port_id); - return zfcp_fsf_send_els(&els_fc_job->els); + return zfcp_fsf_send_els(adapter, d_id, els); } -struct zfcp_ct_fc_job { - struct zfcp_send_ct ct; - struct fc_bsg_job *job; -}; - -static void zfcp_fc_generic_ct_handler(unsigned long data) -{ - struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data; - struct fc_bsg_job *job = ct_fc_job->job; - - job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ? 
- FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; - job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; - job->state_flags = FC_RQST_STATE_DONE; - job->job_done(job); - - zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); - - kfree(ct_fc_job); -} - -int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) +static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, + struct zfcp_adapter *adapter) { int ret; u8 gs_type; - struct fc_rport *rport = job->rport; - struct Scsi_Host *shost; - struct zfcp_adapter *adapter; - struct zfcp_ct_fc_job *ct_fc_job; + struct zfcp_fsf_ct_els *ct = job->dd_data; + struct zfcp_fc_wka_port *wka_port; u32 preamble_word1; - shost = rport ? rport_to_shost(rport) : job->shost; - - adapter = (struct zfcp_adapter *)shost->hostdata[0]; - if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) - return -EINVAL; - - ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL); - if (!ct_fc_job) - return -ENOMEM; - preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; gs_type = (preamble_word1 & 0xff000000) >> 24; switch (gs_type) { case FC_FST_ALIAS: - ct_fc_job->ct.wka_port = &adapter->gs->as; + wka_port = &adapter->gs->as; break; case FC_FST_MGMT: - ct_fc_job->ct.wka_port = &adapter->gs->ms; + wka_port = &adapter->gs->ms; break; case FC_FST_TIME: - ct_fc_job->ct.wka_port = &adapter->gs->ts; + wka_port = &adapter->gs->ts; break; case FC_FST_DIR: - ct_fc_job->ct.wka_port = &adapter->gs->ds; + wka_port = &adapter->gs->ds; break; default: - kfree(ct_fc_job); return -EINVAL; /* no such service */ } - ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port); - if (ret) { - kfree(ct_fc_job); + ret = zfcp_fc_wka_port_get(wka_port); + if (ret) return ret; - } - ct_fc_job->ct.req = job->request_payload.sg_list; - ct_fc_job->ct.resp = job->reply_payload.sg_list; - ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; - ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; - ct_fc_job->ct.completion = NULL; - ct_fc_job->job = job; + ret = zfcp_fsf_send_ct(wka_port, ct, NULL); + if (ret) + zfcp_fc_wka_port_put(wka_port); - ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL); - if (ret) { - kfree(ct_fc_job); - zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); - } return ret; } +int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job) +{ + struct Scsi_Host *shost; + struct zfcp_adapter *adapter; + struct zfcp_fsf_ct_els *ct_els = job->dd_data; + + shost = job->rport ? 
rport_to_shost(job->rport) : job->shost; + adapter = (struct zfcp_adapter *)shost->hostdata[0]; + + if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) + return -EINVAL; + + ct_els->req = job->request_payload.sg_list; + ct_els->resp = job->reply_payload.sg_list; + ct_els->handler = zfcp_fc_ct_els_job_handler; + ct_els->handler_data = job; + + switch (job->request->msgcode) { + case FC_BSG_RPT_ELS: + case FC_BSG_HST_ELS_NOLOGIN: + return zfcp_fc_exec_els_job(job, adapter); + case FC_BSG_RPT_CT: + case FC_BSG_HST_CT: + return zfcp_fc_exec_ct_job(job, adapter); + default: + return -EINVAL; + } +} + int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) { struct zfcp_fc_wka_ports *wka_ports; diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 9c787e043ff8..cb2a3669a384 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -15,6 +15,7 @@ #include #include #include +#include "zfcp_fsf.h" #define ZFCP_FC_CT_SIZE_PAGE (PAGE_SIZE - sizeof(struct fc_ct_hdr)) #define ZFCP_FC_GPN_FT_ENT_PAGE (ZFCP_FC_CT_SIZE_PAGE \ @@ -55,7 +56,7 @@ struct zfcp_fc_gid_pn_resp { * @gid_pn_resp: GID_PN response data */ struct zfcp_fc_gid_pn { - struct zfcp_send_ct ct; + struct zfcp_fsf_ct_els ct; struct scatterlist sg_req; struct scatterlist sg_resp; struct zfcp_fc_gid_pn_req gid_pn_req; @@ -90,7 +91,7 @@ struct zfcp_fc_gpn_ft_resp { * @sg_resp: scatter list entries for gpn_ft responses (per memory page) */ struct zfcp_fc_gpn_ft { - struct zfcp_send_ct ct; + struct zfcp_fsf_ct_els ct; struct scatterlist sg_req; struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS]; }; @@ -104,7 +105,7 @@ struct zfcp_fc_gpn_ft { * @adisc_resp: ELS ADISC response data */ struct zfcp_fc_els_adisc { - struct zfcp_send_els els; + struct zfcp_fsf_ct_els els; struct scatterlist req; struct scatterlist resp; struct fc_els_adisc adisc_req; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 057c93777f92..fb580b14a68e 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -961,10 +961,10 @@ out: static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) { struct zfcp_adapter *adapter = req->adapter; - struct zfcp_send_ct *send_ct = req->data; + struct zfcp_fsf_ct_els *ct = req->data; struct fsf_qtcb_header *header = &req->qtcb->header; - send_ct->status = -EINVAL; + ct->status = -EINVAL; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) goto skip_fsfstatus; @@ -972,7 +972,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: zfcp_dbf_san_ct_response(req); - send_ct->status = 0; + ct->status = 0; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: zfcp_fsf_class_not_supp(req); @@ -1004,8 +1004,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) } skip_fsfstatus: - if (send_ct->handler) - send_ct->handler(send_ct->handler_data); + if (ct->handler) + ct->handler(ct->handler_data); } static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale, @@ -1094,9 +1094,9 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, * @ct: pointer to struct zfcp_send_ct with data for request * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req */ -int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) +int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, + struct zfcp_fsf_ct_els *ct, mempool_t *pool) { - struct zfcp_fc_wka_port *wka_port = ct->wka_port; struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; int ret = 
-EIO; @@ -1122,7 +1122,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) req->qtcb->header.port_handle = wka_port->handle; req->data = ct; - zfcp_dbf_san_ct_request(req); + zfcp_dbf_san_ct_request(req, wka_port->d_id); ret = zfcp_fsf_req_send(req); if (ret) @@ -1139,7 +1139,7 @@ out: static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) { - struct zfcp_send_els *send_els = req->data; + struct zfcp_fsf_ct_els *send_els = req->data; struct zfcp_port *port = send_els->port; struct fsf_qtcb_header *header = &req->qtcb->header; @@ -1159,9 +1159,6 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]){ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - if (port && (send_els->ls_code != ELS_ADISC)) - zfcp_fc_test_link(port); - /*fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: case FSF_SQ_RETRY_IF_POSSIBLE: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -1193,10 +1190,11 @@ skip_fsfstatus: * zfcp_fsf_send_els - initiate an ELS command (FC-FS) * @els: pointer to struct zfcp_send_els with data for the command */ -int zfcp_fsf_send_els(struct zfcp_send_els *els) +int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, + struct zfcp_fsf_ct_els *els) { struct zfcp_fsf_req *req; - struct zfcp_qdio *qdio = els->adapter->qdio; + struct zfcp_qdio *qdio = adapter->qdio; int ret = -EIO; spin_lock_bh(&qdio->req_q_lock); @@ -1216,7 +1214,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) if (ret) goto failed_send; - hton24(req->qtcb->bottom.support.d_id, els->d_id); + hton24(req->qtcb->bottom.support.d_id, d_id); req->handler = zfcp_fsf_send_els_handler; req->data = els; diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 206b7eaff5a0..b3de682b64cf 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -437,4 +437,22 @@ struct zfcp_blk_drv_data { u64 fabric_lat; } __attribute__ ((packed)); +/** + * struct zfcp_fsf_ct_els - zfcp data for ct or els request + * @req: scatter-gather list for request + * @resp: scatter-gather list for response + * @handler: handler function (called for response to the request) + * @handler_data: data passed to handler function + * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC) + * @status: used to pass error status to calling function + */ +struct zfcp_fsf_ct_els { + struct scatterlist *req; + struct scatterlist *resp; + void (*handler)(void *); + void *handler_data; + struct zfcp_port *port; + int status; +}; + #endif /* FSF_H */ diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 3d168410036b..535f36cf2819 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -14,6 +14,7 @@ #include #include "zfcp_ext.h" #include "zfcp_dbf.h" +#include "zfcp_fc.h" static unsigned int default_depth = 32; module_param_named(queue_depth, default_depth, uint, 0600); @@ -628,20 +629,6 @@ void zfcp_scsi_scan(struct work_struct *work) put_device(&unit->sysfs_device); } -static int zfcp_execute_fc_job(struct fc_bsg_job *job) -{ - switch (job->request->msgcode) { - case FC_BSG_RPT_ELS: - case FC_BSG_HST_ELS_NOLOGIN: - return zfcp_fc_execute_els_fc_job(job); - case FC_BSG_RPT_CT: - case FC_BSG_HST_CT: - return zfcp_fc_execute_ct_fc_job(job); - default: - return -EINVAL; - } -} - struct fc_function_template zfcp_transport_functions = { .show_starget_port_id = 1, .show_starget_port_name = 1, @@ -662,13 +649,14 @@ struct fc_function_template 
zfcp_transport_functions = { .get_host_port_state = zfcp_get_host_port_state, .terminate_rport_io = zfcp_scsi_terminate_rport_io, .show_host_port_state = 1, - .bsg_request = zfcp_execute_fc_job, + .bsg_request = zfcp_fc_exec_bsg_job, /* no functions registered for following dynamic attributes but directly set by LLDD */ .show_host_port_type = 1, .show_host_speed = 1, .show_host_port_id = 1, .disable_target_scan = 1, + .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els), }; struct zfcp_data zfcp_data = { -- cgit v1.2.3-59-g8ed1b From ee744622c65cd66824e8dd1b9509e515c800de14 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:14 +0100 Subject: [SCSI] zfcp: Improve ELS ADISC handling Introduce kmem_cache for ELS ADISC data to guarantee the required hardware alignment and free the allocated memory in case the send fails. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 8 ++++++++ drivers/s390/scsi/zfcp_def.h | 1 + drivers/s390/scsi/zfcp_fc.c | 11 ++++++++--- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 58bb17732f56..9d0c941b7d33 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -179,6 +179,11 @@ static int __init zfcp_module_init(void) if (!zfcp_data.gid_pn_cache) goto out_gid_cache; + zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc", + sizeof(struct zfcp_fc_els_adisc)); + if (!zfcp_data.adisc_cache) + goto out_adisc_cache; + zfcp_data.scsi_transport_template = fc_attach_transport(&zfcp_transport_functions); if (!zfcp_data.scsi_transport_template) @@ -206,6 +211,8 @@ out_ccw_register: out_misc: fc_release_transport(zfcp_data.scsi_transport_template); out_transport: + kmem_cache_destroy(zfcp_data.adisc_cache); +out_adisc_cache: kmem_cache_destroy(zfcp_data.gid_pn_cache); out_gid_cache: kmem_cache_destroy(zfcp_data.sr_buffer_cache); @@ -224,6 +231,7 @@ static void __exit zfcp_module_exit(void) ccw_driver_unregister(&zfcp_ccw_driver); misc_deregister(&zfcp_cfdc_misc); fc_release_transport(zfcp_data.scsi_transport_template); + kmem_cache_destroy(zfcp_data.adisc_cache); kmem_cache_destroy(zfcp_data.gid_pn_cache); kmem_cache_destroy(zfcp_data.sr_buffer_cache); kmem_cache_destroy(zfcp_data.qtcb_cache); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 21b29804a7a6..469d57f105db 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -347,6 +347,7 @@ struct zfcp_data { struct kmem_cache *qtcb_cache; struct kmem_cache *sr_buffer_cache; struct kmem_cache *gid_pn_cache; + struct kmem_cache *adisc_cache; }; /********************** ZFCP SPECIFIC DEFINES ********************************/ diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 6d5ccc053e3a..ac5e3b7a3576 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -389,15 +389,16 @@ static void zfcp_fc_adisc_handler(void *data) out: atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->sysfs_device); - kfree(adisc); + kmem_cache_free(zfcp_data.adisc_cache, adisc); } static int zfcp_fc_adisc(struct zfcp_port *port) { struct zfcp_fc_els_adisc *adisc; struct zfcp_adapter *adapter = port->adapter; + int ret; - adisc = kzalloc(sizeof(struct zfcp_fc_els_adisc), GFP_ATOMIC); + adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC); if (!adisc) return -ENOMEM; @@ -420,7 +421,11 @@ static int zfcp_fc_adisc(struct
zfcp_port *port) hton24(adisc->adisc_req.adisc_port_id, fc_host_port_id(adapter->scsi_host)); - return zfcp_fsf_send_els(adapter, port->d_id, &adisc->els); + ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els); + if (ret) + kmem_cache_free(zfcp_data.adisc_cache, adisc); + + return ret; } void zfcp_fc_link_test_work(struct work_struct *work) -- cgit v1.2.3-59-g8ed1b From 4c571c659e9d41332b6981ca5379047681ce9d2f Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:15 +0100 Subject: [SCSI] zfcp: Update FSF error reporting The SCSI midlayer retries commands based on the remote port state and the command status reported by the driver. Returning DID_TRANSPORT_DISRUPTED is a better approach; use this for reporting FSF errors back to the SCSI midlayer. See http://marc.info/?l=linux-scsi&m=125668044215051&w=2 as reference. There is also no need for special treatment of ABORTED commands, so remove ZFCP_STATUS_FSFREQ_ABORTED; the commands are then returned with DID_TRANSPORT_DISRUPTED. Also remove ZFCP_STATUS_FSFREQ_RETRY: it is useless, since no retry happens in the FSF layer and nobody checks the state of this flag. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 2 -- drivers/s390/scsi/zfcp_fsf.c | 38 +++++++++++--------------------------- 2 files changed, 11 insertions(+), 29 deletions(-) diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 469d57f105db..e43c6334bf69 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -110,10 +110,8 @@ #define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 #define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080 -#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100 #define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200 #define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP 0x00000400 -#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800 #define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000 /************************* STRUCTURE DEFINITIONS *****************************/ diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index fb580b14a68e..3089a05c36a1 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -315,7 +315,6 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: return; case FSF_SQ_COMMAND_ABORTED: - req->status |= ZFCP_STATUS_FSFREQ_ABORTED; break; case FSF_SQ_NO_RECOM: dev_err(&req->adapter->ccw_device->dev, @@ -356,8 +355,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) zfcp_dbf_hba_fsf_response(req); if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { - req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. 
*/ + req->status |= ZFCP_STATUS_FSFREQ_ERROR; return; } @@ -375,7 +373,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) case FSF_PROT_ERROR_STATE: case FSF_PROT_SEQ_NUMB_ERROR: zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req); - req->status |= ZFCP_STATUS_FSFREQ_RETRY; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PROT_UNSUPP_QTCB_TYPE: dev_err(&adapter->ccw_device->dev, @@ -884,13 +882,11 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) break; case FSF_PORT_BOXED: zfcp_erp_port_boxed(unit->port, "fsafch3", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_BOXED: zfcp_erp_unit_boxed(unit, "fsafch4", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: switch (fsq->word[0]) { @@ -988,8 +984,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) case FSF_ACCESS_DENIED: break; case FSF_PORT_BOXED: - req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_HANDLE_NOT_VALID: zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); @@ -1761,9 +1756,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) &unit->status); read_unlock(&port->unit_list_lock); zfcp_erp_port_boxed(port, "fscpph2", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; - + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]) { @@ -1867,8 +1860,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) break; case FSF_PORT_BOXED: zfcp_erp_port_boxed(unit->port, "fsouh_2", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_SHARING_VIOLATION: if (header->fsf_status_qual.word[0]) @@ -2030,8 +2022,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) break; case FSF_PORT_BOXED: zfcp_erp_port_boxed(unit->port, "fscuh_3", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: switch (req->qtcb->header.fsf_status_qual.word[0]) { @@ -2164,13 +2155,8 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) return; } - if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) { - set_host_byte(scpnt, DID_SOFT_ERROR); - goto skip_fsfstatus; - } - if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { - set_host_byte(scpnt, DID_ERROR); + set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); goto skip_fsfstatus; } @@ -2266,13 +2252,11 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) break; case FSF_PORT_BOXED: zfcp_erp_port_boxed(unit->port, "fssfch5", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_BOXED: zfcp_erp_unit_boxed(unit, "fssfch6", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: if (header->fsf_status_qual.word[0] == -- cgit v1.2.3-59-g8ed1b From af4de36d911ab907b92c5f3f81ceff8474ed7485 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:16 +0100 Subject: [SCSI] zfcp: Block 
scsi_eh thread for rport state BLOCKED If the SCSI error recovery starts because of a SCSI command timeout, but something else then triggers the rport to be deleted, the SCSI error recovery will run to the end and set the SCSI device offline. To prevent this, call the FC transport function fc_block_scsi_eh, which waits until the rport leaves the BLOCKED state. This guarantees that communication is possible if the rport is ONLINE, or that the SCSI devices will be removed if the rport state switches to NOT_PRESENT. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_scsi.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 535f36cf2819..3f1011663af5 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -209,6 +209,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) break; zfcp_erp_wait(adapter); + fc_block_scsi_eh(scpnt); if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL, @@ -248,6 +249,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) break; zfcp_erp_wait(adapter); + fc_block_scsi_eh(scpnt); if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); @@ -289,6 +291,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); zfcp_erp_wait(adapter); + fc_block_scsi_eh(scpnt); return SUCCESS; } -- cgit v1.2.3-59-g8ed1b From 0fdd21330a9d7bc6790eae0aed768052c315ae44 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:17 +0100 Subject: [SCSI] zfcp: Activate fc4s attributes for zfcp in FC transport class Enable the display of supported and active fc4s for zfcp in the FC transport class. zfcp only supports FCP, so simply hard-code this information. The zfcp hbaapi already has this information hardcoded, but this would allow switching from the coding in the zfcp hbaapi to the common FC transport attributes in the future. 
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 2 ++ drivers/s390/scsi/zfcp_scsi.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 3089a05c36a1..482dcd97aa5d 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -495,6 +495,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) fc_host_port_id(shost) = ntoh24(bottom->s_id); fc_host_speed(shost) = bottom->fc_link_speed; fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; + fc_host_supported_fc4s(shost)[2] = 1; /* FCP */ + fc_host_active_fc4s(shost)[2] = 1; /* FCP */ adapter->hydra_version = bottom->adapter_type; adapter->timer_ticks = bottom->timer_interval; diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 3f1011663af5..72bb9bcf7356 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -643,6 +643,7 @@ struct fc_function_template zfcp_transport_functions = { .show_host_port_name = 1, .show_host_permanent_port_name = 1, .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, .show_host_serial_number = 1, @@ -652,6 +653,7 @@ struct fc_function_template zfcp_transport_functions = { .get_host_port_state = zfcp_get_host_port_state, .terminate_rport_io = zfcp_scsi_terminate_rport_io, .show_host_port_state = 1, + .show_host_active_fc4s = 1, .bsg_request = zfcp_fc_exec_bsg_job, /* no functions registered for following dynamic attributes but directly set by LLDD */ -- cgit v1.2.3-59-g8ed1b From 54987386ee3790f3900de4df2ed4deb0e18dfc9f Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 24 Nov 2009 16:54:18 +0100 Subject: [SCSI] zfcp: Remove flag ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP The flag ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP is never set and hence can be removed. This is a leftover from the time when zfcp had to decide whether the target supports a "logical unit reset" or not. Nowadays, the SCSI midlayer calls the eh_device_reset_handler or the eh_target_reset_handler and zfcp simply maps this to a "logical unit reset" or a "target reset". 
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 1 - drivers/s390/scsi/zfcp_scsi.c | 3 --- 2 files changed, 4 deletions(-) diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index e43c6334bf69..e1b5b88e2ddb 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -111,7 +111,6 @@ #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 #define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080 #define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200 -#define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP 0x00000400 #define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000 /************************* STRUCTURE DEFINITIONS *****************************/ diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 72bb9bcf7356..771cc536a989 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -264,9 +264,6 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); retval = FAILED; - } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) { - zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt); - retval = FAILED; } else zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); -- cgit v1.2.3-59-g8ed1b
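A note on the per-job driver data used throughout the bsg consolidation earlier in this series: when a low-level driver sets .dd_bsg_size in its fc_function_template, the FC transport reserves that many extra bytes with every fc_bsg_job and exposes them through job->dd_data, which is what lets zfcp keep one struct zfcp_fsf_ct_els per CT/ELS pass-through request without a separate allocation. The sketch below only illustrates that pattern against the fc_bsg_job interface of this kernel generation; the my_* names are hypothetical and the code is not part of the patches above.

/* Illustrative sketch of the .dd_bsg_size / dd_data pattern (not zfcp code). */
#include <scsi/scsi_transport_fc.h>

struct my_bsg_ctx {			/* hypothetical per-job context */
	struct scatterlist *req;
	struct scatterlist *resp;
	int status;
};

static int my_bsg_request(struct fc_bsg_job *job)
{
	/* dd_data points to sizeof(struct my_bsg_ctx) bytes reserved by the transport. */
	struct my_bsg_ctx *ctx = job->dd_data;

	ctx->req = job->request_payload.sg_list;
	ctx->resp = job->reply_payload.sg_list;
	/* ... issue the CT or ELS pass-through, complete it later via job->job_done(job) ... */
	return 0;
}

static struct fc_function_template my_transport_functions = {
	.bsg_request = my_bsg_request,
	.dd_bsg_size = sizeof(struct my_bsg_ctx),
};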