Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 2
-rw-r--r--  drivers/acpi/acpica/evmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/utmutex.c | 1
-rw-r--r--  drivers/acpi/apei/hest.c | 6
-rw-r--r--  drivers/acpi/numa.c | 8
-rw-r--r--  drivers/acpi/pci_root.c | 4
-rw-r--r--  drivers/dma/Kconfig | 9
-rw-r--r--  drivers/dma/amba-pl08x.c | 1168
-rw-r--r--  drivers/dma/at_hdmac.c | 19
-rw-r--r--  drivers/dma/fsldma.c | 4
-rw-r--r--  drivers/dma/intel_mid_dma.c | 33
-rw-r--r--  drivers/dma/iop-adma.c | 4
-rw-r--r--  drivers/dma/pch_dma.c | 19
-rw-r--r--  drivers/dma/ste_dma40.c | 191
-rw-r--r--  drivers/dma/ste_dma40_ll.c | 246
-rw-r--r--  drivers/dma/ste_dma40_ll.h | 36
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c | 182
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_grctx.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_mc.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 5
-rw-r--r--  drivers/i2c/i2c-core.c | 90
-rw-r--r--  drivers/md/dm-table.c | 1
-rw-r--r--  drivers/md/md.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-of-core.c | 9
-rw-r--r--  drivers/mtd/Kconfig | 19
-rw-r--r--  drivers/mtd/Makefile | 2
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 55
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 116
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c | 1
-rw-r--r--  drivers/mtd/chips/cfi_util.c | 2
-rw-r--r--  drivers/mtd/chips/fwh_lock.h | 2
-rw-r--r--  drivers/mtd/devices/m25p80.c | 39
-rw-r--r--  drivers/mtd/devices/sst25l.c | 4
-rw-r--r--  drivers/mtd/maps/amd76xrom.c | 7
-rw-r--r--  drivers/mtd/maps/bcm963xx-flash.c | 5
-rw-r--r--  drivers/mtd/maps/ck804xrom.c | 7
-rw-r--r--  drivers/mtd/maps/esb2rom.c | 9
-rw-r--r--  drivers/mtd/maps/ichxrom.c | 9
-rw-r--r--  drivers/mtd/maps/physmap_of.c | 4
-rw-r--r--  drivers/mtd/maps/scx200_docflash.c | 5
-rw-r--r--  drivers/mtd/maps/tqm8xxl.c | 2
-rw-r--r--  drivers/mtd/mtdchar.c | 12
-rw-r--r--  drivers/mtd/mtdconcat.c | 1
-rw-r--r--  drivers/mtd/mtdoops.c | 3
-rw-r--r--  drivers/mtd/mtdpart.c | 30
-rw-r--r--  drivers/mtd/nand/Kconfig | 1
-rw-r--r--  drivers/mtd/nand/ams-delta.c | 80
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c | 2
-rw-r--r--  drivers/mtd/nand/fsmc_nand.c | 89
-rw-r--r--  drivers/mtd/nand/jz4740_nand.c | 57
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 2
-rw-r--r--  drivers/mtd/nand/nand_base.c | 25
-rw-r--r--  drivers/mtd/nand/nand_bbt.c | 3
-rw-r--r--  drivers/mtd/nand/nandsim.c | 39
-rw-r--r--  drivers/mtd/nand/pasemi_nand.c | 2
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 2
-rw-r--r--  drivers/mtd/nand/txx9ndfmc.c | 5
-rw-r--r--  drivers/mtd/onenand/omap2.c | 80
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 81
-rw-r--r--  drivers/mtd/onenand/onenand_bbt.c | 10
-rw-r--r--  drivers/mtd/onenand/samsung.c | 7
-rw-r--r--  drivers/mtd/ubi/build.c | 28
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 6
-rw-r--r--  drivers/net/Kconfig | 3
-rw-r--r--  drivers/net/arm/ks8695net.c | 288
-rw-r--r--  drivers/net/bfin_mac.c | 9
-rw-r--r--  drivers/net/bna/bnad_ethtool.c | 1
-rw-r--r--  drivers/net/cassini.c | 6
-rw-r--r--  drivers/net/e1000/e1000_main.c | 10
-rw-r--r--  drivers/net/e1000e/82571.c | 4
-rw-r--r--  drivers/net/e1000e/Makefile | 2
-rw-r--r--  drivers/net/e1000e/defines.h | 2
-rw-r--r--  drivers/net/e1000e/e1000.h | 2
-rw-r--r--  drivers/net/e1000e/es2lan.c | 2
-rw-r--r--  drivers/net/e1000e/ethtool.c | 2
-rw-r--r--  drivers/net/e1000e/hw.h | 4
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 2
-rw-r--r--  drivers/net/e1000e/lib.c | 20
-rw-r--r--  drivers/net/e1000e/netdev.c | 223
-rw-r--r--  drivers/net/e1000e/param.c | 6
-rw-r--r--  drivers/net/e1000e/phy.c | 4
-rw-r--r--  drivers/net/gianfar.c | 10
-rw-r--r--  drivers/net/gianfar.h | 10
-rw-r--r--  drivers/net/greth.c | 221
-rw-r--r--  drivers/net/greth.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 23
-rw-r--r--  drivers/net/macvtap.c | 2
-rw-r--r--  drivers/net/r8169.c | 43
-rw-r--r--  drivers/net/sfc/efx.c | 18
-rw-r--r--  drivers/net/sfc/net_driver.h | 10
-rw-r--r--  drivers/net/tile/tilepro.c | 10
-rw-r--r--  drivers/net/ucc_geth.c | 2
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 4
-rw-r--r--  drivers/net/vxge/vxge-main.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_def.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 37
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 5
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c | 15
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 7
-rw-r--r--  drivers/net/wireless/p54/txrx.c | 2
-rw-r--r--  drivers/nfc/pn544.c | 2
-rw-r--r--  drivers/scsi/sd.c | 103
-rw-r--r--  drivers/scsi/sd.h | 1
-rw-r--r--  drivers/scsi/sr.c | 4
-rw-r--r--  drivers/serial/samsung.c | 4
-rw-r--r--  drivers/spi/Kconfig | 8
-rw-r--r--  drivers/spi/amba-pl022.c | 2
-rw-r--r--  drivers/spi/dw_spi_mmio.c | 5
-rw-r--r--  drivers/spi/spi_imx.c | 6
-rw-r--r--  drivers/spi/spi_tegra.c | 2
-rw-r--r--  drivers/ssb/scan.c | 10
-rw-r--r--  drivers/staging/autofs/dirhash.c | 5
-rw-r--r--  drivers/staging/smbfs/dir.c | 4
-rw-r--r--  drivers/target/Kconfig | 32
-rw-r--r--  drivers/target/Makefile | 24
-rw-r--r--  drivers/target/target_core_alua.c | 1991
-rw-r--r--  drivers/target/target_core_alua.h | 126
-rw-r--r--  drivers/target/target_core_cdb.c | 1131
-rw-r--r--  drivers/target/target_core_configfs.c | 3225
-rw-r--r--  drivers/target/target_core_device.c | 1694
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 996
-rw-r--r--  drivers/target/target_core_fabric_lib.c | 451
-rw-r--r--  drivers/target/target_core_file.c | 688
-rw-r--r--  drivers/target/target_core_file.h | 50
-rw-r--r--  drivers/target/target_core_hba.c | 185
-rw-r--r--  drivers/target/target_core_hba.h | 7
-rw-r--r--  drivers/target/target_core_iblock.c | 808
-rw-r--r--  drivers/target/target_core_iblock.h | 40
-rw-r--r--  drivers/target/target_core_mib.c | 1078
-rw-r--r--  drivers/target/target_core_mib.h | 28
-rw-r--r--  drivers/target/target_core_pr.c | 4252
-rw-r--r--  drivers/target/target_core_pr.h | 67
-rw-r--r--  drivers/target/target_core_pscsi.c | 1470
-rw-r--r--  drivers/target/target_core_pscsi.h | 65
-rw-r--r--  drivers/target/target_core_rd.c | 1091
-rw-r--r--  drivers/target/target_core_rd.h | 73
-rw-r--r--  drivers/target/target_core_scdb.c | 105
-rw-r--r--  drivers/target/target_core_scdb.h | 10
-rw-r--r--  drivers/target/target_core_tmr.c | 404
-rw-r--r--  drivers/target/target_core_tpg.c | 826
-rw-r--r--  drivers/target/target_core_transport.c | 6134
-rw-r--r--  drivers/target/target_core_ua.c | 332
-rw-r--r--  drivers/target/target_core_ua.h | 36
-rw-r--r--  drivers/vhost/vhost.c | 18
-rw-r--r--  drivers/video/ep93xx-fb.c | 6
168 files changed, 29712 insertions, 2047 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index dd0a5b5e9bf3..9bfb71ff3a6a 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -26,6 +26,8 @@ source "drivers/ata/Kconfig"
source "drivers/md/Kconfig"
+source "drivers/target/Kconfig"
+
source "drivers/message/fusion/Kconfig"
source "drivers/firewire/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index ef5132469f58..7eb35f479461 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -46,6 +46,7 @@ obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_ATA) += ata/
+obj-$(CONFIG_TARGET_CORE) += target/
obj-$(CONFIG_MTD) += mtd/
obj-$(CONFIG_SPI) += spi/
obj-y += net/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 9bb69c59bb12..0e4dba0d0325 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -228,8 +228,10 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_present;
*/
ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */
ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
+#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 8e31bb5a973a..38bba66fcce5 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -293,8 +293,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
*
******************************************************************************/
static u8 acpi_ev_global_lock_pending;
-static spinlock_t _acpi_ev_global_lock_pending_lock;
-#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
static u32 acpi_ev_global_lock_handler(void *context)
{
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index d9efa495b433..199528ff7f1d 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -85,6 +85,7 @@ acpi_status acpi_ut_mutex_initialize(void)
spin_lock_init(acpi_gbl_gpe_lock);
spin_lock_init(acpi_gbl_hardware_lock);
+ spin_lock_init(acpi_ev_global_lock_pending_lock);
/* Mutex for _OSI support */
status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 4ee58e72b730..abda3786a5d7 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -201,14 +201,14 @@ void __init acpi_hest_init(void)
int rc = -ENODEV;
unsigned int ghes_count = 0;
- if (acpi_disabled)
- return;
-
if (hest_disable) {
pr_info(HEST_PFX "Table parsing disabled.\n");
return;
}
+ if (acpi_disabled)
+ goto err;
+
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
if (status == AE_NOT_FOUND) {
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d9926afec110..5eb25eb3ea48 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -275,23 +275,19 @@ acpi_table_parse_srat(enum acpi_srat_type id,
int __init acpi_numa_init(void)
{
int ret = 0;
- int nr_cpu_entries = nr_cpu_ids;
-#ifdef CONFIG_X86
/*
* Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
* SRAT cpu entries could have different order with that in MADT.
* So go over all cpu entries in SRAT to get apicid to node mapping.
*/
- nr_cpu_entries = MAX_LOCAL_APIC;
-#endif
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, nr_cpu_entries);
+ acpi_parse_x2apic_affinity, 0);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, nr_cpu_entries);
+ acpi_parse_processor_affinity, 0);
ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d9766797cd98..85249395623b 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -633,11 +633,11 @@ static int acpi_pci_root_remove(struct acpi_device *device, int type)
static int __init acpi_pci_root_init(void)
{
+ acpi_hest_init();
+
if (acpi_pci_disabled)
return 0;
- acpi_hest_init();
-
pci_acpi_crs_quirks();
if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
return -ENODEV;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ef138731c0ea..1c28816152fa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,11 +200,16 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Topcliff (Intel EG20T) PCH DMA support"
+ tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
depends on PCI && X86
select DMA_ENGINE
help
- Enable support for the Topcliff (Intel EG20T) PCH DMA engine.
+ Enable support for Intel EG20T PCH DMA engine.
+
+ This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
+ Output Hub) which is for IVI(In-Vehicle Infotainment) use.
+ ML7213 is companion chip for Intel Atom E6xx series.
+ ML7213 is completely compatible for Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b605cc9ac3a2..297f48b0cba9 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -19,14 +19,14 @@
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
- * The full GNU General Public License is iin this distribution in the
- * file called COPYING.
+ * The full GNU General Public License is in this distribution in the file
+ * called COPYING.
*
* Documentation: ARM DDI 0196G == PL080
- * Documentation: ARM DDI 0218E == PL081
+ * Documentation: ARM DDI 0218E == PL081
*
- * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
- * any channel.
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
+ * channel.
*
* The PL080 has 8 channels available for simultaneous use, and the PL081
* has only two channels. So on these DMA controllers the number of channels
@@ -53,7 +53,23 @@
*
* ASSUMES default (little) endianness for DMA transfers
*
- * Only DMAC flow control is implemented
+ * The PL08x has two flow control settings:
+ * - DMAC flow control: the transfer size defines the number of transfers
+ * which occur for the current LLI entry, and the DMAC raises TC at the
+ * end of every LLI entry. Observed behaviour shows the DMAC listening
+ * to both the BREQ and SREQ signals (contrary to documented),
+ * transferring data if either is active. The LBREQ and LSREQ signals
+ * are ignored.
+ *
+ * - Peripheral flow control: the transfer size is ignored (and should be
+ * zero). The data is transferred from the current LLI entry, until
+ * after the final transfer signalled by LBREQ or LSREQ. The DMAC
+ * will then move to the next LLI entry.
+ *
+ * Only the former works sanely with scatter lists, so we only implement
+ * the DMAC flow control method. However, peripherals which use the LBREQ
+ * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
+ * these hardware restrictions prevents them from using scatter DMA.
*
* Global TODO:
* - Break out common code from arch/arm/mach-s3c64xx and share
@@ -61,50 +77,39 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
-#include <linux/amba/bus.h>
#include <linux/dmaengine.h>
+#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/hardware/pl080.h>
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
#define DRIVER_NAME "pl08xdmac"
/**
- * struct vendor_data - vendor-specific config parameters
- * for PL08x derivates
- * @name: the name of this specific variant
+ * struct vendor_data - vendor-specific config parameters for PL08x derivatives
* @channels: the number of channels available in this variant
- * @dualmaster: whether this version supports dual AHB masters
- * or not.
+ * @dualmaster: whether this version supports dual AHB masters or not.
*/
struct vendor_data {
- char *name;
u8 channels;
bool dualmaster;
};
/*
* PL08X private data structures
- * An LLI struct - see pl08x TRM
- * Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info
- * is in cctl
+ * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
+ * start & end do not - their bus bit info is in cctl. Also note that these
+ * are fixed 32-bit quantities.
*/
-struct lli {
- dma_addr_t src;
- dma_addr_t dst;
- dma_addr_t next;
+struct pl08x_lli {
+ u32 src;
+ u32 dst;
+ u32 lli;
u32 cctl;
};
@@ -119,6 +124,8 @@ struct lli {
* @phy_chans: array of data for the physical channels
* @pool: a pool for the LLI descriptors
* @pool_ctr: counter of LLIs in the pool
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @mem_buses: set to indicate memory transfers on AHB2.
* @lock: a spinlock for this struct
*/
struct pl08x_driver_data {
@@ -126,11 +133,13 @@ struct pl08x_driver_data {
struct dma_device memcpy;
void __iomem *base;
struct amba_device *adev;
- struct vendor_data *vd;
+ const struct vendor_data *vd;
struct pl08x_platform_data *pd;
struct pl08x_phy_chan *phy_chans;
struct dma_pool *pool;
int pool_ctr;
+ u8 lli_buses;
+ u8 mem_buses;
spinlock_t lock;
};
@@ -152,9 +161,9 @@ struct pl08x_driver_data {
/* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE 0x2000
-/* Maximimum times we call dma_pool_alloc on this pool without freeing */
+/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define PL08X_MAX_ALLOCS 0x40
-#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
+#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN 8
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -162,6 +171,11 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
return container_of(chan, struct pl08x_dma_chan, chan);
}
+static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct pl08x_txd, tx);
+}
+
/*
* Physical channel handling
*/
@@ -177,88 +191,47 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
/*
* Set the initial DMA register values i.e. those for the first LLI
- * The next lli pointer and the configuration interrupt bit have
- * been set when the LLIs were constructed
+ * The next LLI pointer and the configuration interrupt bit have
+ * been set when the LLIs were constructed. Poke them into the hardware
+ * and start the transfer.
*/
-static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
- struct pl08x_phy_chan *ch)
-{
- /* Wait for channel inactive */
- while (pl08x_phy_channel_busy(ch))
- ;
-
- dev_vdbg(&pl08x->adev->dev,
- "WRITE channel %d: csrc=%08x, cdst=%08x, "
- "cctl=%08x, clli=%08x, ccfg=%08x\n",
- ch->id,
- ch->csrc,
- ch->cdst,
- ch->cctl,
- ch->clli,
- ch->ccfg);
-
- writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
- writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
- writel(ch->clli, ch->base + PL080_CH_LLI);
- writel(ch->cctl, ch->base + PL080_CH_CONTROL);
- writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
-}
-
-static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
+static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
+ struct pl08x_txd *txd)
{
- struct pl08x_channel_data *cd = plchan->cd;
+ struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_phy_chan *phychan = plchan->phychan;
- struct pl08x_txd *txd = plchan->at;
-
- /* Copy the basic control register calculated at transfer config */
- phychan->csrc = txd->csrc;
- phychan->cdst = txd->cdst;
- phychan->clli = txd->clli;
- phychan->cctl = txd->cctl;
-
- /* Assign the signal to the proper control registers */
- phychan->ccfg = cd->ccfg;
- phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
- phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
- /* If it wasn't set from AMBA, ignore it */
- if (txd->direction == DMA_TO_DEVICE)
- /* Select signal as destination */
- phychan->ccfg |=
- (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
- else if (txd->direction == DMA_FROM_DEVICE)
- /* Select signal as source */
- phychan->ccfg |=
- (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
- /* Always enable error interrupts */
- phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
- /* Always enable terminal interrupts */
- phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
-}
-
-/*
- * Enable the DMA channel
- * Assumes all other configuration bits have been set
- * as desired before this code is called
- */
-static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
- struct pl08x_phy_chan *ch)
-{
+ struct pl08x_lli *lli = &txd->llis_va[0];
u32 val;
- /*
- * Do not access config register until channel shows as disabled
- */
- while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
- ;
+ plchan->at = txd;
- /*
- * Do not access config register until channel shows as inactive
- */
- val = readl(ch->base + PL080_CH_CONFIG);
+ /* Wait for channel inactive */
+ while (pl08x_phy_channel_busy(phychan))
+ cpu_relax();
+
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+ "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+ phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
+ txd->ccfg);
+
+ writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
+ writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
+ writel(lli->lli, phychan->base + PL080_CH_LLI);
+ writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
+ writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+
+ /* Enable the DMA channel */
+ /* Do not access config register until channel shows as disabled */
+ while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+ cpu_relax();
+
+ /* Do not access config register until channel shows as inactive */
+ val = readl(phychan->base + PL080_CH_CONFIG);
while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(phychan->base + PL080_CH_CONFIG);
- writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+ writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}
/*
@@ -266,10 +239,8 @@ static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
*
* Disabling individual channels could lose data.
*
- * Disable the peripheral DMA after disabling the DMAC
- * in order to allow the DMAC FIFO to drain, and
- * hence allow the channel to show inactive
- *
+ * Disable the peripheral DMA after disabling the DMAC in order to allow
+ * the DMAC FIFO to drain, and hence allow the channel to show inactive
*/
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
@@ -282,7 +253,7 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
/* Wait for channel inactive */
while (pl08x_phy_channel_busy(ch))
- ;
+ cpu_relax();
}
static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -333,54 +304,56 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
struct pl08x_phy_chan *ch;
- struct pl08x_txd *txdi = NULL;
struct pl08x_txd *txd;
unsigned long flags;
- u32 bytes = 0;
+ size_t bytes = 0;
spin_lock_irqsave(&plchan->lock, flags);
-
ch = plchan->phychan;
txd = plchan->at;
/*
- * Next follow the LLIs to get the number of pending bytes in the
- * currently active transaction.
+ * Follow the LLIs to get the number of remaining
+ * bytes in the currently active transaction.
*/
if (ch && txd) {
- struct lli *llis_va = txd->llis_va;
- struct lli *llis_bus = (struct lli *) txd->llis_bus;
- u32 clli = readl(ch->base + PL080_CH_LLI);
+ u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
- /* First get the bytes in the current active LLI */
+ /* First get the remaining bytes in the active transfer */
bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
if (clli) {
- int i = 0;
+ struct pl08x_lli *llis_va = txd->llis_va;
+ dma_addr_t llis_bus = txd->llis_bus;
+ int index;
+
+ BUG_ON(clli < llis_bus || clli >= llis_bus +
+ sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+
+ /*
+ * Locate the next LLI - as this is an array,
+ * it's simple maths to find.
+ */
+ index = (clli - llis_bus) / sizeof(struct pl08x_lli);
- /* Forward to the LLI pointed to by clli */
- while ((clli != (u32) &(llis_bus[i])) &&
- (i < MAX_NUM_TSFR_LLIS))
- i++;
+ for (; index < MAX_NUM_TSFR_LLIS; index++) {
+ bytes += get_bytes_in_cctl(llis_va[index].cctl);
- while (clli) {
- bytes += get_bytes_in_cctl(llis_va[i].cctl);
/*
- * A clli of 0x00000000 will terminate the
- * LLI list
+ * A LLI pointer of 0 terminates the LLI list
*/
- clli = llis_va[i].next;
- i++;
+ if (!llis_va[index].lli)
+ break;
}
}
}
/* Sum up all queued transactions */
- if (!list_empty(&plchan->desc_list)) {
- list_for_each_entry(txdi, &plchan->desc_list, node) {
+ if (!list_empty(&plchan->pend_list)) {
+ struct pl08x_txd *txdi;
+ list_for_each_entry(txdi, &plchan->pend_list, node) {
bytes += txdi->len;
}
-
}
spin_unlock_irqrestore(&plchan->lock, flags);
@@ -390,6 +363,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
/*
* Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
*/
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
@@ -399,12 +376,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
unsigned long flags;
int i;
- /*
- * Try to locate a physical channel to be used for
- * this transfer. If all are taken return NULL and
- * the requester will have to cope by using some fallback
- * PIO mode or retrying later.
- */
for (i = 0; i < pl08x->vd->channels; i++) {
ch = &pl08x->phy_chans[i];
@@ -465,11 +436,11 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
}
static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
- u32 tsize)
+ size_t tsize)
{
u32 retbits = cctl;
- /* Remove all src, dst and transfersize bits */
+ /* Remove all src, dst and transfer size bits */
retbits &= ~PL080_CONTROL_DWIDTH_MASK;
retbits &= ~PL080_CONTROL_SWIDTH_MASK;
retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
@@ -509,95 +480,87 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
return retbits;
}
+struct pl08x_lli_build_data {
+ struct pl08x_txd *txd;
+ struct pl08x_driver_data *pl08x;
+ struct pl08x_bus_data srcbus;
+ struct pl08x_bus_data dstbus;
+ size_t remainder;
+};
+
/*
- * Autoselect a master bus to use for the transfer
- * this prefers the destination bus if both available
- * if fixed address on one bus the other will be chosen
+ * Autoselect a master bus to use for the transfer this prefers the
+ * destination bus if both available if fixed address on one bus the
+ * other will be chosen
*/
-void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
- struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
- struct pl08x_bus_data **sbus, u32 cctl)
+static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
+ struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
if (!(cctl & PL080_CONTROL_DST_INCR)) {
- *mbus = src_bus;
- *sbus = dst_bus;
+ *mbus = &bd->srcbus;
+ *sbus = &bd->dstbus;
} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
- *mbus = dst_bus;
- *sbus = src_bus;
+ *mbus = &bd->dstbus;
+ *sbus = &bd->srcbus;
} else {
- if (dst_bus->buswidth == 4) {
- *mbus = dst_bus;
- *sbus = src_bus;
- } else if (src_bus->buswidth == 4) {
- *mbus = src_bus;
- *sbus = dst_bus;
- } else if (dst_bus->buswidth == 2) {
- *mbus = dst_bus;
- *sbus = src_bus;
- } else if (src_bus->buswidth == 2) {
- *mbus = src_bus;
- *sbus = dst_bus;
+ if (bd->dstbus.buswidth == 4) {
+ *mbus = &bd->dstbus;
+ *sbus = &bd->srcbus;
+ } else if (bd->srcbus.buswidth == 4) {
+ *mbus = &bd->srcbus;
+ *sbus = &bd->dstbus;
+ } else if (bd->dstbus.buswidth == 2) {
+ *mbus = &bd->dstbus;
+ *sbus = &bd->srcbus;
+ } else if (bd->srcbus.buswidth == 2) {
+ *mbus = &bd->srcbus;
+ *sbus = &bd->dstbus;
} else {
- /* src_bus->buswidth == 1 */
- *mbus = dst_bus;
- *sbus = src_bus;
+ /* bd->srcbus.buswidth == 1 */
+ *mbus = &bd->dstbus;
+ *sbus = &bd->srcbus;
}
}
}
/*
- * Fills in one LLI for a certain transfer descriptor
- * and advance the counter
+ * Fills in one LLI for a certain transfer descriptor and advance the counter
*/
-int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
- struct pl08x_txd *txd, int num_llis, int len,
- u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
+ int num_llis, int len, u32 cctl)
{
- struct lli *llis_va = txd->llis_va;
- struct lli *llis_bus = (struct lli *) txd->llis_bus;
+ struct pl08x_lli *llis_va = bd->txd->llis_va;
+ dma_addr_t llis_bus = bd->txd->llis_bus;
BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
- llis_va[num_llis].cctl = cctl;
- llis_va[num_llis].src = txd->srcbus.addr;
- llis_va[num_llis].dst = txd->dstbus.addr;
-
- /*
- * On versions with dual masters, you can optionally AND on
- * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
- * in new LLIs with that controller, but we always try to
- * choose AHB1 to point into memory. The idea is to have AHB2
- * fixed on the peripheral and AHB1 messing around in the
- * memory. So we don't manipulate this bit currently.
- */
-
- llis_va[num_llis].next =
- (dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+ llis_va[num_llis].cctl = cctl;
+ llis_va[num_llis].src = bd->srcbus.addr;
+ llis_va[num_llis].dst = bd->dstbus.addr;
+ llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+ if (bd->pl08x->lli_buses & PL08X_AHB2)
+ llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
if (cctl & PL080_CONTROL_SRC_INCR)
- txd->srcbus.addr += len;
+ bd->srcbus.addr += len;
if (cctl & PL080_CONTROL_DST_INCR)
- txd->dstbus.addr += len;
+ bd->dstbus.addr += len;
- *remainder -= len;
+ BUG_ON(bd->remainder < len);
- return num_llis + 1;
+ bd->remainder -= len;
}
/*
- * Return number of bytes to fill to boundary, or len
+ * Return number of bytes to fill to boundary, or len.
+ * This calculation works for any value of addr.
*/
-static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
- u32 boundary;
-
- boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
- << PL08X_BOUNDARY_SHIFT;
+ size_t boundary_len = PL08X_BOUNDARY_SIZE -
+ (addr & (PL08X_BOUNDARY_SIZE - 1));
- if (boundary < addr + len)
- return boundary - addr;
- else
- return len;
+ return min(boundary_len, len);
}
/*
@@ -608,20 +571,13 @@ static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txd)
{
- struct pl08x_channel_data *cd = txd->cd;
struct pl08x_bus_data *mbus, *sbus;
- u32 remainder;
+ struct pl08x_lli_build_data bd;
int num_llis = 0;
u32 cctl;
- int max_bytes_per_lli;
- int total_bytes = 0;
- struct lli *llis_va;
- struct lli *llis_bus;
-
- if (!txd) {
- dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
- return 0;
- }
+ size_t max_bytes_per_lli;
+ size_t total_bytes = 0;
+ struct pl08x_lli *llis_va;
txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
&txd->llis_bus);
@@ -632,121 +588,79 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x->pool_ctr++;
- /*
- * Initialize bus values for this transfer
- * from the passed optimal values
- */
- if (!cd) {
- dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
- return 0;
- }
+ /* Get the default CCTL */
+ cctl = txd->cctl;
- /* Get the default CCTL from the platform data */
- cctl = cd->cctl;
-
- /*
- * On the PL080 we have two bus masters and we
- * should select one for source and one for
- * destination. We try to use AHB2 for the
- * bus which does not increment (typically the
- * peripheral) else we just choose something.
- */
- cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
- if (pl08x->vd->dualmaster) {
- if (cctl & PL080_CONTROL_SRC_INCR)
- /* Source increments, use AHB2 for destination */
- cctl |= PL080_CONTROL_DST_AHB2;
- else if (cctl & PL080_CONTROL_DST_INCR)
- /* Destination increments, use AHB2 for source */
- cctl |= PL080_CONTROL_SRC_AHB2;
- else
- /* Just pick something, source AHB1 dest AHB2 */
- cctl |= PL080_CONTROL_DST_AHB2;
- }
+ bd.txd = txd;
+ bd.pl08x = pl08x;
+ bd.srcbus.addr = txd->src_addr;
+ bd.dstbus.addr = txd->dst_addr;
/* Find maximum width of the source bus */
- txd->srcbus.maxwidth =
+ bd.srcbus.maxwidth =
pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
PL080_CONTROL_SWIDTH_SHIFT);
/* Find maximum width of the destination bus */
- txd->dstbus.maxwidth =
+ bd.dstbus.maxwidth =
pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
PL080_CONTROL_DWIDTH_SHIFT);
/* Set up the bus widths to the maximum */
- txd->srcbus.buswidth = txd->srcbus.maxwidth;
- txd->dstbus.buswidth = txd->dstbus.maxwidth;
+ bd.srcbus.buswidth = bd.srcbus.maxwidth;
+ bd.dstbus.buswidth = bd.dstbus.maxwidth;
dev_vdbg(&pl08x->adev->dev,
"%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
- __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+ __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
/*
* Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
*/
- max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+ max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
PL080_CONTROL_TRANSFER_SIZE_MASK;
dev_vdbg(&pl08x->adev->dev,
- "%s max bytes per lli = %d\n",
+ "%s max bytes per lli = %zu\n",
__func__, max_bytes_per_lli);
/* We need to count this down to zero */
- remainder = txd->len;
+ bd.remainder = txd->len;
dev_vdbg(&pl08x->adev->dev,
- "%s remainder = %d\n",
- __func__, remainder);
+ "%s remainder = %zu\n",
+ __func__, bd.remainder);
/*
* Choose bus to align to
* - prefers destination bus if both available
* - if fixed address on one bus chooses other
- * - modifies cctl to choose an apropriate master
- */
- pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
- &mbus, &sbus, cctl);
-
-
- /*
- * The lowest bit of the LLI register
- * is also used to indicate which master to
- * use for reading the LLIs.
*/
+ pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
if (txd->len < mbus->buswidth) {
- /*
- * Less than a bus width available
- * - send as single bytes
- */
- while (remainder) {
+ /* Less than a bus width available - send as single bytes */
+ while (bd.remainder) {
dev_vdbg(&pl08x->adev->dev,
"%s single byte LLIs for a transfer of "
- "less than a bus width (remain %08x)\n",
- __func__, remainder);
+ "less than a bus width (remain 0x%08x)\n",
+ __func__, bd.remainder);
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- num_llis =
- pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
- cctl, &remainder);
+ pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
total_bytes++;
}
} else {
- /*
- * Make one byte LLIs until master bus is aligned
- * - slave will then be aligned also
- */
+ /* Make one byte LLIs until master bus is aligned */
while ((mbus->addr) % (mbus->buswidth)) {
dev_vdbg(&pl08x->adev->dev,
"%s adjustment lli for less than bus width "
- "(remain %08x)\n",
- __func__, remainder);
+ "(remain 0x%08x)\n",
+ __func__, bd.remainder);
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- num_llis = pl08x_fill_lli_for_desc
- (pl08x, txd, num_llis, 1, cctl, &remainder);
+ pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
total_bytes++;
}
/*
- * Master now aligned
+ * Master now aligned
* - if slave is not then we must set its width down
*/
if (sbus->addr % sbus->buswidth) {
@@ -761,63 +675,51 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
* Make largest possible LLIs until less than one bus
* width left
*/
- while (remainder > (mbus->buswidth - 1)) {
- int lli_len, target_len;
- int tsize;
- int odd_bytes;
+ while (bd.remainder > (mbus->buswidth - 1)) {
+ size_t lli_len, target_len, tsize, odd_bytes;
/*
* If enough left try to send max possible,
* otherwise try to send the remainder
*/
- target_len = remainder;
- if (remainder > max_bytes_per_lli)
- target_len = max_bytes_per_lli;
+ target_len = min(bd.remainder, max_bytes_per_lli);
/*
- * Set bus lengths for incrementing busses
- * to number of bytes which fill to next memory
- * boundary
+ * Set bus lengths for incrementing buses to the
+ * number of bytes which fill to next memory boundary,
+ * limiting on the target length calculated above.
*/
if (cctl & PL080_CONTROL_SRC_INCR)
- txd->srcbus.fill_bytes =
- pl08x_pre_boundary(
- txd->srcbus.addr,
- remainder);
+ bd.srcbus.fill_bytes =
+ pl08x_pre_boundary(bd.srcbus.addr,
+ target_len);
else
- txd->srcbus.fill_bytes =
- max_bytes_per_lli;
+ bd.srcbus.fill_bytes = target_len;
if (cctl & PL080_CONTROL_DST_INCR)
- txd->dstbus.fill_bytes =
- pl08x_pre_boundary(
- txd->dstbus.addr,
- remainder);
+ bd.dstbus.fill_bytes =
+ pl08x_pre_boundary(bd.dstbus.addr,
+ target_len);
else
- txd->dstbus.fill_bytes =
- max_bytes_per_lli;
+ bd.dstbus.fill_bytes = target_len;
- /*
- * Find the nearest
- */
- lli_len = min(txd->srcbus.fill_bytes,
- txd->dstbus.fill_bytes);
+ /* Find the nearest */
+ lli_len = min(bd.srcbus.fill_bytes,
+ bd.dstbus.fill_bytes);
- BUG_ON(lli_len > remainder);
+ BUG_ON(lli_len > bd.remainder);
if (lli_len <= 0) {
dev_err(&pl08x->adev->dev,
- "%s lli_len is %d, <= 0\n",
+ "%s lli_len is %zu, <= 0\n",
__func__, lli_len);
return 0;
}
if (lli_len == target_len) {
/*
- * Can send what we wanted
- */
- /*
- * Maintain alignment
+ * Can send what we wanted.
+ * Maintain alignment
*/
lli_len = (lli_len/mbus->buswidth) *
mbus->buswidth;
@@ -825,17 +727,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
} else {
/*
* So now we know how many bytes to transfer
- * to get to the nearest boundary
- * The next lli will past the boundary
- * - however we may be working to a boundary
- * on the slave bus
- * We need to ensure the master stays aligned
+ * to get to the nearest boundary. The next
+ * LLI will past the boundary. However, we
+ * may be working to a boundary on the slave
+ * bus. We need to ensure the master stays
+ * aligned, and that we are working in
+ * multiples of the bus widths.
*/
odd_bytes = lli_len % mbus->buswidth;
- /*
- * - and that we are working in multiples
- * of the bus widths
- */
lli_len -= odd_bytes;
}
@@ -855,41 +754,38 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
if (target_len != lli_len) {
dev_vdbg(&pl08x->adev->dev,
- "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+ "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
__func__, target_len, lli_len, txd->len);
}
cctl = pl08x_cctl_bits(cctl,
- txd->srcbus.buswidth,
- txd->dstbus.buswidth,
+ bd.srcbus.buswidth,
+ bd.dstbus.buswidth,
tsize);
dev_vdbg(&pl08x->adev->dev,
- "%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
- __func__, lli_len, remainder);
- num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
- num_llis, lli_len, cctl,
- &remainder);
+ "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+ __func__, lli_len, bd.remainder);
+ pl08x_fill_lli_for_desc(&bd, num_llis++,
+ lli_len, cctl);
total_bytes += lli_len;
}
if (odd_bytes) {
/*
- * Creep past the boundary,
- * maintaining master alignment
+ * Creep past the boundary, maintaining
+ * master alignment
*/
int j;
for (j = 0; (j < mbus->buswidth)
- && (remainder); j++) {
+ && (bd.remainder); j++) {
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
dev_vdbg(&pl08x->adev->dev,
- "%s align with boundardy, single byte (remain %08x)\n",
- __func__, remainder);
- num_llis =
- pl08x_fill_lli_for_desc(pl08x,
- txd, num_llis, 1,
- cctl, &remainder);
+ "%s align with boundary, single byte (remain 0x%08zx)\n",
+ __func__, bd.remainder);
+ pl08x_fill_lli_for_desc(&bd,
+ num_llis++, 1, cctl);
total_bytes++;
}
}
@@ -898,25 +794,18 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
/*
* Send any odd bytes
*/
- if (remainder < 0) {
- dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
- __func__, remainder);
- return 0;
- }
-
- while (remainder) {
+ while (bd.remainder) {
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
dev_vdbg(&pl08x->adev->dev,
- "%s align with boundardy, single odd byte (remain %d)\n",
- __func__, remainder);
- num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
- 1, cctl, &remainder);
+ "%s align with boundary, single odd byte (remain %zu)\n",
+ __func__, bd.remainder);
+ pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
total_bytes++;
}
}
if (total_bytes != txd->len) {
dev_err(&pl08x->adev->dev,
- "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
+ "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
__func__, total_bytes, txd->len);
return 0;
}
@@ -927,41 +816,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
__func__, (u32) MAX_NUM_TSFR_LLIS);
return 0;
}
- /*
- * Decide whether this is a loop or a terminated transfer
- */
- llis_va = txd->llis_va;
- llis_bus = (struct lli *) txd->llis_bus;
- if (cd->circular_buffer) {
- /*
- * Loop the circular buffer so that the next element
- * points back to the beginning of the LLI.
- */
- llis_va[num_llis - 1].next =
- (dma_addr_t)((unsigned int)&(llis_bus[0]));
- } else {
- /*
- * On non-circular buffers, the final LLI terminates
- * the LLI.
- */
- llis_va[num_llis - 1].next = 0;
- /*
- * The final LLI element shall also fire an interrupt
- */
- llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
- }
-
- /* Now store the channel register values */
- txd->csrc = llis_va[0].src;
- txd->cdst = llis_va[0].dst;
- if (num_llis > 1)
- txd->clli = llis_va[0].next;
- else
- txd->clli = 0;
-
- txd->cctl = llis_va[0].cctl;
- /* ccfg will be set at physical channel allocation time */
+ llis_va = txd->llis_va;
+ /* The final LLI terminates the LLI. */
+ llis_va[num_llis - 1].lli = 0;
+ /* The final LLI element shall also fire an interrupt. */
+ llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
#ifdef VERBOSE_DEBUG
{
@@ -969,13 +829,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
for (i = 0; i < num_llis; i++) {
dev_vdbg(&pl08x->adev->dev,
- "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
+ "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
i,
&llis_va[i],
llis_va[i].src,
llis_va[i].dst,
llis_va[i].cctl,
- llis_va[i].next
+ llis_va[i].lli
);
}
}
@@ -988,14 +848,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txd)
{
- if (!txd)
- dev_err(&pl08x->adev->dev,
- "%s no descriptor to free\n",
- __func__);
-
/* Free the LLI */
- dma_pool_free(pl08x->pool, txd->llis_va,
- txd->llis_bus);
+ dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
pl08x->pool_ctr--;
@@ -1008,13 +862,12 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txdi = NULL;
struct pl08x_txd *next;
- if (!list_empty(&plchan->desc_list)) {
+ if (!list_empty(&plchan->pend_list)) {
list_for_each_entry_safe(txdi,
- next, &plchan->desc_list, node) {
+ next, &plchan->pend_list, node) {
list_del(&txdi->node);
pl08x_free_txd(pl08x, txdi);
}
-
}
}
@@ -1069,6 +922,12 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
return -EBUSY;
}
ch->signal = ret;
+
+ /* Assign the flow control signal to this channel */
+ if (txd->direction == DMA_TO_DEVICE)
+ txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+ else if (txd->direction == DMA_FROM_DEVICE)
+ txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
}
dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
@@ -1076,19 +935,54 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
ch->signal,
plchan->name);
+ plchan->phychan_hold++;
plchan->phychan = ch;
return 0;
}
+static void release_phy_channel(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+
+ if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
+ pl08x->pd->put_signal(plchan);
+ plchan->phychan->signal = -1;
+ }
+ pl08x_put_phy_channel(pl08x, plchan->phychan);
+ plchan->phychan = NULL;
+}
+
static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+ struct pl08x_txd *txd = to_pl08x_txd(tx);
+ unsigned long flags;
- atomic_inc(&plchan->last_issued);
- tx->cookie = atomic_read(&plchan->last_issued);
- /* This unlock follows the lock in the prep() function */
- spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+ spin_lock_irqsave(&plchan->lock, flags);
+
+ plchan->chan.cookie += 1;
+ if (plchan->chan.cookie < 0)
+ plchan->chan.cookie = 1;
+ tx->cookie = plchan->chan.cookie;
+
+ /* Put this onto the pending list */
+ list_add_tail(&txd->node, &plchan->pend_list);
+
+ /*
+ * If there was no physical channel available for this memcpy,
+ * stack the request up and indicate that the channel is waiting
+ * for a free physical channel.
+ */
+ if (!plchan->slave && !plchan->phychan) {
+ /* Do this memcpy whenever there is a channel ready */
+ plchan->state = PL08X_CHAN_WAITING;
+ plchan->waiting = txd;
+ } else {
+ plchan->phychan_hold--;
+ }
+
+ spin_unlock_irqrestore(&plchan->lock, flags);
return tx->cookie;
}
@@ -1102,10 +996,9 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
}
/*
- * Code accessing dma_async_is_complete() in a tight loop
- * may give problems - could schedule where indicated.
- * If slaves are relying on interrupts to signal completion this
- * function must not be called with interrupts disabled
+ * Code accessing dma_async_is_complete() in a tight loop may give problems.
+ * If slaves are relying on interrupts to signal completion this function
+ * must not be called with interrupts disabled.
*/
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
@@ -1118,7 +1011,7 @@ pl08x_dma_tx_status(struct dma_chan *chan,
enum dma_status ret;
u32 bytesleft = 0;
- last_used = atomic_read(&plchan->last_issued);
+ last_used = plchan->chan.cookie;
last_complete = plchan->lc;
ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1128,13 +1021,9 @@ pl08x_dma_tx_status(struct dma_chan *chan,
}
/*
- * schedule(); could be inserted here
- */
-
- /*
* This cookie not complete yet
*/
- last_used = atomic_read(&plchan->last_issued);
+ last_used = plchan->chan.cookie;
last_complete = plchan->lc;
/* Get number of bytes left in the active transactions and queue */
@@ -1199,37 +1088,35 @@ static const struct burst_table burst_sizes[] = {
},
};
-static void dma_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+static int dma_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_channel_data *cd = plchan->cd;
enum dma_slave_buswidth addr_width;
+ dma_addr_t addr;
u32 maxburst;
u32 cctl = 0;
- /* Mask out all except src and dst channel */
- u32 ccfg = cd->ccfg & 0x000003DEU;
- int i = 0;
+ int i;
+
+ if (!plchan->slave)
+ return -EINVAL;
/* Transfer direction */
plchan->runtime_direction = config->direction;
if (config->direction == DMA_TO_DEVICE) {
- plchan->runtime_addr = config->dst_addr;
- cctl |= PL080_CONTROL_SRC_INCR;
- ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
} else if (config->direction == DMA_FROM_DEVICE) {
- plchan->runtime_addr = config->src_addr;
- cctl |= PL080_CONTROL_DST_INCR;
- ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
dev_err(&pl08x->adev->dev,
"bad runtime_config: alien transfer direction\n");
- return;
+ return -EINVAL;
}
switch (addr_width) {
@@ -1248,42 +1135,40 @@ static void dma_set_runtime_config(struct dma_chan *chan,
default:
dev_err(&pl08x->adev->dev,
"bad runtime_config: alien address width\n");
- return;
+ return -EINVAL;
}
/*
* Now decide on a maxburst:
- * If this channel will only request single transfers, set
- * this down to ONE element.
+ * If this channel will only request single transfers, set this
+ * down to ONE element. Also select one element if no maxburst
+ * is specified.
*/
- if (plchan->cd->single) {
+ if (plchan->cd->single || maxburst == 0) {
cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
} else {
- while (i < ARRAY_SIZE(burst_sizes)) {
+ for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
if (burst_sizes[i].burstwords <= maxburst)
break;
- i++;
- }
cctl |= burst_sizes[i].reg;
}
- /* Access the cell in privileged mode, non-bufferable, non-cacheable */
- cctl &= ~PL080_CONTROL_PROT_MASK;
- cctl |= PL080_CONTROL_PROT_SYS;
+ plchan->runtime_addr = addr;
/* Modify the default channel data to fit PrimeCell request */
cd->cctl = cctl;
- cd->ccfg = ccfg;
dev_dbg(&pl08x->adev->dev,
"configured channel %s (%s) for %s, data width %d, "
- "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
+ "maxburst %d words, LE, CCTL=0x%08x\n",
dma_chan_name(chan), plchan->name,
(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
addr_width,
maxburst,
- cctl, ccfg);
+ cctl);
+
+ return 0;
}
/*
@@ -1293,35 +1178,26 @@ static void dma_set_runtime_config(struct dma_chan *chan,
static void pl08x_issue_pending(struct dma_chan *chan)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
- struct pl08x_driver_data *pl08x = plchan->host;
unsigned long flags;
spin_lock_irqsave(&plchan->lock, flags);
- /* Something is already active */
- if (plchan->at) {
- spin_unlock_irqrestore(&plchan->lock, flags);
- return;
- }
-
- /* Didn't get a physical channel so waiting for it ... */
- if (plchan->state == PL08X_CHAN_WAITING)
+ /* Something is already active, or we're waiting for a channel... */
+ if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
+ spin_unlock_irqrestore(&plchan->lock, flags);
return;
+ }
/* Take the first element in the queue and execute it */
- if (!list_empty(&plchan->desc_list)) {
+ if (!list_empty(&plchan->pend_list)) {
struct pl08x_txd *next;
- next = list_first_entry(&plchan->desc_list,
+ next = list_first_entry(&plchan->pend_list,
struct pl08x_txd,
node);
list_del(&next->node);
- plchan->at = next;
plchan->state = PL08X_CHAN_RUNNING;
- /* Configure the physical channel for the active txd */
- pl08x_config_phychan_for_txd(plchan);
- pl08x_set_cregs(pl08x, plchan->phychan);
- pl08x_enable_phy_chan(pl08x, plchan->phychan);
+ pl08x_start_txd(plchan, next);
}
spin_unlock_irqrestore(&plchan->lock, flags);
@@ -1330,30 +1206,17 @@ static void pl08x_issue_pending(struct dma_chan *chan)
static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
struct pl08x_txd *txd)
{
- int num_llis;
struct pl08x_driver_data *pl08x = plchan->host;
- int ret;
+ unsigned long flags;
+ int num_llis, ret;
num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
-
- if (!num_llis)
+ if (!num_llis) {
+ kfree(txd);
return -EINVAL;
+ }
- spin_lock_irqsave(&plchan->lock, plchan->lockflags);
-
- /*
- * If this device is not using a circular buffer then
- * queue this new descriptor for transfer.
- * The descriptor for a circular buffer continues
- * to be used until the channel is freed.
- */
- if (txd->cd->circular_buffer)
- dev_err(&pl08x->adev->dev,
- "%s attempting to queue a circular buffer\n",
- __func__);
- else
- list_add_tail(&txd->node,
- &plchan->desc_list);
+ spin_lock_irqsave(&plchan->lock, flags);
/*
* See if we already have a physical channel allocated,
@@ -1362,45 +1225,74 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
ret = prep_phy_channel(plchan, txd);
if (ret) {
/*
- * No physical channel available, we will
- * stack up the memcpy channels until there is a channel
- * available to handle it whereas slave transfers may
- * have been denied due to platform channel muxing restrictions
- * and since there is no guarantee that this will ever be
- * resolved, and since the signal must be aquired AFTER
- * aquiring the physical channel, we will let them be NACK:ed
- * with -EBUSY here. The drivers can alway retry the prep()
- * call if they are eager on doing this using DMA.
+ * No physical channel was available.
+ *
+ * memcpy transfers can be sorted out at submission time.
+ *
+ * Slave transfers may have been denied due to platform
+ * channel muxing restrictions. Since there is no guarantee
+ * that this will ever be resolved, and the signal must be
+ * acquired AFTER acquiring the physical channel, we will let
+ * them be NACK:ed with -EBUSY here. The drivers can retry
+ * the prep() call if they are eager on doing this using DMA.
*/
if (plchan->slave) {
pl08x_free_txd_list(pl08x, plchan);
- spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+ pl08x_free_txd(pl08x, txd);
+ spin_unlock_irqrestore(&plchan->lock, flags);
return -EBUSY;
}
- /* Do this memcpy whenever there is a channel ready */
- plchan->state = PL08X_CHAN_WAITING;
- plchan->waiting = txd;
} else
/*
- * Else we're all set, paused and ready to roll,
- * status will switch to PL08X_CHAN_RUNNING when
- * we call issue_pending(). If there is something
- * running on the channel already we don't change
- * its state.
+ * Else we're all set, paused and ready to roll, status
+ * will switch to PL08X_CHAN_RUNNING when we call
+ * issue_pending(). If there is something running on the
+ * channel already we don't change its state.
*/
if (plchan->state == PL08X_CHAN_IDLE)
plchan->state = PL08X_CHAN_PAUSED;
- /*
- * Notice that we leave plchan->lock locked on purpose:
- * it will be unlocked in the subsequent tx_submit()
- * call. This is a consequence of the current API.
- */
+ spin_unlock_irqrestore(&plchan->lock, flags);
return 0;
}
/*
+ * Given the source and destination available bus masks, select which
+ * will be routed to each port. We try to have source and destination
+ * on separate ports, but always respect the allowable settings.
+ */
+static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
+{
+ u32 cctl = 0;
+
+ if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
+ cctl |= PL080_CONTROL_DST_AHB2;
+ if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
+ cctl |= PL080_CONTROL_SRC_AHB2;
+
+ return cctl;
+}
+
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
+ unsigned long flags)
+{
+ struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+
+ if (txd) {
+ dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
+ txd->tx.flags = flags;
+ txd->tx.tx_submit = pl08x_tx_submit;
+ INIT_LIST_HEAD(&txd->node);
+
+ /* Always enable error and terminal interrupts */
+ txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
+ PL080_CONFIG_TC_IRQ_MASK;
+ }
+ return txd;
+}
+
+/*
* Initialize a descriptor to be used by memcpy submit
*/
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
@@ -1412,40 +1304,38 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
struct pl08x_txd *txd;
int ret;
- txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+ txd = pl08x_get_txd(plchan, flags);
if (!txd) {
dev_err(&pl08x->adev->dev,
"%s no memory for descriptor\n", __func__);
return NULL;
}
- dma_async_tx_descriptor_init(&txd->tx, chan);
txd->direction = DMA_NONE;
- txd->srcbus.addr = src;
- txd->dstbus.addr = dest;
+ txd->src_addr = src;
+ txd->dst_addr = dest;
+ txd->len = len;
/* Set platform data for m2m */
- txd->cd = &pl08x->pd->memcpy_channel;
+ txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ txd->cctl = pl08x->pd->memcpy_channel.cctl &
+ ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
+
/* Both to be incremented or the code will break */
- txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
- txd->tx.tx_submit = pl08x_tx_submit;
- txd->tx.callback = NULL;
- txd->tx.callback_param = NULL;
- txd->len = len;
+ txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
+
+ if (pl08x->vd->dualmaster)
+ txd->cctl |= pl08x_select_bus(pl08x,
+ pl08x->mem_buses, pl08x->mem_buses);
- INIT_LIST_HEAD(&txd->node);
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
return NULL;
- /*
- * NB: the channel lock is held at this point so tx_submit()
- * must be called in direct succession.
- */
return &txd->tx;
}
-struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
@@ -1453,6 +1343,7 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
+ u8 src_buses, dst_buses;
int ret;
/*
@@ -1467,14 +1358,12 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
__func__, sgl->length, plchan->name);
- txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+ txd = pl08x_get_txd(plchan, flags);
if (!txd) {
dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
return NULL;
}
- dma_async_tx_descriptor_init(&txd->tx, chan);
-
if (direction != plchan->runtime_direction)
dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
"the direction configured for the PrimeCell\n",
@@ -1486,37 +1375,47 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
* channel target address dynamically at runtime.
*/
txd->direction = direction;
+ txd->len = sgl->length;
+
+ txd->cctl = plchan->cd->cctl &
+ ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
+ PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
+ PL080_CONTROL_PROT_MASK);
+
+ /* Access the cell in privileged mode, non-bufferable, non-cacheable */
+ txd->cctl |= PL080_CONTROL_PROT_SYS;
+
if (direction == DMA_TO_DEVICE) {
- txd->srcbus.addr = sgl->dma_address;
+ txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ txd->cctl |= PL080_CONTROL_SRC_INCR;
+ txd->src_addr = sgl->dma_address;
if (plchan->runtime_addr)
- txd->dstbus.addr = plchan->runtime_addr;
+ txd->dst_addr = plchan->runtime_addr;
else
- txd->dstbus.addr = plchan->cd->addr;
+ txd->dst_addr = plchan->cd->addr;
+ src_buses = pl08x->mem_buses;
+ dst_buses = plchan->cd->periph_buses;
} else if (direction == DMA_FROM_DEVICE) {
+ txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ txd->cctl |= PL080_CONTROL_DST_INCR;
if (plchan->runtime_addr)
- txd->srcbus.addr = plchan->runtime_addr;
+ txd->src_addr = plchan->runtime_addr;
else
- txd->srcbus.addr = plchan->cd->addr;
- txd->dstbus.addr = sgl->dma_address;
+ txd->src_addr = plchan->cd->addr;
+ txd->dst_addr = sgl->dma_address;
+ src_buses = plchan->cd->periph_buses;
+ dst_buses = pl08x->mem_buses;
} else {
dev_err(&pl08x->adev->dev,
"%s direction unsupported\n", __func__);
return NULL;
}
- txd->cd = plchan->cd;
- txd->tx.tx_submit = pl08x_tx_submit;
- txd->tx.callback = NULL;
- txd->tx.callback_param = NULL;
- txd->len = sgl->length;
- INIT_LIST_HEAD(&txd->node);
+
+ txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
return NULL;
- /*
- * NB: the channel lock is held at this point so tx_submit()
- * must be called in direct succession.
- */
return &txd->tx;
}
@@ -1531,10 +1430,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
/* Controls applicable to inactive channels */
if (cmd == DMA_SLAVE_CONFIG) {
- dma_set_runtime_config(chan,
- (struct dma_slave_config *)
- arg);
- return 0;
+ return dma_set_runtime_config(chan,
+ (struct dma_slave_config *)arg);
}
/*
@@ -1558,16 +1455,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* Mark physical channel as free and free any slave
* signal
*/
- if ((plchan->phychan->signal >= 0) &&
- pl08x->pd->put_signal) {
- pl08x->pd->put_signal(plchan);
- plchan->phychan->signal = -1;
- }
- pl08x_put_phy_channel(pl08x, plchan->phychan);
- plchan->phychan = NULL;
+ release_phy_channel(plchan);
}
- /* Stop any pending tasklet */
- tasklet_disable(&plchan->tasklet);
/* Dequeue jobs and free LLIs */
if (plchan->at) {
pl08x_free_txd(pl08x, plchan->at);
@@ -1609,10 +1498,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
/*
* Just check that the device is there and active
- * TODO: turn this bit on/off depending on the number of
- * physical channels actually used, if it is zero... well
- * shut it off. That will save some power. Cut the clock
- * at the same time.
+ * TODO: turn this bit on/off depending on the number of physical channels
+ * actually used, if it is zero... well shut it off. That will save some
+ * power. Cut the clock at the same time.
*/
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
@@ -1620,78 +1508,66 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
val = readl(pl08x->base + PL080_CONFIG);
val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
- /* We implictly clear bit 1 and that means little-endian mode */
+ /* We implicitly clear bit 1 and that means little-endian mode */
val |= PL080_CONFIG_ENABLE;
writel(val, pl08x->base + PL080_CONFIG);
}
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
+{
+ struct device *dev = txd->tx.chan->device->dev;
+
+ if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(dev, txd->src_addr, txd->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, txd->src_addr, txd->len,
+ DMA_TO_DEVICE);
+ }
+ if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(dev, txd->dst_addr, txd->len,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(dev, txd->dst_addr, txd->len,
+ DMA_FROM_DEVICE);
+ }
+}
+
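/*
 * Editor's sketch (not part of the patch): how the completion flags consulted
 * by pl08x_unmap_buffers() above select an unmap path.  The flag values are
 * local stand-ins for the dmaengine DMA_COMPL_* bits; only the decision
 * structure mirrors the driver.
 */
#include <stdio.h>

#define SKIP_SRC_UNMAP    (1 << 0)
#define SKIP_DEST_UNMAP   (1 << 1)
#define SRC_UNMAP_SINGLE  (1 << 2)
#define DEST_UNMAP_SINGLE (1 << 3)

static void show(unsigned long flags)
{
	if (!(flags & SKIP_SRC_UNMAP))
		printf("src:  %s\n", flags & SRC_UNMAP_SINGLE ?
		       "dma_unmap_single(..., DMA_TO_DEVICE)" :
		       "dma_unmap_page(..., DMA_TO_DEVICE)");
	else
		printf("src:  left mapped (client unmaps)\n");

	if (!(flags & SKIP_DEST_UNMAP))
		printf("dest: %s\n", flags & DEST_UNMAP_SINGLE ?
		       "dma_unmap_single(..., DMA_FROM_DEVICE)" :
		       "dma_unmap_page(..., DMA_FROM_DEVICE)");
	else
		printf("dest: left mapped (client unmaps)\n");
}

int main(void)
{
	/* e.g. a memcpy where the client mapped the source buffer itself */
	show(SKIP_SRC_UNMAP | DEST_UNMAP_SINGLE);
	return 0;
}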
static void pl08x_tasklet(unsigned long data)
{
struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
- struct pl08x_phy_chan *phychan = plchan->phychan;
struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ unsigned long flags;
- if (!plchan)
- BUG();
-
- spin_lock(&plchan->lock);
-
- if (plchan->at) {
- dma_async_tx_callback callback =
- plchan->at->tx.callback;
- void *callback_param =
- plchan->at->tx.callback_param;
-
- /*
- * Update last completed
- */
- plchan->lc =
- (plchan->at->tx.cookie);
-
- /*
- * Callback to signal completion
- */
- if (callback)
- callback(callback_param);
+ spin_lock_irqsave(&plchan->lock, flags);
- /*
- * Device callbacks should NOT clear
- * the current transaction on the channel
- * Linus: sometimes they should?
- */
- if (!plchan->at)
- BUG();
+ txd = plchan->at;
+ plchan->at = NULL;
- /*
- * Free the descriptor if it's not for a device
- * using a circular buffer
- */
- if (!plchan->at->cd->circular_buffer) {
- pl08x_free_txd(pl08x, plchan->at);
- plchan->at = NULL;
- }
- /*
- * else descriptor for circular
- * buffers only freed when
- * client has disabled dma
- */
+ if (txd) {
+ /* Update last completed */
+ plchan->lc = txd->tx.cookie;
}
- /*
- * If a new descriptor is queued, set it up
- * plchan->at is NULL here
- */
- if (!list_empty(&plchan->desc_list)) {
+
+ /* If a new descriptor is queued, set it up; plchan->at is NULL here */
+ if (!list_empty(&plchan->pend_list)) {
struct pl08x_txd *next;
- next = list_first_entry(&plchan->desc_list,
+ next = list_first_entry(&plchan->pend_list,
struct pl08x_txd,
node);
list_del(&next->node);
- plchan->at = next;
- /* Configure the physical channel for the next txd */
- pl08x_config_phychan_for_txd(plchan);
- pl08x_set_cregs(pl08x, plchan->phychan);
- pl08x_enable_phy_chan(pl08x, plchan->phychan);
+
+ pl08x_start_txd(plchan, next);
+ } else if (plchan->phychan_hold) {
+ /*
+ * This channel is still in use - we have a new txd being
+ * prepared and will soon be queued. Don't give up the
+ * physical channel.
+ */
} else {
struct pl08x_dma_chan *waiting = NULL;
@@ -1699,20 +1575,14 @@ static void pl08x_tasklet(unsigned long data)
* No more jobs, so free up the physical channel
* Free any allocated signal on slave transfers too
*/
- if ((phychan->signal >= 0) && pl08x->pd->put_signal) {
- pl08x->pd->put_signal(plchan);
- phychan->signal = -1;
- }
- pl08x_put_phy_channel(pl08x, phychan);
- plchan->phychan = NULL;
+ release_phy_channel(plchan);
plchan->state = PL08X_CHAN_IDLE;
/*
- * And NOW before anyone else can grab that free:d
- * up physical channel, see if there is some memcpy
- * pending that seriously needs to start because of
- * being stacked up while we were choking the
- * physical channels with data.
+ * And NOW before anyone else can grab that free:d up
+ * physical channel, see if there is some memcpy pending
+ * that seriously needs to start because of being stacked
+ * up while we were choking the physical channels with data.
*/
list_for_each_entry(waiting, &pl08x->memcpy.channels,
chan.device_node) {
@@ -1724,6 +1594,7 @@ static void pl08x_tasklet(unsigned long data)
ret = prep_phy_channel(waiting,
waiting->waiting);
BUG_ON(ret);
+ waiting->phychan_hold--;
waiting->state = PL08X_CHAN_RUNNING;
waiting->waiting = NULL;
pl08x_issue_pending(&waiting->chan);
@@ -1732,7 +1603,25 @@ static void pl08x_tasklet(unsigned long data)
}
}
- spin_unlock(&plchan->lock);
+ spin_unlock_irqrestore(&plchan->lock, flags);
+
+ if (txd) {
+ dma_async_tx_callback callback = txd->tx.callback;
+ void *callback_param = txd->tx.callback_param;
+
+ /* Don't try to unmap buffers on slave channels */
+ if (!plchan->slave)
+ pl08x_unmap_buffers(txd);
+
+ /* Free the descriptor */
+ spin_lock_irqsave(&plchan->lock, flags);
+ pl08x_free_txd(pl08x, txd);
+ spin_unlock_irqrestore(&plchan->lock, flags);
+
+ /* Callback to signal completion */
+ if (callback)
+ callback(callback_param);
+ }
}
static irqreturn_t pl08x_irq(int irq, void *dev)
@@ -1744,9 +1633,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
val = readl(pl08x->base + PL080_ERR_STATUS);
if (val) {
- /*
- * An error interrupt (on one or more channels)
- */
+ /* An error interrupt (on one or more channels) */
dev_err(&pl08x->adev->dev,
"%s error interrupt, register value 0x%08x\n",
__func__, val);
@@ -1770,9 +1657,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
mask |= (1 << i);
}
}
- /*
- * Clear only the terminal interrupts on channels we processed
- */
+ /* Clear only the terminal interrupts on channels we processed */
writel(mask, pl08x->base + PL080_TC_CLEAR);
return mask ? IRQ_HANDLED : IRQ_NONE;
@@ -1791,6 +1676,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
int i;
INIT_LIST_HEAD(&dmadev->channels);
+
/*
* Register as many memcpy channels as we have physical channels,
* we won't always be able to use all but the code will have
@@ -1819,16 +1705,23 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
return -ENOMEM;
}
}
+ if (chan->cd->circular_buffer) {
+ dev_err(&pl08x->adev->dev,
+ "channel %s: circular buffers not supported\n",
+ chan->name);
+ kfree(chan);
+ continue;
+ }
dev_info(&pl08x->adev->dev,
"initialize virtual channel \"%s\"\n",
chan->name);
chan->chan.device = dmadev;
- atomic_set(&chan->last_issued, 0);
- chan->lc = atomic_read(&chan->last_issued);
+ chan->chan.cookie = 0;
+ chan->lc = 0;
spin_lock_init(&chan->lock);
- INIT_LIST_HEAD(&chan->desc_list);
+ INIT_LIST_HEAD(&chan->pend_list);
tasklet_init(&chan->tasklet, pl08x_tasklet,
(unsigned long) chan);
@@ -1898,7 +1791,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "CHANNEL:\tSTATE:\n");
seq_printf(s, "--------\t------\n");
list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
- seq_printf(s, "%s\t\t\%s\n", chan->name,
+ seq_printf(s, "%s\t\t%s\n", chan->name,
pl08x_state_str(chan->state));
}
@@ -1906,7 +1799,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "CHANNEL:\tSTATE:\n");
seq_printf(s, "--------\t------\n");
list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
- seq_printf(s, "%s\t\t\%s\n", chan->name,
+ seq_printf(s, "%s\t\t%s\n", chan->name,
pl08x_state_str(chan->state));
}
@@ -1942,7 +1835,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
- struct vendor_data *vd = id->data;
+ const struct vendor_data *vd = id->data;
int ret = 0;
int i;
@@ -1990,6 +1883,14 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
pl08x->adev = adev;
pl08x->vd = vd;
+ /* By default, AHB1 only. If dualmaster, from platform */
+ pl08x->lli_buses = PL08X_AHB1;
+ pl08x->mem_buses = PL08X_AHB1;
+ if (pl08x->vd->dualmaster) {
+ pl08x->lli_buses = pl08x->pd->lli_buses;
+ pl08x->mem_buses = pl08x->pd->mem_buses;
+ }
+
/* A DMA memory pool for LLIs, align on 1-byte boundary */
pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
@@ -2009,14 +1910,12 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
/* Turn on the PL08x */
pl08x_ensure_on(pl08x);
- /*
- * Attach the interrupt handler
- */
+ /* Attach the interrupt handler */
writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
- vd->name, pl08x);
+ DRIVER_NAME, pl08x);
if (ret) {
dev_err(&adev->dev, "%s failed to request interrupt %d\n",
__func__, adev->irq[0]);
@@ -2087,8 +1986,9 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
amba_set_drvdata(adev, pl08x);
init_pl08x_debugfs(pl08x);
- dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n",
- vd->name, adev->res.start);
+ dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
+ amba_part(adev), amba_rev(adev),
+ (unsigned long long)adev->res.start, adev->irq[0]);
return 0;
out_no_slave_reg:
@@ -2115,13 +2015,11 @@ out_no_pl08x:
/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
- .name = "PL080",
.channels = 8,
.dualmaster = true,
};
static struct vendor_data vendor_pl081 = {
- .name = "PL081",
.channels = 2,
.dualmaster = false,
};
@@ -2160,7 +2058,7 @@ static int __init pl08x_init(void)
retval = amba_driver_register(&pl08x_amba_driver);
if (retval)
printk(KERN_WARNING DRIVER_NAME
- "failed to register as an amba device (%d)\n",
+ "failed to register as an AMBA device (%d)\n",
retval);
return retval;
}
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ea0ee81cff53..3d7d705f026f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -253,7 +253,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list);
- /* unmap dma addresses */
+ /* unmap dma addresses (not on slave channels) */
if (!atchan->chan_common.private) {
struct device *parent = chan2parent(&atchan->chan_common);
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
@@ -583,7 +583,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
desc->lli.ctrlb = ctrlb;
desc->txd.cookie = 0;
- async_tx_ack(&desc->txd);
if (!first) {
first = desc;
@@ -604,7 +603,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
/* set end-of-link to the last link descriptor of list*/
set_desc_eol(desc);
- desc->txd.flags = flags; /* client is in control of this ack */
+ first->txd.flags = flags; /* client is in control of this ack */
return &first->txd;
@@ -670,7 +669,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!desc)
goto err_desc_get;
- mem = sg_phys(sg);
+ mem = sg_dma_address(sg);
len = sg_dma_len(sg);
mem_width = 2;
if (unlikely(mem & 3 || len & 3))
@@ -712,7 +711,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!desc)
goto err_desc_get;
- mem = sg_phys(sg);
+ mem = sg_dma_address(sg);
len = sg_dma_len(sg);
mem_width = 2;
if (unlikely(mem & 3 || len & 3))
@@ -749,8 +748,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
first->txd.cookie = -EBUSY;
first->len = total_len;
- /* last link descriptor of list is responsible of flags */
- prev->txd.flags = flags; /* client is in control of this ack */
+ /* first link descriptor of list is responsible for flags */
+ first->txd.flags = flags; /* client is in control of this ack */
return &first->txd;
@@ -854,11 +853,11 @@ static void atc_issue_pending(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "issue_pending\n");
+ spin_lock_bh(&atchan->lock);
if (!atc_chan_is_enabled(atchan)) {
- spin_lock_bh(&atchan->lock);
atc_advance_work(atchan);
- spin_unlock_bh(&atchan->lock);
}
+ spin_unlock_bh(&atchan->lock);
}
/**
@@ -1210,7 +1209,7 @@ static int __init at_dma_init(void)
{
return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
-module_init(at_dma_init);
+subsys_initcall(at_dma_init);
static void __exit at_dma_exit(void)
{
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e5e172d21692..4de947a450fc 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1,7 +1,7 @@
/*
* Freescale MPC85xx, MPC83xx DMA Engine support
*
- * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
*
* Author:
* Zhang Wei <wei.zhang@freescale.com>, Jul 2007
@@ -1324,6 +1324,8 @@ static int __devinit fsldma_of_probe(struct platform_device *op,
fdev->common.device_control = fsl_dma_device_control;
fdev->common.dev = &op->dev;
+ dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
+
dev_set_drvdata(&op->dev, fdev);
/*
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 78266382797e..798f46a4590d 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -664,11 +664,20 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
/*calculate CTL_LO*/
ctl_lo.ctl_lo = 0;
ctl_lo.ctlx.int_en = 1;
- ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
- ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
+ /*
+ * Here we need some translation from "enum dma_slave_buswidth"
+ * to the standard intel_mid_dmac controller format:
+ * 1 Byte 0b000
+ * 2 Bytes 0b001
+ * 4 Bytes 0b010
+ */
+ ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
+ ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
+
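/*
 * Editor's note (not part of the patch): enum dma_slave_buswidth encodes the
 * width in bytes (1, 2 or 4), so the integer division by two above happens to
 * yield exactly the 0b000/0b001/0b010 codes listed in the comment.  A quick
 * standalone check:
 */
#include <stdio.h>

int main(void)
{
	int bytes[] = { 1, 2, 4 };

	for (int i = 0; i < 3; i++)
		printf("%d byte(s) -> tr_width code %d\n", bytes[i], bytes[i] / 2);
	/* prints codes 0, 1 and 2, matching the table above */
	return 0;
}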
if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
ctl_lo.ctlx.tt_fc = 0;
ctl_lo.ctlx.sinc = 0;
@@ -746,8 +755,18 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
BUG_ON(!mids);
if (!midc->dma->pimr_mask) {
- pr_debug("MDMA: SG list is not supported by this controller\n");
- return NULL;
+ /* We can still handle sg list with only one item */
+ if (sg_len == 1) {
+ txd = intel_mid_dma_prep_memcpy(chan,
+ mids->dma_slave.dst_addr,
+ mids->dma_slave.src_addr,
+ sgl->length,
+ flags);
+ return txd;
+ } else {
+ pr_warn("MDMA: SG list is not supported by this controller\n");
+ return NULL;
+ }
}
pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
@@ -758,6 +777,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
pr_err("MDMA: Prep memcpy failed\n");
return NULL;
}
+
desc = to_intel_mid_dma_desc(txd);
desc->dirn = direction;
ctl_lo.ctl_lo = desc->ctl_lo;
@@ -1021,11 +1041,6 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
/*DMA Interrupt*/
pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
- if (!mid) {
- pr_err("ERR_MDMA:null pointer mid\n");
- return -EINVAL;
- }
-
pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
tfr_status &= mid->intr_mask;
if (tfr_status) {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 161c452923b8..c6b01f535b29 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1261,7 +1261,7 @@ out:
return err;
}
-#ifdef CONFIG_MD_RAID6_PQ
+#ifdef CONFIG_RAID6_PQ
static int __devinit
iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
{
@@ -1584,7 +1584,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
- #ifdef CONFIG_MD_RAID6_PQ
+ #ifdef CONFIG_RAID6_PQ
ret = iop_adma_pq_zero_sum_self_test(adev);
dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
#else
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index c064c89420d0..1c38418ae61f 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,6 +1,7 @@
/*
* Topcliff PCH DMA controller driver
* Copyright (c) 2010 Intel Corporation
+ * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -921,12 +922,19 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
}
/* PCI Device ID of DMA device */
-#define PCI_DEVICE_ID_PCH_DMA_8CH 0x8810
-#define PCI_DEVICE_ID_PCH_DMA_4CH 0x8815
+#define PCI_VENDOR_ID_ROHM 0x10DB
+#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
+#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
+#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
+#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
+#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
static const struct pci_device_id pch_dma_id_table[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
{ 0, },
};
@@ -954,6 +962,7 @@ static void __exit pch_dma_exit(void)
module_init(pch_dma_init);
module_exit(pch_dma_exit);
-MODULE_DESCRIPTION("Topcliff PCH DMA controller driver");
+MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+ "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index fab68a553205..6e1d46a65d0e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,5 +1,6 @@
/*
- * Copyright (C) ST-Ericsson SA 2007-2010
+ * Copyright (C) Ericsson AB 2007-2008
+ * Copyright (C) ST-Ericsson SA 2008-2010
* Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
* Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
@@ -554,8 +555,66 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
return d;
}
-/* Support functions for logical channels */
+static int d40_psize_2_burst_size(bool is_log, int psize)
+{
+ if (is_log) {
+ if (psize == STEDMA40_PSIZE_LOG_1)
+ return 1;
+ } else {
+ if (psize == STEDMA40_PSIZE_PHY_1)
+ return 1;
+ }
+
+ return 2 << psize;
+}
+
+/*
+ * The DMA only supports transfers of up to
+ * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
+ * DMA elements required to send the entire sg list.
+ */
+static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
+{
+ int dmalen;
+ u32 max_w = max(data_width1, data_width2);
+ u32 min_w = min(data_width1, data_width2);
+ u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+ if (seg_max > STEDMA40_MAX_SEG_SIZE)
+ seg_max -= (1 << max_w);
+
+ if (!IS_ALIGNED(size, 1 << max_w))
+ return -EINVAL;
+
+ if (size <= seg_max)
+ dmalen = 1;
+ else {
+ dmalen = size / seg_max;
+ if (dmalen * seg_max < size)
+ dmalen++;
+ }
+ return dmalen;
+}
+
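/*
 * Editor's sketch (not part of the patch): a standalone rerun of the
 * d40_size_2_dmalen() arithmetic above.  MAX_SEG_SIZE below is an assumed
 * stand-in for STEDMA40_MAX_SEG_SIZE, and the widths are shifts (element
 * size = 1 << width), as in the driver.
 */
#include <stdio.h>

#define MAX_SEG_SIZE	0xffff				/* assumed value */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static int size_2_dmalen(int size, unsigned w1, unsigned w2)
{
	unsigned max_w = w1 > w2 ? w1 : w2;
	unsigned min_w = w1 < w2 ? w1 : w2;
	unsigned seg_max = ALIGN_UP(MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > MAX_SEG_SIZE)
		seg_max -= 1 << max_w;
	if (size % (1 << max_w))
		return -1;			/* unaligned, like -EINVAL above */
	return (size + seg_max - 1) / seg_max;	/* same count as the driver's loop-free form */
}

int main(void)
{
	/* 1 MiB from an 8-bit wide source (shift 0) to a 32-bit wide sink (shift 2) */
	printf("dmalen = %d\n", size_2_dmalen(1 << 20, 0, 2));
	return 0;	/* seg_max works out to 0xfffc, so 17 descriptors are needed */
}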
+static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
+ u32 data_width1, u32 data_width2)
+{
+ struct scatterlist *sg;
+ int i;
+ int len = 0;
+ int ret;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ ret = d40_size_2_dmalen(sg_dma_len(sg),
+ data_width1, data_width2);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ }
+ return len;
+}
+/* Support functions for logical channels */
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
@@ -1241,6 +1300,21 @@ static int d40_validate_conf(struct d40_chan *d40c,
res = -EINVAL;
}
+ if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
+ (1 << conf->src_info.data_width) !=
+ d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
+ (1 << conf->dst_info.data_width)) {
+ /*
+ * The DMAC hardware only supports
+ * src (burst x width) == dst (burst x width)
+ */
+
+ dev_err(&d40c->chan.dev->device,
+ "[%s] src (burst x width) != dst (burst x width)\n",
+ __func__);
+ res = -EINVAL;
+ }
+
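/*
 * Editor's sketch (not part of the patch): the check above enforces that both
 * sides move the same number of bytes per burst.  Treating data_width as a
 * shift (element size = 1 << data_width, as the driver does):
 *
 *   src: burst of 8 elements, 1 << 1 = 2 bytes each -> 16 bytes
 *   dst: burst of 4 elements, 1 << 2 = 4 bytes each -> 16 bytes  (accepted)
 *   dst: burst of 8 elements, 1 << 2 = 4 bytes each -> 32 bytes  (rejected)
 */
#include <stdio.h>

static int burst_bytes(int burst, int data_width_shift)
{
	return burst * (1 << data_width_shift);
}

int main(void)
{
	printf("src 8x2B = %d, dst 4x4B = %d -> %s\n",
	       burst_bytes(8, 1), burst_bytes(4, 2),
	       burst_bytes(8, 1) == burst_bytes(4, 2) ? "accepted" : "-EINVAL");
	return 0;
}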
return res;
}
@@ -1638,13 +1712,21 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
if (d40d == NULL)
goto err;
- d40d->lli_len = sgl_len;
+ d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width);
+ if (d40d->lli_len < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unaligned size\n", __func__);
+ goto err;
+ }
+
d40d->lli_current = 0;
d40d->txd.flags = dma_flags;
if (d40c->log_num != D40_PHY_CHAN) {
- if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
goto err;
@@ -1654,15 +1736,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
sgl_len,
d40d->lli_log.src,
d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width);
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width);
(void) d40_log_sg_to_lli(sgl_dst,
sgl_len,
d40d->lli_log.dst,
d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width);
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.src_info.data_width);
} else {
- if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
goto err;
@@ -1675,6 +1759,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
virt_to_phys(d40d->lli_phy.src),
d40c->src_def_cfg,
d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
d40c->dma_cfg.src_info.psize);
if (res < 0)
@@ -1687,6 +1772,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
virt_to_phys(d40d->lli_phy.dst),
d40c->dst_def_cfg,
d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.src_info.data_width,
d40c->dma_cfg.dst_info.psize);
if (res < 0)
@@ -1826,7 +1912,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
struct d40_chan *d40c = container_of(chan, struct d40_chan,
chan);
unsigned long flags;
- int err = 0;
if (d40c->phy_chan == NULL) {
dev_err(&d40c->chan.dev->device,
@@ -1844,6 +1929,15 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
}
d40d->txd.flags = dma_flags;
+ d40d->lli_len = d40_size_2_dmalen(size,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width);
+ if (d40d->lli_len < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unaligned size\n", __func__);
+ goto err;
+ }
+
dma_async_tx_descriptor_init(&d40d->txd, chan);
@@ -1851,37 +1945,40 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
if (d40c->log_num != D40_PHY_CHAN) {
- if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
goto err;
}
- d40d->lli_len = 1;
d40d->lli_current = 0;
- d40_log_fill_lli(d40d->lli_log.src,
- src,
- size,
- d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width,
- true);
+ if (d40_log_buf_to_lli(d40d->lli_log.src,
+ src,
+ size,
+ d40c->log_def.lcsp1,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ true) == NULL)
+ goto err;
- d40_log_fill_lli(d40d->lli_log.dst,
- dst,
- size,
- d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width,
- true);
+ if (d40_log_buf_to_lli(d40d->lli_log.dst,
+ dst,
+ size,
+ d40c->log_def.lcsp3,
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.src_info.data_width,
+ true) == NULL)
+ goto err;
} else {
- if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
goto err;
}
- err = d40_phy_fill_lli(d40d->lli_phy.src,
+ if (d40_phy_buf_to_lli(d40d->lli_phy.src,
src,
size,
d40c->dma_cfg.src_info.psize,
@@ -1889,11 +1986,11 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
d40c->src_def_cfg,
true,
d40c->dma_cfg.src_info.data_width,
- false);
- if (err)
- goto err_fill_lli;
+ d40c->dma_cfg.dst_info.data_width,
+ false) == NULL)
+ goto err;
- err = d40_phy_fill_lli(d40d->lli_phy.dst,
+ if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
dst,
size,
d40c->dma_cfg.dst_info.psize,
@@ -1901,10 +1998,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
d40c->dst_def_cfg,
true,
d40c->dma_cfg.dst_info.data_width,
- false);
-
- if (err)
- goto err_fill_lli;
+ d40c->dma_cfg.src_info.data_width,
+ false) == NULL)
+ goto err;
(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
d40d->lli_pool.size, DMA_TO_DEVICE);
@@ -1913,9 +2009,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
spin_unlock_irqrestore(&d40c->lock, flags);
return &d40d->txd;
-err_fill_lli:
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed filling in PHY LLI\n", __func__);
err:
if (d40d)
d40_desc_free(d40c, d40d);
@@ -1945,13 +2038,21 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
dma_addr_t dev_addr = 0;
int total_size;
- if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+ d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width);
+ if (d40d->lli_len < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unaligned size\n", __func__);
+ return -EINVAL;
+ }
+
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
return -ENOMEM;
}
- d40d->lli_len = sg_len;
d40d->lli_current = 0;
if (direction == DMA_FROM_DEVICE)
@@ -1993,13 +2094,21 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
dma_addr_t dst_dev_addr;
int res;
- if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width);
+ if (d40d->lli_len < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unaligned size\n", __func__);
+ return -EINVAL;
+ }
+
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
return -ENOMEM;
}
- d40d->lli_len = sgl_len;
d40d->lli_current = 0;
if (direction == DMA_FROM_DEVICE) {
@@ -2024,6 +2133,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
virt_to_phys(d40d->lli_phy.src),
d40c->src_def_cfg,
d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
d40c->dma_cfg.src_info.psize);
if (res < 0)
return res;
@@ -2035,6 +2145,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
virt_to_phys(d40d->lli_phy.dst),
d40c->dst_def_cfg,
d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.src_info.data_width,
d40c->dma_cfg.dst_info.psize);
if (res < 0)
return res;
@@ -2244,6 +2355,8 @@ static void d40_set_runtime_config(struct dma_chan *chan,
psize = STEDMA40_PSIZE_PHY_8;
else if (config_maxburst >= 4)
psize = STEDMA40_PSIZE_PHY_4;
+ else if (config_maxburst >= 2)
+ psize = STEDMA40_PSIZE_PHY_2;
else
psize = STEDMA40_PSIZE_PHY_1;
}
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 8557cb88b255..0b096a38322d 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
* Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
*/
@@ -122,15 +122,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
*dst_cfg = dst;
}
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
- dma_addr_t data,
- u32 data_size,
- int psize,
- dma_addr_t next_lli,
- u32 reg_cfg,
- bool term_int,
- u32 data_width,
- bool is_device)
+static int d40_phy_fill_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width,
+ bool is_device)
{
int num_elems;
@@ -139,13 +139,6 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
else
num_elems = 2 << psize;
- /*
- * Size is 16bit. data_width is 8, 16, 32 or 64 bit
- * Block large than 64 KiB must be split.
- */
- if (data_size > (0xffff << data_width))
- return -EINVAL;
-
/* Must be aligned */
if (!IS_ALIGNED(data, 0x1 << data_width))
return -EINVAL;
@@ -187,55 +180,118 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
return 0;
}
+static int d40_seg_size(int size, int data_width1, int data_width2)
+{
+ u32 max_w = max(data_width1, data_width2);
+ u32 min_w = min(data_width1, data_width2);
+ u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+ if (seg_max > STEDMA40_MAX_SEG_SIZE)
+ seg_max -= (1 << max_w);
+
+ if (size <= seg_max)
+ return size;
+
+ if (size <= 2 * seg_max)
+ return ALIGN(size / 2, 1 << max_w);
+
+ return seg_max;
+}
+
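/*
 * Editor's sketch (not part of the patch): the segment-size policy of
 * d40_seg_size() above — one segment when the buffer fits, two roughly equal
 * (aligned) halves when it is at most twice the limit, otherwise the limit
 * itself.  SEG_MAX is an illustrative limit, not the driver constant.
 */
#include <stdio.h>

#define SEG_MAX		0xfffc				/* illustrative limit */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static int seg_size(int size, int align)
{
	if (size <= SEG_MAX)
		return size;
	if (size <= 2 * SEG_MAX)
		return ALIGN_UP(size / 2, align);
	return SEG_MAX;
}

int main(void)
{
	printf("%#x -> %#x\n", 0x8000, seg_size(0x8000, 4));	/* fits: one segment */
	printf("%#x -> %#x\n", 0x18000, seg_size(0x18000, 4));	/* split roughly in half */
	printf("%#x -> %#x\n", 0x40000, seg_size(0x40000, 4));	/* capped at SEG_MAX */
	return 0;
}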
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+ dma_addr_t addr,
+ u32 size,
+ int psize,
+ dma_addr_t lli_phys,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width1,
+ u32 data_width2,
+ bool is_device)
+{
+ int err;
+ dma_addr_t next = lli_phys;
+ int size_rest = size;
+ int size_seg = 0;
+
+ do {
+ size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+ size_rest -= size_seg;
+
+ if (term_int && size_rest == 0)
+ next = 0;
+ else
+ next = ALIGN(next + sizeof(struct d40_phy_lli),
+ D40_LLI_ALIGN);
+
+ err = d40_phy_fill_lli(lli,
+ addr,
+ size_seg,
+ psize,
+ next,
+ reg_cfg,
+ !next,
+ data_width1,
+ is_device);
+
+ if (err)
+ goto err;
+
+ lli++;
+ if (!is_device)
+ addr += size_seg;
+ } while (size_rest);
+
+ return lli;
+
+ err:
+ return NULL;
+}
+
int d40_phy_sg_to_lli(struct scatterlist *sg,
int sg_len,
dma_addr_t target,
- struct d40_phy_lli *lli,
+ struct d40_phy_lli *lli_sg,
dma_addr_t lli_phys,
u32 reg_cfg,
- u32 data_width,
+ u32 data_width1,
+ u32 data_width2,
int psize)
{
int total_size = 0;
int i;
struct scatterlist *current_sg = sg;
- dma_addr_t next_lli_phys;
dma_addr_t dst;
- int err = 0;
+ struct d40_phy_lli *lli = lli_sg;
+ dma_addr_t l_phys = lli_phys;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
- /* If this scatter list entry is the last one, no next link */
- if (sg_len - 1 == i)
- next_lli_phys = 0;
- else
- next_lli_phys = ALIGN(lli_phys + (i + 1) *
- sizeof(struct d40_phy_lli),
- D40_LLI_ALIGN);
-
if (target)
dst = target;
else
dst = sg_phys(current_sg);
- err = d40_phy_fill_lli(&lli[i],
- dst,
- sg_dma_len(current_sg),
- psize,
- next_lli_phys,
- reg_cfg,
- !next_lli_phys,
- data_width,
- target == dst);
- if (err)
- goto err;
+ l_phys = ALIGN(lli_phys + (lli - lli_sg) *
+ sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
+
+ lli = d40_phy_buf_to_lli(lli,
+ dst,
+ sg_dma_len(current_sg),
+ psize,
+ l_phys,
+ reg_cfg,
+ sg_len - 1 == i,
+ data_width1,
+ data_width2,
+ target == dst);
+ if (lli == NULL)
+ return -EINVAL;
}
return total_size;
-err:
- return err;
}
@@ -315,17 +371,20 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
writel(lli_dst->lcsp13, &lcla[1].lcsp13);
}
-void d40_log_fill_lli(struct d40_log_lli *lli,
- dma_addr_t data, u32 data_size,
- u32 reg_cfg,
- u32 data_width,
- bool addr_inc)
+static void d40_log_fill_lli(struct d40_log_lli *lli,
+ dma_addr_t data, u32 data_size,
+ u32 reg_cfg,
+ u32 data_width,
+ bool addr_inc)
{
lli->lcsp13 = reg_cfg;
/* The number of elements to transfer */
lli->lcsp02 = ((data_size >> data_width) <<
D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+
+ BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
+
/* 16 LSBs address of the current element */
lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
/* 16 MSBs address of the current element */
@@ -348,55 +407,94 @@ int d40_log_sg_to_dev(struct scatterlist *sg,
int total_size = 0;
struct scatterlist *current_sg = sg;
int i;
+ struct d40_log_lli *lli_src = lli->src;
+ struct d40_log_lli *lli_dst = lli->dst;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
if (direction == DMA_TO_DEVICE) {
- d40_log_fill_lli(&lli->src[i],
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp->lcsp1, src_data_width,
- true);
- d40_log_fill_lli(&lli->dst[i],
- dev_addr,
- sg_dma_len(current_sg),
- lcsp->lcsp3, dst_data_width,
- false);
+ lli_src =
+ d40_log_buf_to_lli(lli_src,
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ lcsp->lcsp1, src_data_width,
+ dst_data_width,
+ true);
+ lli_dst =
+ d40_log_buf_to_lli(lli_dst,
+ dev_addr,
+ sg_dma_len(current_sg),
+ lcsp->lcsp3, dst_data_width,
+ src_data_width,
+ false);
} else {
- d40_log_fill_lli(&lli->dst[i],
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp->lcsp3, dst_data_width,
- true);
- d40_log_fill_lli(&lli->src[i],
- dev_addr,
- sg_dma_len(current_sg),
- lcsp->lcsp1, src_data_width,
- false);
+ lli_dst =
+ d40_log_buf_to_lli(lli_dst,
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ lcsp->lcsp3, dst_data_width,
+ src_data_width,
+ true);
+ lli_src =
+ d40_log_buf_to_lli(lli_src,
+ dev_addr,
+ sg_dma_len(current_sg),
+ lcsp->lcsp1, src_data_width,
+ dst_data_width,
+ false);
}
}
return total_size;
}
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+ dma_addr_t addr,
+ int size,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width1,
+ u32 data_width2,
+ bool addr_inc)
+{
+ struct d40_log_lli *lli = lli_sg;
+ int size_rest = size;
+ int size_seg = 0;
+
+ do {
+ size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+ size_rest -= size_seg;
+
+ d40_log_fill_lli(lli,
+ addr,
+ size_seg,
+ lcsp13, data_width1,
+ addr_inc);
+ if (addr_inc)
+ addr += size_seg;
+ lli++;
+ } while (size_rest);
+
+ return lli;
+}
+
int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
- u32 data_width)
+ u32 data_width1, u32 data_width2)
{
int total_size = 0;
struct scatterlist *current_sg = sg;
int i;
+ struct d40_log_lli *lli = lli_sg;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
-
- d40_log_fill_lli(&lli_sg[i],
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp13, data_width,
- true);
+ lli = d40_log_buf_to_lli(lli,
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ lcsp13,
+ data_width1, data_width2, true);
}
return total_size;
}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9e419b907544..9cc43495bea2 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -292,18 +292,20 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
struct d40_phy_lli *lli,
dma_addr_t lli_phys,
u32 reg_cfg,
- u32 data_width,
+ u32 data_width1,
+ u32 data_width2,
int psize);
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
- dma_addr_t data,
- u32 data_size,
- int psize,
- dma_addr_t next_lli,
- u32 reg_cfg,
- bool term_int,
- u32 data_width,
- bool is_device);
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width1,
+ u32 data_width2,
+ bool is_device);
void d40_phy_lli_write(void __iomem *virtbase,
u32 phy_chan_num,
@@ -312,12 +314,12 @@ void d40_phy_lli_write(void __iomem *virtbase,
/* Logical channels */
-void d40_log_fill_lli(struct d40_log_lli *lli,
- dma_addr_t data,
- u32 data_size,
- u32 reg_cfg,
- u32 data_width,
- bool addr_inc);
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+ dma_addr_t addr,
+ int size,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width1, u32 data_width2,
+ bool addr_inc);
int d40_log_sg_to_dev(struct scatterlist *sg,
int sg_len,
@@ -332,7 +334,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
- u32 data_width);
+ u32 data_width1, u32 data_width2);
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lli_dst,
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0307d601f5e5..5c4f9b9ecdc0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -607,25 +607,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);
-void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
-{
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
- FB_VISUAL_TRUECOLOR;
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
- info->fix.type_aux = 0;
- info->fix.xpanstep = 1; /* doing it in hw */
- info->fix.ypanstep = 1; /* doing it in hw */
- info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_NONE;
- info->fix.type_aux = 0;
-
- info->fix.line_length = fb->pitch;
- return;
-}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
@@ -835,7 +816,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
- drm_fb_helper_fill_fix(info, fb_helper->fb);
}
mutex_unlock(&dev->mode_config.mutex);
@@ -973,7 +953,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (new_fb) {
info->var.pixclock = 0;
- drm_fb_helper_fill_fix(info, fb_helper->fb);
if (register_framebuffer(info) < 0) {
return -EINVAL;
}
@@ -1000,6 +979,26 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth)
+{
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+ FB_VISUAL_TRUECOLOR;
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 1; /* doing it in hw */
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.type_aux = 0;
+
+ info->fix.line_length = pitch;
+ return;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index ee145a257287..512782728e51 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,6 +148,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
// memset(info->screen_base, 0, size);
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
info->pixmap.size = 64*1024;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 46e32573b3a3..01bffc4412d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -160,6 +160,7 @@ enum nouveau_flags {
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
#define NVOBJ_FLAG_VM (1 << 3)
+#define NVOBJ_FLAG_VM_USER (1 << 4)
#define NVOBJ_CINST_GLOBAL 0xdeadbeef
@@ -1576,6 +1577,20 @@ nv_match_device(struct drm_device *dev, unsigned device,
dev->pdev->subsystem_device == sub_device;
}
+/* returns 1 if device is one of the nv4x using the 0x4497 object class,
+ * helpful to determine a number of other hardware features
+ */
+static inline int
+nv44_graph_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if ((dev_priv->chipset & 0xf0) == 0x60)
+ return 1;
+
+ return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
+}
+
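/*
 * Editor's sketch (not part of the patch): which chipsets the helper above
 * treats as 0x4497-class.  All 0x6x chipsets qualify; for 0x4x chipsets the
 * low nibble is tested against the 0x0baf mask, so a clear bit means
 * "0x4497-class".  Standalone decode of that mask:
 */
#include <stdio.h>

static int nv44_class(unsigned chipset)
{
	if ((chipset & 0xf0) == 0x60)
		return 1;
	return !(0x0baf & (1 << (chipset & 0x0f)));
}

int main(void)
{
	printf("0x4497 class: ");
	for (unsigned c = 0x40; c <= 0x4f; c++)
		if (nv44_class(c))
			printf("nv%02x ", c);
	printf("\n");	/* nv44 nv46 nv4a nv4c nv4d nv4e nv4f with the mask above */
	return 0;
}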
/* memory type/access flags, do not match hardware values */
#define NV_MEM_ACCESS_RO 1
#define NV_MEM_ACCESS_WO 2
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a26d04740c88..60769d2f9a66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,13 +352,14 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = dev->mode_config.fb_base +
- (nvbo->bo.mem.start << PAGE_SHIFT);
+ info->fix.smem_start = nvbo->bo.mem.bus.base +
+ nvbo->bo.mem.bus.offset;
info->fix.smem_len = size;
info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
info->screen_size = size;
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* Set aperture base/size for vesafb takeover */
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 69044eb104bb..26347b7cd872 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -742,30 +742,24 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
- u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
- int i;
+ u32 total = 0, free = 0;
mutex_lock(&mm->mutex);
list_for_each_entry(r, &mm->nodes, nl_entry) {
- printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
- prefix, r->free ? "free" : "used", r->type,
- ((u64)r->offset << 12),
+ printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+ prefix, r->type, ((u64)r->offset << 12),
(((u64)r->offset + r->length) << 12));
+
total += r->length;
- ttotal[r->type] += r->length;
- if (r->free)
- tfree[r->type] += r->length;
- else
- tused[r->type] += r->length;
+ if (!r->type)
+ free += r->length;
}
mutex_unlock(&mm->mutex);
- printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
- for (i = 0; i < 3; i++) {
- printk(KERN_DEBUG "%s type %d: 0x%010llx, "
- "used 0x%010llx, free 0x%010llx\n", prefix,
- i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
- }
+ printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
+ prefix, (u64)total << 12, (u64)free << 12);
+ printk(KERN_DEBUG "%s block: 0x%08x\n",
+ prefix, mm->block_size << 12);
}
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index cdbb11eb701b..8844b50c3e54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -48,175 +48,76 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
b->offset = a->offset;
b->length = size;
- b->free = a->free;
b->type = a->type;
a->offset += size;
a->length -= size;
list_add_tail(&b->nl_entry, &a->nl_entry);
- if (b->free)
+ if (b->type == 0)
list_add_tail(&b->fl_entry, &a->fl_entry);
return b;
}
-static struct nouveau_mm_node *
-nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
-{
- struct nouveau_mm_node *prev, *next;
-
- /* try to merge with free adjacent entries of same type */
- prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
- if (this->nl_entry.prev != &rmm->nodes) {
- if (prev->free && prev->type == this->type) {
- prev->length += this->length;
- region_put(rmm, this);
- this = prev;
- }
- }
-
- next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
- if (this->nl_entry.next != &rmm->nodes) {
- if (next->free && next->type == this->type) {
- next->offset = this->offset;
- next->length += this->length;
- region_put(rmm, this);
- this = next;
- }
- }
-
- return this;
-}
+#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+ list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
void
nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
- u32 block_s, block_l;
+ struct nouveau_mm_node *prev = node(this, prev);
+ struct nouveau_mm_node *next = node(this, next);
- this->free = true;
list_add(&this->fl_entry, &rmm->free);
- this = nouveau_mm_merge(rmm, this);
-
- /* any entirely free blocks now? we'll want to remove typing
- * on them now so they can be use for any memory allocation
- */
- block_s = roundup(this->offset, rmm->block_size);
- if (block_s + rmm->block_size > this->offset + this->length)
- return;
+ this->type = 0;
- /* split off any still-typed region at the start */
- if (block_s != this->offset) {
- if (!region_split(rmm, this, block_s - this->offset))
- return;
+ if (prev && prev->type == 0) {
+ prev->length += this->length;
+ region_put(rmm, this);
+ this = prev;
}
- /* split off the soon-to-be-untyped block(s) */
- block_l = rounddown(this->length, rmm->block_size);
- if (block_l != this->length) {
- this = region_split(rmm, this, block_l);
- if (!this)
- return;
+ if (next && next->type == 0) {
+ next->offset = this->offset;
+ next->length += this->length;
+ region_put(rmm, this);
}
-
- /* mark as having no type, and retry merge with any adjacent
- * untyped blocks
- */
- this->type = 0;
- nouveau_mm_merge(rmm, this);
}
int
nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
u32 align, struct nouveau_mm_node **pnode)
{
- struct nouveau_mm_node *this, *tmp, *next;
- u32 splitoff, avail, alloc;
-
- list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
- next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
- if (this->nl_entry.next == &rmm->nodes)
- next = NULL;
-
- /* skip wrongly typed blocks */
- if (this->type && this->type != type)
+ struct nouveau_mm_node *prev, *this, *next;
+ u32 min = size_nc ? size_nc : size;
+ u32 align_mask = align - 1;
+ u32 splitoff;
+ u32 s, e;
+
+ list_for_each_entry(this, &rmm->free, fl_entry) {
+ e = this->offset + this->length;
+ s = this->offset;
+
+ prev = node(this, prev);
+ if (prev && prev->type != type)
+ s = roundup(s, rmm->block_size);
+
+ next = node(this, next);
+ if (next && next->type != type)
+ e = rounddown(e, rmm->block_size);
+
+ s = (s + align_mask) & ~align_mask;
+ e &= ~align_mask;
+ if (s > e || e - s < min)
continue;
- /* account for alignment */
- splitoff = this->offset & (align - 1);
- if (splitoff)
- splitoff = align - splitoff;
-
- if (this->length <= splitoff)
- continue;
-
- /* determine total memory available from this, and
- * the next block (if appropriate)
- */
- avail = this->length;
- if (next && next->free && (!next->type || next->type == type))
- avail += next->length;
-
- avail -= splitoff;
-
- /* determine allocation size */
- if (size_nc) {
- alloc = min(avail, size);
- alloc = rounddown(alloc, size_nc);
- if (alloc == 0)
- continue;
- } else {
- alloc = size;
- if (avail < alloc)
- continue;
- }
-
- /* untyped block, split off a chunk that's a multiple
- * of block_size and type it
- */
- if (!this->type) {
- u32 block = roundup(alloc + splitoff, rmm->block_size);
- if (this->length < block)
- continue;
-
- this = region_split(rmm, this, block);
- if (!this)
- return -ENOMEM;
-
- this->type = type;
- }
-
- /* stealing memory from adjacent block */
- if (alloc > this->length) {
- u32 amount = alloc - (this->length - splitoff);
-
- if (!next->type) {
- amount = roundup(amount, rmm->block_size);
-
- next = region_split(rmm, next, amount);
- if (!next)
- return -ENOMEM;
-
- next->type = type;
- }
-
- this->length += amount;
- next->offset += amount;
- next->length -= amount;
- if (!next->length) {
- list_del(&next->nl_entry);
- list_del(&next->fl_entry);
- kfree(next);
- }
- }
-
- if (splitoff) {
- if (!region_split(rmm, this, splitoff))
- return -ENOMEM;
- }
+ splitoff = s - this->offset;
+ if (splitoff && !region_split(rmm, this, splitoff))
+ return -ENOMEM;
- this = region_split(rmm, this, alloc);
- if (this == NULL)
+ this = region_split(rmm, this, min(size, e - s));
+ if (!this)
return -ENOMEM;
- this->free = false;
+ this->type = type;
list_del(&this->fl_entry);
*pnode = this;
return 0;
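/*
 * Editor's sketch (not part of the patch): the window computation used by the
 * rewritten nouveau_mm_get() above.  A free node's usable range [s, e) shrinks
 * to a block_size boundary wherever the neighbouring node holds a different
 * type, and is then trimmed to the requested alignment.  All numbers are
 * illustrative (allocator units, not bytes).
 */
#include <stdio.h>

#define ROUNDUP(x, a)	(((x) + (a) - 1) / (a) * (a))
#define ROUNDDOWN(x, a)	((x) / (a) * (a))

int main(void)
{
	unsigned block_size = 0x100, align = 0x20;
	unsigned offset = 0x1234, length = 0x800;	/* the free node */
	int prev_other_type = 1, next_other_type = 0;	/* neighbours */

	unsigned s = offset, e = offset + length;

	if (prev_other_type)
		s = ROUNDUP(s, block_size);	/* 0x1234 -> 0x1300 */
	if (next_other_type)
		e = ROUNDDOWN(e, block_size);

	s = ROUNDUP(s, align);			/* already aligned: 0x1300 */
	e &= ~(align - 1);			/* 0x1a34 -> 0x1a20 */

	printf("usable window: 0x%x..0x%x (%u units)\n", s, e, e - s);
	return 0;
}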
@@ -234,7 +135,6 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
heap = kzalloc(sizeof(*heap), GFP_KERNEL);
if (!heap)
return -ENOMEM;
- heap->free = true;
heap->offset = roundup(offset, block);
heap->length = rounddown(offset + length, block) - heap->offset;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index af3844933036..798eaf39691c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -30,9 +30,7 @@ struct nouveau_mm_node {
struct list_head fl_entry;
struct list_head rl_entry;
- bool free;
- int type;
-
+ u8 type;
u32 offset;
u32 length;
};
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 19ef92a0375a..8870d72388c8 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -451,8 +451,7 @@ nv40_graph_register(struct drm_device *dev)
NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
/* curie */
- if (dev_priv->chipset >= 0x60 ||
- 0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+ if (nv44_graph_class(dev))
NVOBJ_CLASS(dev, 0x4497, GR);
else
NVOBJ_CLASS(dev, 0x4097, GR);
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index ce585093264e..f70447d131d7 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -118,17 +118,6 @@
*/
static int
-nv40_graph_4097(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if ((dev_priv->chipset & 0xf0) == 0x60)
- return 0;
-
- return !!(0x0baf & (1 << dev_priv->chipset));
-}
-
-static int
nv40_graph_vs_count(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -219,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x4009dc, 0x80000000);
} else {
cp_ctx(ctx, 0x400840, 20);
- if (!nv40_graph_4097(ctx->dev)) {
+ if (nv44_graph_class(ctx->dev)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
}
@@ -228,7 +217,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400888, 0x00000040);
cp_ctx(ctx, 0x400894, 11);
gr_def(ctx, 0x400894, 0x00000040);
- if (nv40_graph_4097(ctx->dev)) {
+ if (!nv44_graph_class(ctx->dev)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
}
@@ -546,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
{
- int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
+ int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
cp_out (ctx, 0x300000);
cp_lsr (ctx, len - 4);
@@ -582,11 +571,11 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
} else {
b0_offset = 0x1d40/4; /* 2200 */
b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
- vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
+ vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
}
cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
- cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
+ cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
offset = ctx->ctxvals_pos;
ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
index e4e72c12ab6a..03c0d4c3f355 100644
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -6,27 +6,17 @@
int
nv40_mc_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t tmp;
-
/* Power up everything, resetting each individual unit will
* be done later if needed.
*/
nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
- switch (dev_priv->chipset) {
- case 0x44:
- case 0x46: /* G72 */
- case 0x4e:
- case 0x4c: /* C51_G7X */
- tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+ if (nv44_graph_class(dev)) {
+ u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
nv_wr32(dev, NV40_PMC_1700, tmp);
nv_wr32(dev, NV40_PMC_1704, 0);
nv_wr32(dev, NV40_PMC_1708, 0);
nv_wr32(dev, NV40_PMC_170C, tmp);
- break;
- default:
- break;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 2e1b1cd19a4b..ea0041810ae3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -332,8 +332,11 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
gpuobj->vinst = node->vram->offset;
if (gpuobj->flags & NVOBJ_FLAG_VM) {
- ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
- NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+ u32 flags = NV_MEM_ACCESS_RW;
+ if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
+ flags |= NV_MEM_ACCESS_SYS;
+
+ ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
&node->chan_vma);
if (ret) {
vram->put(dev, &node->vram);
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 5feacd5d5fa4..e6ea7d83187f 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -105,7 +105,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
if (ret)
return ret;
- ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
+ ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
&grch->unk418810);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 4b9251bb0ff4..e4e83c2caf5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -48,8 +48,8 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
phys >>= 8;
phys |= 0x00000001; /* present */
-// if (vma->access & NV_MEM_ACCESS_SYS)
-// phys |= 0x00000002;
+ if (vma->access & NV_MEM_ACCESS_SYS)
+ phys |= 0x00000002;
phys |= ((u64)target << 32);
phys |= ((u64)memtype << 36);
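
For reference, the hunk above packs the physical address and both access bits into a single 64-bit page-table entry; the stand-alone sketch below mirrors that packing so the bit positions are easy to check. The NV_MEM_ACCESS_SYS value is an illustrative stand-in, not taken from the nouveau headers.

#include <stdint.h>
#include <stdio.h>

#define NV_MEM_ACCESS_SYS 0x4	/* illustrative stand-in, not the real nouveau constant */

/* Mirror of the PTE packing in nvc0_vm_addr(): drop the low 8 address bits,
 * set the present bit, optionally the system-memory bit (now honoured after
 * this hunk), then fold in the target and memtype fields. */
static uint64_t nvc0_pte_sketch(uint64_t phys, uint32_t memtype,
                                uint32_t target, uint32_t access)
{
        phys >>= 8;
        phys |= 0x00000001;                     /* present */
        if (access & NV_MEM_ACCESS_SYS)
                phys |= 0x00000002;             /* system memory */
        phys |= (uint64_t)target << 32;
        phys |= (uint64_t)memtype << 36;
        return phys;
}

int main(void)
{
        printf("pte = %#llx\n", (unsigned long long)
               nvc0_pte_sketch(0x12345600ull, 0, 5, NV_MEM_ACCESS_SYS));
        return 0;
}
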
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7fe8ebdcdc0e..a8973acb3987 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3002,31 +3002,6 @@ int evergreen_copy_blit(struct radeon_device *rdev,
return 0;
}
-static bool evergreen_card_posted(struct radeon_device *rdev)
-{
- u32 reg;
-
- /* first check CRTCs */
- if (rdev->flags & RADEON_IS_IGP)
- reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- else
- reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
- if (reg & EVERGREEN_CRTC_MASTER_EN)
- return true;
-
- /* then check MEM_SIZE, in case the crtcs are off */
- if (RREG32(CONFIG_MEMSIZE))
- return true;
-
- return false;
-}
-
/* Plan is to move initialization in that function and use
* helper function so that radeon_device_init pretty much
* do nothing more than calling asic specific function. This
@@ -3063,7 +3038,7 @@ int evergreen_init(struct radeon_device *rdev)
if (radeon_asic_reset(rdev))
dev_warn(rdev->dev, "GPU reset failed !\n");
/* Post card if necessary */
- if (!evergreen_card_posted(rdev)) {
+ if (!radeon_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
@@ -3158,6 +3133,9 @@ static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, speed_cntl;
+ if (radeon_pcie_gen2 == 0)
+ return;
+
if (rdev->flags & RADEON_IS_IGP)
return;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f637595b14e1..46da5142b131 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2086,12 +2086,13 @@ int r100_asic_reset(struct radeon_device *rdev)
{
struct r100_mc_save save;
u32 status, tmp;
+ int ret = 0;
- r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
+ r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
@@ -2131,11 +2132,11 @@ int r100_asic_reset(struct radeon_device *rdev)
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
- return -1;
- }
+ ret = -1;
+ } else
+ dev_info(rdev->dev, "GPU reset succeed\n");
r100_mc_resume(rdev, &save);
- dev_info(rdev->dev, "GPU reset succeed\n");
- return 0;
+ return ret;
}
void r100_set_common_regs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fae5e709f270..cf862ca580bf 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -405,12 +405,13 @@ int r300_asic_reset(struct radeon_device *rdev)
{
struct r100_mc_save save;
u32 status, tmp;
+ int ret = 0;
- r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
+ r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
@@ -451,11 +452,11 @@ int r300_asic_reset(struct radeon_device *rdev)
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
- return -1;
- }
+ ret = -1;
+ } else
+ dev_info(rdev->dev, "GPU reset succeed\n");
r100_mc_resume(rdev, &save);
- dev_info(rdev->dev, "GPU reset succeed\n");
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6b50716267c0..aca2236268fa 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2358,24 +2358,6 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
/* FIXME: implement */
}
-
-bool r600_card_posted(struct radeon_device *rdev)
-{
- uint32_t reg;
-
- /* first check CRTCs */
- reg = RREG32(D1CRTC_CONTROL) |
- RREG32(D2CRTC_CONTROL);
- if (reg & CRTC_EN)
- return true;
-
- /* then check MEM_SIZE, in case the crtcs are off */
- if (RREG32(CONFIG_MEMSIZE))
- return true;
-
- return false;
-}
-
int r600_startup(struct radeon_device *rdev)
{
int r;
@@ -2536,7 +2518,7 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev)) {
+ if (!radeon_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
@@ -3658,6 +3640,9 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
+ if (radeon_pcie_gen2 == 0)
+ return;
+
if (rdev->flags & RADEON_IS_IGP)
return;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e9486630a467..71d2a554bbe6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -92,6 +92,7 @@ extern int radeon_tv;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
+extern int radeon_pcie_gen2;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index be5cb4f28c29..d5680a0c87af 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -104,6 +104,7 @@ int radeon_tv = 1;
int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
+int radeon_pcie_gen2 = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -147,6 +148,9 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444);
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
+module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ca32e9c1e91d..66324b5bb5ba 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -225,6 +225,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
strcpy(info->fix.id, "radeondrmfb");
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index ac40fd39d787..9177f9191837 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -439,7 +439,7 @@ evergreen 0x9400
0x000286EC SPI_COMPUTE_NUM_THREAD_X
0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
-0x000286F8 GDS_ADDR_SIZE
+0x00028724 GDS_ADDR_SIZE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
0x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b4192acaab5f..5afe294ed51f 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -339,16 +339,16 @@ void rs600_bm_disable(struct radeon_device *rdev)
int rs600_asic_reset(struct radeon_device *rdev)
{
- u32 status, tmp;
-
struct rv515_mc_save save;
+ u32 status, tmp;
+ int ret = 0;
- /* Stops all mc clients */
- rv515_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
+ /* Stops all mc clients */
+ rv515_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
@@ -392,11 +392,11 @@ int rs600_asic_reset(struct radeon_device *rdev)
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
- return -1;
- }
+ ret = -1;
+ } else
+ dev_info(rdev->dev, "GPU reset succeed\n");
rv515_mc_resume(rdev, &save);
- dev_info(rdev->dev, "GPU reset succeed\n");
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3a264aa3a79a..491dc9000655 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1268,7 +1268,7 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev)) {
+ if (!radeon_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
@@ -1372,6 +1372,9 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
u32 link_width_cntl, lanes, speed_cntl, tmp;
u16 link_cntl2;
+ if (radeon_pcie_gen2 == 0)
+ return;
+
if (rdev->flags & RADEON_IS_IGP)
return;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c7db6980e3a3..f0bd5bcdf563 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -196,88 +196,60 @@ static int i2c_device_pm_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm) {
- if (pm_runtime_suspended(dev))
- return 0;
- else
- return pm->suspend ? pm->suspend(dev) : 0;
- }
-
- return i2c_legacy_suspend(dev, PMSG_SUSPEND);
+ if (pm)
+ return pm_generic_suspend(dev);
+ else
+ return i2c_legacy_suspend(dev, PMSG_SUSPEND);
}
static int i2c_device_pm_resume(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
if (pm)
- ret = pm->resume ? pm->resume(dev) : 0;
+ return pm_generic_resume(dev);
else
- ret = i2c_legacy_resume(dev);
-
- return ret;
+ return i2c_legacy_resume(dev);
}
static int i2c_device_pm_freeze(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm) {
- if (pm_runtime_suspended(dev))
- return 0;
- else
- return pm->freeze ? pm->freeze(dev) : 0;
- }
-
- return i2c_legacy_suspend(dev, PMSG_FREEZE);
+ if (pm)
+ return pm_generic_freeze(dev);
+ else
+ return i2c_legacy_suspend(dev, PMSG_FREEZE);
}
static int i2c_device_pm_thaw(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm) {
- if (pm_runtime_suspended(dev))
- return 0;
- else
- return pm->thaw ? pm->thaw(dev) : 0;
- }
-
- return i2c_legacy_resume(dev);
+ if (pm)
+ return pm_generic_thaw(dev);
+ else
+ return i2c_legacy_resume(dev);
}
static int i2c_device_pm_poweroff(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm) {
- if (pm_runtime_suspended(dev))
- return 0;
- else
- return pm->poweroff ? pm->poweroff(dev) : 0;
- }
-
- return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
+ if (pm)
+ return pm_generic_poweroff(dev);
+ else
+ return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
}
static int i2c_device_pm_restore(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
if (pm)
- ret = pm->restore ? pm->restore(dev) : 0;
+ return pm_generic_restore(dev);
else
- ret = i2c_legacy_resume(dev);
-
- if (!ret) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
- return ret;
+ return i2c_legacy_resume(dev);
}
#else /* !CONFIG_PM_SLEEP */
#define i2c_device_pm_suspend NULL
@@ -1021,6 +993,14 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
static int __unregister_client(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
+ if (client && strcmp(client->name, "dummy"))
+ i2c_unregister_device(client);
+ return 0;
+}
+
+static int __unregister_dummy(struct device *dev, void *dummy)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
if (client)
i2c_unregister_device(client);
return 0;
@@ -1075,8 +1055,12 @@ int i2c_del_adapter(struct i2c_adapter *adap)
mutex_unlock(&adap->userspace_clients_lock);
/* Detach any active clients. This can't fail, thus we do not
- checking the returned value. */
+ * check the returned value. This is a two-pass process, because
+ * we can't remove the dummy devices during the first pass: they
+ * could have been instantiated by real devices wishing to clean
+ * them up properly, so we give them a chance to do that first. */
res = device_for_each_child(&adap->dev, NULL, __unregister_client);
+ res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
#ifdef CONFIG_I2C_COMPAT
class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
@@ -1140,6 +1124,14 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
if (res)
return res;
+ /* Drivers should switch to dev_pm_ops instead. */
+ if (driver->suspend)
+ pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
+ driver->driver.name);
+ if (driver->resume)
+ pr_warn("i2c-core: driver [%s] using legacy resume method\n",
+ driver->driver.name);
+
pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
INIT_LIST_HEAD(&driver->clients);
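
The two warnings added above push client drivers from the legacy i2c_driver .suspend/.resume hooks to dev_pm_ops, which the core now dispatches through pm_generic_suspend(), pm_generic_resume() and friends. A minimal sketch of what that looks like for a hypothetical client driver (driver name and callbacks invented for illustration):

#include <linux/i2c.h>
#include <linux/pm.h>

/* Hypothetical client driver: system sleep callbacks live in a
 * dev_pm_ops table instead of i2c_driver.suspend/.resume. */
static int foo_suspend(struct device *dev)
{
        /* quiesce the (hypothetical) device */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* bring the (hypothetical) device back up */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        .suspend = foo_suspend,         /* reached via pm_generic_suspend() */
        .resume  = foo_resume,          /* reached via pm_generic_resume() */
};

static struct i2c_driver foo_driver = {
        .driver = {
                .name = "foo",
                .pm   = &foo_pm_ops,
        },
        /* .probe/.remove/.id_table omitted from this sketch */
};
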
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index dffa0ac7c4f0..38e4eb1bb965 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -350,6 +350,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
if (!d->dm_dev.bdev)
return;
+ bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
d->dm_dev.bdev = NULL;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cf8594c5ea21..b76cfc89e1b5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1912,6 +1912,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
MD_BUG();
return;
}
+ bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index fa19d849a920..dd84124f4209 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -13,6 +13,7 @@
* your option) any later version.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -20,8 +21,12 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/mmc/host.h>
+#ifdef CONFIG_PPC
#include <asm/machdep.h>
+#endif
#include "sdhci-of.h"
#include "sdhci.h"
@@ -112,7 +117,11 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
return true;
/* Old device trees don't have the wp-inverted property. */
+#ifdef CONFIG_PPC
return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
+#else
+ return false;
+#endif
}
static int __devinit sdhci_of_probe(struct platform_device *ofdev,
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index b1f768917395..77414702cb00 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -53,9 +53,10 @@ config MTD_PARTITIONS
devices. Partitioning on NFTL 'devices' is different - that's the
'normal' form of partitioning used on a block device.
+if MTD_PARTITIONS
+
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
- depends on MTD_PARTITIONS
---help---
RedBoot is a ROM monitor and bootloader which deals with multiple
'images' in flash devices by putting a table one of the erase
@@ -72,9 +73,10 @@ config MTD_REDBOOT_PARTS
SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
example.
+if MTD_REDBOOT_PARTS
+
config MTD_REDBOOT_DIRECTORY_BLOCK
int "Location of RedBoot partition table"
- depends on MTD_REDBOOT_PARTS
default "-1"
---help---
This option is the Linux counterpart to the
@@ -91,18 +93,18 @@ config MTD_REDBOOT_DIRECTORY_BLOCK
config MTD_REDBOOT_PARTS_UNALLOCATED
bool "Include unallocated flash regions"
- depends on MTD_REDBOOT_PARTS
help
If you need to register each unallocated flash region as a MTD
'partition', enable this option.
config MTD_REDBOOT_PARTS_READONLY
bool "Force read-only for RedBoot system images"
- depends on MTD_REDBOOT_PARTS
help
If you need to force read-only for 'RedBoot', 'RedBoot Config' and
'FIS directory' images, enable this option.
+endif # MTD_REDBOOT_PARTS
+
config MTD_CMDLINE_PARTS
bool "Command line partition table parsing"
depends on MTD_PARTITIONS = "y" && MTD = "y"
@@ -142,7 +144,7 @@ config MTD_CMDLINE_PARTS
config MTD_AFS_PARTS
tristate "ARM Firmware Suite partition parsing"
- depends on ARM && MTD_PARTITIONS
+ depends on ARM
---help---
The ARM Firmware Suite allows the user to divide flash devices into
multiple 'images'. Each such image has a header containing its name
@@ -158,8 +160,8 @@ config MTD_AFS_PARTS
example.
config MTD_OF_PARTS
- tristate "Flash partition map based on OF description"
- depends on OF && MTD_PARTITIONS
+ def_bool y
+ depends on OF
help
This provides a partition parsing function which derives
the partition map from the children of the flash node,
@@ -167,10 +169,11 @@ config MTD_OF_PARTS
config MTD_AR7_PARTS
tristate "TI AR7 partitioning support"
- depends on MTD_PARTITIONS
---help---
TI AR7 partitioning support
+endif # MTD_PARTITIONS
+
comment "User Modules And Translation Layers"
config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 760abc533395..d4e7f25b1ebb 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -6,13 +6,13 @@
obj-$(CONFIG_MTD) += mtd.o
mtd-y := mtdcore.o mtdsuper.o
mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
+mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
-obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index ad9268b44416..a8c3e1c9b02a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -162,7 +162,7 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
#endif
/* Atmel chips don't use the same PRI format as Intel chips */
-static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -202,7 +202,7 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
cfi->cfiq->BufWriteTimeoutMax = 0;
}
-static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
+static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -214,7 +214,7 @@ static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
-static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
+static void fixup_intel_strataflash(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -227,7 +227,7 @@ static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
-static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
+static void fixup_no_write_suspend(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -240,7 +240,7 @@ static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
}
#endif
-static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
+static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -249,7 +249,7 @@ static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
}
-static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
+static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -259,7 +259,7 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
};
-static void fixup_use_point(struct mtd_info *mtd, void *param)
+static void fixup_use_point(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
if (!mtd->point && map_is_linear(map)) {
@@ -268,7 +268,7 @@ static void fixup_use_point(struct mtd_info *mtd, void *param)
}
}
-static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
+static void fixup_use_write_buffers(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
/*
* Some chips power-up with all sectors locked by default.
*/
-static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -295,31 +295,31 @@ static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
}
static struct cfi_fixup cfi_fixup_table[] = {
- { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
- { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
- { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
+ { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
+ { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
- { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
- { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
- { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
- { 0, 0, NULL, NULL }
+ { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
+ { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
+ { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
- { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock, NULL, },
- { 0, 0, NULL, NULL }
+ { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
+ { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
+ { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
/* The CFI vendor ids and the JEDEC vendor IDs appear
@@ -327,8 +327,8 @@ static struct cfi_fixup fixup_table[] = {
* well. This table is to pick all cases where
* we know that is the case.
*/
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
- { 0, 0, NULL, NULL }
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
+ { 0, 0, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
@@ -455,6 +455,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
+ mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
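
The CFI query field MaxBufWriteSize used above stores the write-buffer size as a power of two, so the new writebufsize assignment converts it to bytes; a quick stand-alone check of that arithmetic with an illustrative value:

#include <assert.h>

int main(void)
{
        /* A chip advertising MaxBufWriteSize = 6 has a 2^6 = 64-byte
         * write buffer, which is what mtd->writebufsize now reports. */
        unsigned max_buf_write_size = 6;        /* illustrative CFI value */
        unsigned writebufsize = 1u << max_buf_write_size;

        assert(writebufsize == 64);
        return 0;
}
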
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3b8e32d87977..f072fcfde04e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -134,7 +134,7 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp)
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
-static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
+static void fixup_amd_bootblock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -186,7 +186,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
}
#endif
-static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
+static void fixup_use_write_buffers(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -197,7 +197,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
}
/* Atmel chips don't use the same PRI format as AMD chips */
-static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -228,14 +228,14 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
cfi->cfiq->BufWriteTimeoutMax = 0;
}
-static void fixup_use_secsi(struct mtd_info *mtd, void *param)
+static void fixup_use_secsi(struct mtd_info *mtd)
{
/* Setup for chips with a secsi area */
mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}
-static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
+static void fixup_use_erase_chip(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -250,7 +250,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
* Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
* locked by default.
*/
-static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
+static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
mtd->lock = cfi_atmel_lock;
mtd->unlock = cfi_atmel_unlock;
@@ -271,7 +271,7 @@ static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
cfi->cfiq->NumEraseRegions = 1;
}
-static void fixup_sst39vf(struct mtd_info *mtd, void *param)
+static void fixup_sst39vf(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@ static void fixup_sst39vf(struct mtd_info *mtd, void *param)
cfi->addr_unlock2 = 0x2AAA;
}
-static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
+static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -295,12 +295,12 @@ static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
cfi->sector_erase_cmd = CMD(0x50);
}
-static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
+static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- fixup_sst39vf_rev_b(mtd, param);
+ fixup_sst39vf_rev_b(mtd);
/*
* CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
@@ -310,7 +310,7 @@ static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}
-static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
+static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -321,7 +321,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
}
}
-static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
+static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -334,47 +334,47 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
- { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, /* SST39VF1602 */
- { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, /* SST39VF1601 */
- { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, /* SST39VF3202 */
- { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, /* SST39VF3201 */
- { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3202B */
- { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3201B */
- { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6402B */
- { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6401B */
- { 0, 0, NULL, NULL }
+ { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
+ { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
+ { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
+ { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
+ { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
+ { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
+ { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
+ { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
+ { 0, 0, NULL }
};
static struct cfi_fixup cfi_fixup_table[] = {
- { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
- { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
- { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+ { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
+ { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
- { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
- { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
- { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
- { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
- { CFI_MFR_SST, 0x536A, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6402 */
- { CFI_MFR_SST, 0x536B, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6401 */
- { CFI_MFR_SST, 0x536C, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6404 */
- { CFI_MFR_SST, 0x536D, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6403 */
+ { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
+ { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
+ { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
+ { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+ { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
+ { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
+ { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
+ { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
- { 0, 0, NULL, NULL }
+ { 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
- { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
- { 0, 0, NULL, NULL }
+ { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
+ { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
+ { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
+ { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
@@ -383,18 +383,30 @@ static struct cfi_fixup fixup_table[] = {
* well. This table is to pick all cases where
* we know that is the case.
*/
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
- { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
- { 0, 0, NULL, NULL }
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
+ { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
+ { 0, 0, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
struct cfi_pri_amdstd *extp)
{
- if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
- extp->MajorVersion == '0')
- extp->MajorVersion = '1';
+ if (cfi->mfr == CFI_MFR_SAMSUNG) {
+ if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
+ (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
+ /*
+ * Samsung K8P2815UQB and K8D6x16UxM chips
+ * report major=0 / minor=0.
+ * K8D3x16UxC chips report major=3 / minor=3.
+ */
+ printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
+ " Extended Query version to 1.%c\n",
+ extp->MinorVersion);
+ extp->MajorVersion = '1';
+ }
+ }
+
/*
* SST 38VF640x chips report major=0xFF / minor=0xFF.
*/
@@ -428,6 +440,10 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
+ mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
+ __func__, mtd->writebufsize);
mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 314af1f5a370..c04b7658abe9 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -238,6 +238,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
mtd->resume = cfi_staa_resume;
mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
+ mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
map->fldrv = &cfi_staa_chipdrv;
__module_get(THIS_MODULE);
mtd->name = map->name;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 360525c637d2..6ae3d111e1e7 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -156,7 +156,7 @@ void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
for (f=fixups; f->fixup; f++) {
if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
- f->fixup(mtd, f->param);
+ f->fixup(mtd);
}
}
}
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index d18064977192..5e3cc80128aa 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -98,7 +98,7 @@ static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-static void fixup_use_fwh_lock(struct mtd_info *mtd, void *param)
+static void fixup_use_fwh_lock(struct mtd_info *mtd)
{
printk(KERN_NOTICE "using fwh lock/unlock method\n");
/* Setup for the chips with the fwh lock method */
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index bf5a002209bd..e4eba6cc1b2e 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -51,6 +51,10 @@
#define OPCODE_WRDI 0x04 /* Write disable */
#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
+/* Used for Macronix flashes only. */
+#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
+#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
+
/* Status Register bits. */
#define SR_WIP 1 /* Write in progress */
#define SR_WEL 2 /* Write enable latch */
@@ -62,7 +66,7 @@
/* Define max times to check status register before we give up. */
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
-#define MAX_CMD_SIZE 4
+#define MAX_CMD_SIZE 5
#ifdef CONFIG_M25PXX_USE_FAST_READ
#define OPCODE_READ OPCODE_FAST_READ
@@ -152,6 +156,16 @@ static inline int write_disable(struct m25p *flash)
}
/*
+ * Enable/disable 4-byte addressing mode.
+ */
+static inline int set_4byte(struct m25p *flash, int enable)
+{
+ u8 code = enable ? OPCODE_EN4B : OPCODE_EX4B;
+
+ return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
+}
+
+/*
* Service routine to read status register until ready, or timeout occurs.
* Returns non-zero if error.
*/
@@ -207,6 +221,7 @@ static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
cmd[1] = addr >> (flash->addr_width * 8 - 8);
cmd[2] = addr >> (flash->addr_width * 8 - 16);
cmd[3] = addr >> (flash->addr_width * 8 - 24);
+ cmd[4] = addr >> (flash->addr_width * 8 - 32);
}
static int m25p_cmdsz(struct m25p *flash)
@@ -482,6 +497,10 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t actual;
int cmd_sz, ret;
+ DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
+ dev_name(&flash->spi->dev), __func__, "to",
+ (u32)to, len);
+
*retlen = 0;
/* sanity checks */
@@ -607,7 +626,6 @@ struct flash_info {
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .addr_width = 3, \
.flags = (_flags), \
})
@@ -635,7 +653,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
- { "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
/* EON -- en25pxx */
{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
@@ -653,6 +671,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
@@ -764,6 +784,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
return &m25p_ids[tmp];
}
}
+ dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
return ERR_PTR(-ENODEV);
}
@@ -883,7 +904,17 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.dev.parent = &spi->dev;
flash->page_size = info->page_size;
- flash->addr_width = info->addr_width;
+
+ if (info->addr_width)
+ flash->addr_width = info->addr_width;
+ else {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ if (flash->mtd.size > 0x1000000) {
+ flash->addr_width = 4;
+ set_4byte(flash, 1);
+ } else
+ flash->addr_width = 3;
+ }
dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
(long long)flash->mtd.size >> 10);
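
The 16MiB threshold in the probe hunk above is simply the reach of a 3-byte address (2^24 bytes); larger parts need the fourth address byte that the enlarged MAX_CMD_SIZE and the extra cmd[4] assignment make room for, once the chip has been switched into 4-byte mode. A stand-alone sketch of the width decision and the MSB-first command packing (illustrative opcode and sizes; the real driver always fills four address slots and sends only m25p_cmdsz() bytes on the wire):

#include <stdint.h>
#include <stdio.h>

/* Pick 3- or 4-byte addressing the same way the probe path now does:
 * anything above 16MiB (2^24) cannot be reached with three bytes. */
static unsigned pick_addr_width(uint64_t flash_size)
{
        return flash_size > 0x1000000 ? 4 : 3;
}

/* Mirror of m25p_addr2cmd(): opcode in cmd[0], address bytes MSB first. */
static void addr2cmd(unsigned addr_width, uint32_t addr, uint8_t *cmd)
{
        unsigned i;

        for (i = 0; i < addr_width; i++)
                cmd[1 + i] = addr >> (addr_width * 8 - 8 * (i + 1));
}

int main(void)
{
        uint8_t cmd[5] = { 0x03 };      /* READ opcode + up to 4 address bytes */
        unsigned width = pick_addr_width(32ull << 20);  /* 32MiB part */

        addr2cmd(width, 0x01234567, cmd);
        printf("width=%u cmd=%02x %02x %02x %02x %02x\n",
               width, cmd[0], cmd[1], cmd[2], cmd[3], cmd[4]);
        return 0;
}
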
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 684247a8a5ed..c163e619abc9 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -335,7 +335,7 @@ out:
return ret;
}
-static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
+static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
struct spi_message m;
@@ -375,7 +375,7 @@ static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
return flash_info;
}
-static int __init sst25l_probe(struct spi_device *spi)
+static int __devinit sst25l_probe(struct spi_device *spi)
{
struct flash_info *flash_info;
struct sst25l_flash *flash;
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 19fe92db0c46..77d64ce19e9f 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -149,11 +149,8 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
printk(KERN_ERR MOD_NAME
- " %s(): Unable to register resource"
- " 0x%.16llx-0x%.16llx - kernel bug?\n",
- __func__,
- (unsigned long long)window->rsrc.start,
- (unsigned long long)window->rsrc.end);
+ " %s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
}
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index d175c120ee84..1f3049590d9e 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -196,10 +196,15 @@ static int bcm963xx_probe(struct platform_device *pdev)
bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
if (!bcm963xx_mtd_info) {
dev_err(&pdev->dev, "failed to probe using CFI\n");
+ bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
+ if (bcm963xx_mtd_info)
+ goto probe_ok;
+ dev_err(&pdev->dev, "failed to probe using JEDEC\n");
err = -EIO;
goto err_probe;
}
+probe_ok:
bcm963xx_mtd_info->owner = THIS_MODULE;
/* This is mutually exclusive */
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index ddb462bea9b5..5fdb7b26cea3 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -178,11 +178,8 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
printk(KERN_ERR MOD_NAME
- " %s(): Unable to register resource"
- " 0x%.016llx-0x%.016llx - kernel bug?\n",
- __func__,
- (unsigned long long)window->rsrc.start,
- (unsigned long long)window->rsrc.end);
+ " %s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
}
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index d12c93dc1aad..4feb7507ab7c 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -242,12 +242,9 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
- printk(KERN_DEBUG MOD_NAME
- ": %s(): Unable to register resource"
- " 0x%.08llx-0x%.08llx - kernel bug?\n",
- __func__,
- (unsigned long long)window->rsrc.start,
- (unsigned long long)window->rsrc.end);
+ printk(KERN_DEBUG MOD_NAME ": "
+ "%s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
}
/* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index f102bf243a74..1337a4191a0c 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -175,12 +175,9 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
- printk(KERN_DEBUG MOD_NAME
- ": %s(): Unable to register resource"
- " 0x%.16llx-0x%.16llx - kernel bug?\n",
- __func__,
- (unsigned long long)window->rsrc.start,
- (unsigned long long)window->rsrc.end);
+ printk(KERN_DEBUG MOD_NAME ": "
+ "%s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
}
/* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 9861814aa027..8506578e6a35 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -274,9 +274,7 @@ static int __devinit of_flash_probe(struct platform_device *dev,
continue;
}
- dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
- (unsigned long long)res.start,
- (unsigned long long)res.end);
+ dev_dbg(&dev->dev, "of_flash device: %pR\n", &res);
err = -EBUSY;
res_size = resource_size(&res);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index b5391ebb736e..027e628a4f1d 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -166,9 +166,8 @@ static int __init init_scx200_docflash(void)
outl(pmr, scx200_cb_base + SCx200_PMR);
}
- printk(KERN_INFO NAME ": DOCCS mapped at 0x%llx-0x%llx, width %d\n",
- (unsigned long long)docmem.start,
- (unsigned long long)docmem.end, width);
+ printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n",
+ &docmem, width);
scx200_docflash_map.size = size;
if (width == 8)
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 60146984f4be..c08e140d40ed 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -139,7 +139,7 @@ static int __init init_tqm_mtd(void)
goto error_mem;
}
- map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL);
+ map_banks[idx]->name = kmalloc(16, GFP_KERNEL);
if (!map_banks[idx]->name) {
ret = -ENOMEM;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index ee4bb3330bdf..145b3d0dc0db 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -522,10 +522,6 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- /* Only master mtd device must be used to control partitions */
- if (!mtd_is_master(mtd))
- return -EINVAL;
-
if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
return -EFAULT;
@@ -535,6 +531,10 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
switch (a.op) {
case BLKPG_ADD_PARTITION:
+ /* Only master mtd device must be used to add partitions */
+ if (mtd_is_partition(mtd))
+ return -EINVAL;
+
return mtd_add_partition(mtd, p.devname, p.start, p.length);
case BLKPG_DEL_PARTITION:
@@ -601,6 +601,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
}
case MEMGETINFO:
+ memset(&info, 0, sizeof(info));
info.type = mtd->type;
info.flags = mtd->flags;
info.size = mtd->size;
@@ -609,7 +610,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
info.oobsize = mtd->oobsize;
/* The below fields are obsolete */
info.ecctype = -1;
- info.eccsize = 0;
if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
return -EFAULT;
break;
@@ -1201,7 +1201,7 @@ err_unregister_chdev:
static void __exit cleanup_mtdchar(void)
{
unregister_mtd_user(&mtdchar_notifier);
- mntput_long(mtd_inode_mnt);
+ mntput(mtd_inode_mnt);
unregister_filesystem(&mtd_inodefs_type);
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index bf8de0943103..5f5777bd3f75 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -776,6 +776,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.size = subdev[0]->size;
concat->mtd.erasesize = subdev[0]->erasesize;
concat->mtd.writesize = subdev[0]->writesize;
+ concat->mtd.writebufsize = subdev[0]->writebufsize;
concat->mtd.subpage_sft = subdev[0]->subpage_sft;
concat->mtd.oobsize = subdev[0]->oobsize;
concat->mtd.oobavail = subdev[0]->oobavail;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index c948150079be..e3e40f440323 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -401,7 +401,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
cxt->mtd = NULL;
- flush_scheduled_work();
+ flush_work_sync(&cxt->work_erase);
+ flush_work_sync(&cxt->work_write);
}
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 79e3689f1e16..0a4760174782 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -120,8 +120,25 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
if (ops->datbuf && from + ops->len > mtd->size)
return -EINVAL;
- res = part->master->read_oob(part->master, from + part->offset, ops);
+ /*
+ * If OOB is also requested, make sure that we do not read past the end
+ * of this partition.
+ */
+ if (ops->oobbuf) {
+ size_t len, pages;
+
+ if (ops->mode == MTD_OOB_AUTO)
+ len = mtd->oobavail;
+ else
+ len = mtd->oobsize;
+ pages = mtd_div_by_ws(mtd->size, mtd);
+ pages -= mtd_div_by_ws(from, mtd);
+ if (ops->ooboffs + ops->ooblen > pages * len)
+ return -EINVAL;
+ }
+
+ res = part->master->read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
if (res == -EUCLEAN)
mtd->ecc_stats.corrected++;
@@ -384,6 +401,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd.flags = master->flags & ~part->mask_flags;
slave->mtd.size = part->size;
slave->mtd.writesize = master->writesize;
+ slave->mtd.writebufsize = master->writebufsize;
slave->mtd.oobsize = master->oobsize;
slave->mtd.oobavail = master->oobavail;
slave->mtd.subpage_sft = master->subpage_sft;
@@ -720,19 +738,19 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
}
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
-int mtd_is_master(struct mtd_info *mtd)
+int mtd_is_partition(struct mtd_info *mtd)
{
struct mtd_part *part;
- int nopart = 0;
+ int ispart = 0;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry(part, &mtd_partitions, list)
if (&part->mtd == mtd) {
- nopart = 1;
+ ispart = 1;
break;
}
mutex_unlock(&mtd_partitions_mutex);
- return nopart;
+ return ispart;
}
-EXPORT_SYMBOL_GPL(mtd_is_master);
+EXPORT_SYMBOL_GPL(mtd_is_partition);
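
The new check in part_read_oob() works in whole pages: it counts how many pages lie between 'from' and the end of the partition and multiplies by the per-page OOB budget (oobavail for MTD_OOB_AUTO, the full oobsize otherwise). A stand-alone sketch of that arithmetic with made-up NAND geometry:

#include <assert.h>
#include <stdint.h>

/* Made-up geometry: 2KiB pages, 64-byte OOB, 1MiB partition. */
#define WRITESIZE       2048u
#define OOBSIZE         64u
#define PART_SIZE       (1024u * 1024u)

/* Same bound as the patched part_read_oob(), using the raw oobsize
 * branch for simplicity. */
static int oob_read_fits(uint64_t from, uint32_t ooboffs, uint32_t ooblen)
{
        uint64_t pages = PART_SIZE / WRITESIZE - from / WRITESIZE;

        return (uint64_t)ooboffs + ooblen <= pages * OOBSIZE;
}

int main(void)
{
        /* Reading the last page's OOB is fine ... */
        assert(oob_read_fits(PART_SIZE - WRITESIZE, 0, OOBSIZE));
        /* ... but asking for one byte more than the partition holds is not. */
        assert(!oob_read_fits(PART_SIZE - WRITESIZE, 0, OOBSIZE + 1));
        return 0;
}
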
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8229802b4346..c89592239bc7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -96,6 +96,7 @@ config MTD_NAND_SPIA
config MTD_NAND_AMS_DELTA
tristate "NAND Flash device on Amstrad E3"
depends on MACH_AMS_DELTA
+ default y
help
Support for NAND flash on Amstrad E3 (Delta).
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 2548e1065bf8..a067d090cb31 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -4,6 +4,8 @@
* Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
*
* Derived from drivers/mtd/toto.c
+ * Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ * Partially stolen from drivers/mtd/nand/plat_nand.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -62,9 +64,10 @@ static struct mtd_partition partition_info[] = {
static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
{
struct nand_chip *this = mtd->priv;
+ void __iomem *io_base = this->priv;
- omap_writew(0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL));
- omap_writew(byte, this->IO_ADDR_W);
+ writew(0, io_base + OMAP_MPUIO_IO_CNTL);
+ writew(byte, this->IO_ADDR_W);
ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0);
ndelay(40);
ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE,
@@ -75,11 +78,12 @@ static u_char ams_delta_read_byte(struct mtd_info *mtd)
{
u_char res;
struct nand_chip *this = mtd->priv;
+ void __iomem *io_base = this->priv;
ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0);
ndelay(40);
- omap_writew(~0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL));
- res = omap_readw(this->IO_ADDR_R);
+ writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
+ res = readw(this->IO_ADDR_R);
ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE,
AMS_DELTA_LATCH2_NAND_NRE);
@@ -151,11 +155,16 @@ static int ams_delta_nand_ready(struct mtd_info *mtd)
/*
* Main initialization routine
*/
-static int __init ams_delta_init(void)
+static int __devinit ams_delta_init(struct platform_device *pdev)
{
struct nand_chip *this;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *io_base;
int err = 0;
+ if (!res)
+ return -ENXIO;
+
/* Allocate memory for MTD device structure and private data */
ams_delta_mtd = kmalloc(sizeof(struct mtd_info) +
sizeof(struct nand_chip), GFP_KERNEL);
@@ -177,9 +186,25 @@ static int __init ams_delta_init(void)
/* Link the private data with the MTD structure */
ams_delta_mtd->priv = this;
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ err = -EBUSY;
+ goto out_free;
+ }
+
+ io_base = ioremap(res->start, resource_size(res));
+ if (io_base == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto out_release_io;
+ }
+
+ this->priv = io_base;
+
/* Set address of NAND IO lines */
- this->IO_ADDR_R = (OMAP1_MPUIO_BASE + OMAP_MPUIO_INPUT_LATCH);
- this->IO_ADDR_W = (OMAP1_MPUIO_BASE + OMAP_MPUIO_OUTPUT);
+ this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
+ this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
this->read_byte = ams_delta_read_byte;
this->write_buf = ams_delta_write_buf;
this->read_buf = ams_delta_read_buf;
@@ -195,6 +220,8 @@ static int __init ams_delta_init(void)
this->chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
+ platform_set_drvdata(pdev, io_base);
+
/* Set chip enabled, but */
ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE |
AMS_DELTA_LATCH2_NAND_NWE |
@@ -214,25 +241,56 @@ static int __init ams_delta_init(void)
goto out;
out_mtd:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(io_base);
+out_release_io:
+ release_mem_region(res->start, resource_size(res));
+out_free:
kfree(ams_delta_mtd);
out:
return err;
}
-module_init(ams_delta_init);
-
/*
* Clean up routine
*/
-static void __exit ams_delta_cleanup(void)
+static int __devexit ams_delta_cleanup(struct platform_device *pdev)
{
+ void __iomem *io_base = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
/* Release resources, unregister device */
nand_release(ams_delta_mtd);
+ iounmap(io_base);
+ release_mem_region(res->start, resource_size(res));
+
/* Free the MTD device structure */
kfree(ams_delta_mtd);
+
+ return 0;
+}
+
+static struct platform_driver ams_delta_nand_driver = {
+ .probe = ams_delta_init,
+ .remove = __devexit_p(ams_delta_cleanup),
+ .driver = {
+ .name = "ams-delta-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ams_delta_nand_init(void)
+{
+ return platform_driver_register(&ams_delta_nand_driver);
+}
+module_init(ams_delta_nand_init);
+
+static void __exit ams_delta_nand_exit(void)
+{
+ platform_driver_unregister(&ams_delta_nand_driver);
}
-module_exit(ams_delta_cleanup);
+module_exit(ams_delta_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index c141b07b25d1..7a13d42cbabd 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -388,6 +388,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
"page_addr: 0x%x, column: 0x%x.\n",
page_addr, column);
+ elbc_fcm_ctrl->column = column;
+ elbc_fcm_ctrl->oob = 0;
elbc_fcm_ctrl->use_mdr = 1;
fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 02edfba25b0c..205b10b9f9b9 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mtd/fsmc.h>
+#include <linux/amba/bus.h>
#include <mtd/mtd-abi.h>
static struct nand_ecclayout fsmc_ecc1_layout = {
@@ -119,21 +120,36 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
}
};
-/*
- * Default partition tables to be used if the partition information not
- * provided through platform data
- */
-#define PARTITION(n, off, sz) {.name = n, .offset = off, .size = sz}
+#ifdef CONFIG_MTD_PARTITIONS
/*
+ * Default partition tables to be used if the partition information is not
+ * provided through platform data.
+ *
* Default partition layout for small page(= 512 bytes) devices
* Size for "Root file system" is updated in driver based on actual device size
*/
static struct mtd_partition partition_info_16KB_blk[] = {
- PARTITION("X-loader", 0, 4 * 0x4000),
- PARTITION("U-Boot", 0x10000, 20 * 0x4000),
- PARTITION("Kernel", 0x60000, 256 * 0x4000),
- PARTITION("Root File System", 0x460000, 0),
+ {
+ .name = "X-loader",
+ .offset = 0,
+ .size = 4*0x4000,
+ },
+ {
+ .name = "U-Boot",
+ .offset = 0x10000,
+ .size = 20*0x4000,
+ },
+ {
+ .name = "Kernel",
+ .offset = 0x60000,
+ .size = 256*0x4000,
+ },
+ {
+ .name = "Root File System",
+ .offset = 0x460000,
+ .size = 0,
+ },
};
/*
@@ -141,19 +157,37 @@ static struct mtd_partition partition_info_16KB_blk[] = {
* Size for "Root file system" is updated in driver based on actual device size
*/
static struct mtd_partition partition_info_128KB_blk[] = {
- PARTITION("X-loader", 0, 4 * 0x20000),
- PARTITION("U-Boot", 0x80000, 12 * 0x20000),
- PARTITION("Kernel", 0x200000, 48 * 0x20000),
- PARTITION("Root File System", 0x800000, 0),
+ {
+ .name = "X-loader",
+ .offset = 0,
+ .size = 4*0x20000,
+ },
+ {
+ .name = "U-Boot",
+ .offset = 0x80000,
+ .size = 12*0x20000,
+ },
+ {
+ .name = "Kernel",
+ .offset = 0x200000,
+ .size = 48*0x20000,
+ },
+ {
+ .name = "Root File System",
+ .offset = 0x800000,
+ .size = 0,
+ },
};
#ifdef CONFIG_MTD_CMDLINE_PARTS
const char *part_probes[] = { "cmdlinepart", NULL };
#endif
+#endif
/**
- * struct fsmc_nand_data - atructure for FSMC NAND device state
+ * struct fsmc_nand_data - structure for FSMC NAND device state
*
+ * @pid: Part ID in the AMBA PrimeCell format
* @mtd: MTD info for a NAND flash.
* @nand: Chip related info for a NAND flash.
* @partitions: Partition info for a NAND Flash.
@@ -169,6 +203,7 @@ const char *part_probes[] = { "cmdlinepart", NULL };
* @regs_va: FSMC regs base address.
*/
struct fsmc_nand_data {
+ u32 pid;
struct mtd_info mtd;
struct nand_chip nand;
struct mtd_partition *partitions;
@@ -508,7 +543,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
struct nand_chip *nand;
struct fsmc_regs *regs;
struct resource *res;
- int nr_parts, ret = 0;
+ int ret = 0;
+ u32 pid;
+ int i;
if (!pdata) {
dev_err(&pdev->dev, "platform data is NULL\n");
@@ -598,6 +635,18 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (ret)
goto err_probe1;
+ /*
+ * This device ID is actually a common AMBA ID as used on the
+ * AMBA PrimeCell bus. However it is not a PrimeCell.
+ */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
+ host->pid = pid;
+ dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
+ "revision %02x, config %02x\n",
+ AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
+ AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
+
host->bank = pdata->bank;
host->select_chip = pdata->select_bank;
regs = host->regs_va;
@@ -625,7 +674,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
- if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
+ if (AMBA_REV_BITS(host->pid) >= 8) {
nand->ecc.read_page = fsmc_read_page_hwecc;
nand->ecc.calculate = fsmc_read_hwecc_ecc4;
nand->ecc.correct = fsmc_correct_data;
@@ -645,7 +694,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
goto err_probe;
}
- if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
+ if (AMBA_REV_BITS(host->pid) >= 8) {
if (host->mtd.writesize == 512) {
nand->ecc.layout = &fsmc_ecc4_sp_layout;
host->ecc_place = &fsmc_ecc4_sp_place;
@@ -676,11 +725,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* Check if partition info passed via command line
*/
host->mtd.name = "nand";
- nr_parts = parse_mtd_partitions(&host->mtd, part_probes,
+ host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes,
&host->partitions, 0);
- if (nr_parts > 0) {
- host->nr_partitions = nr_parts;
- } else {
+ if (host->nr_partitions <= 0) {
#endif
/*
* Check if partition info passed via command line
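
    For reference, a standalone sketch of the peripheral-ID read added to
    fsmc_nand_probe() above (illustrative only, not the patch code): four byte-wide
    registers at the top of the register window each carry 8 bits of the 32-bit
    AMBA-style ID. Here "base" stands in for host->regs_va and "size" for the
    ioremapped resource size.

    #include <linux/io.h>
    #include <linux/types.h>

    static u32 example_read_periphid(void __iomem *base, unsigned long size)
    {
            u32 pid = 0;
            int i;

            /* the ID registers live in the last 0x20 bytes of the window */
            for (i = 0; i < 4; i++)
                    pid |= (readl(base + size - 0x20 + 4 * i) & 0xff) << (i * 8);

            return pid;
    }

    The AMBA_PART_BITS()/AMBA_MANF_BITS()/AMBA_REV_BITS()/AMBA_CONFIG_BITS() helpers
    from <linux/amba/bus.h> then pick the fields apart, which is why the FSMC_VER8
    checks above become AMBA_REV_BITS(host->pid) >= 8.
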
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 67343fc31bd5..cea38a5d4ac5 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -251,58 +251,6 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
return 0;
}
-
-/* Copy paste of nand_read_page_hwecc_oob_first except for different eccpos
- * handling. The ecc area is for 4k chips 72 bytes long and thus does not fit
- * into the eccpos array. */
-static int jz_nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
-{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- unsigned int ecc_offset = chip->page_shift;
-
- /* Read the OOB area first */
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
- for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
-
- chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
-
- stat = chip->ecc.correct(mtd, p, &chip->oob_poi[i], NULL);
- if (stat < 0)
- mtd->ecc_stats.failed++;
- else
- mtd->ecc_stats.corrected += stat;
- }
- return 0;
-}
-
-/* Copy-and-paste of nand_write_page_hwecc with different eccpos handling. */
-static void jz_nand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
-{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- const uint8_t *p = buf;
- unsigned int ecc_offset = chip->page_shift;
-
- for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
- chip->ecc.calculate(mtd, p, &chip->oob_poi[i]);
- }
-
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-}
-
#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_probes[] = {"cmdline", NULL};
#endif
@@ -393,9 +341,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
chip->ecc.size = 512;
chip->ecc.bytes = 9;
- chip->ecc.read_page = jz_nand_read_page_hwecc_oob_first;
- chip->ecc.write_page = jz_nand_write_page_hwecc;
-
if (pdata)
chip->ecc.layout = pdata->ecc_layout;
@@ -489,7 +434,7 @@ static int __devexit jz_nand_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver jz_nand_driver = {
+static struct platform_driver jz_nand_driver = {
.probe = jz_nand_probe,
.remove = __devexit_p(jz_nand_remove),
.driver = {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 214b03afdd48..ef932ba55a0b 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1009,7 +1009,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
- int err = 0, nr_parts = 0;
+ int err = 0, __maybe_unused nr_parts = 0;
struct nand_ecclayout *oob_smallpage, *oob_largepage;
/* Allocate memory for MTD device structure and private data */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 31bf376b82a0..a9c6ce745767 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2865,20 +2865,24 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
/* check version */
val = le16_to_cpu(p->revision);
- if (val == 1 || val > (1 << 4)) {
- printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
- __func__, val);
- return 0;
- }
-
- if (val & (1 << 4))
+ if (val & (1 << 5))
+ chip->onfi_version = 23;
+ else if (val & (1 << 4))
chip->onfi_version = 22;
else if (val & (1 << 3))
chip->onfi_version = 21;
else if (val & (1 << 2))
chip->onfi_version = 20;
- else
+ else if (val & (1 << 1))
chip->onfi_version = 10;
+ else
+ chip->onfi_version = 0;
+
+ if (!chip->onfi_version) {
+ printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
+ __func__, val);
+ return 0;
+ }
sanitize_string(p->manufacturer, sizeof(p->manufacturer));
sanitize_string(p->model, sizeof(p->model));
@@ -2887,7 +2891,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
mtd->writesize = le32_to_cpu(p->byte_per_page);
mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
- chip->chipsize = le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
+ chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
busw = 0;
if (le16_to_cpu(p->features) & 1)
busw = NAND_BUSWIDTH_16;
@@ -3157,7 +3161,7 @@ ident_done:
printk(KERN_INFO "NAND device: Manufacturer ID:"
" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
nand_manuf_ids[maf_idx].name,
- chip->onfi_version ? type->name : chip->onfi_params.model);
+ chip->onfi_version ? chip->onfi_params.model : type->name);
return type;
}
@@ -3435,6 +3439,7 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->resume = nand_resume;
mtd->block_isbad = nand_block_isbad;
mtd->block_markbad = nand_block_markbad;
+ mtd->writebufsize = mtd->writesize;
/* propagate ecc.layout to mtd_info */
mtd->ecclayout = chip->ecc.layout;
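
    The ONFI revision word handled above is a bit mask with one bit per supported
    specification revision, and the highest set bit wins. A small illustrative
    decoder (not the kernel function itself) makes the mapping explicit:

    #include <linux/types.h>

    static int example_onfi_version(u16 revision)
    {
            if (revision & (1 << 5))
                    return 23;      /* ONFI 2.3 */
            if (revision & (1 << 4))
                    return 22;      /* ONFI 2.2 */
            if (revision & (1 << 3))
                    return 21;      /* ONFI 2.1 */
            if (revision & (1 << 2))
                    return 20;      /* ONFI 2.0 */
            if (revision & (1 << 1))
                    return 10;      /* ONFI 1.0 */
            return 0;               /* unknown mask, treated as unsupported */
    }

    The chipsize change in the same hunk is related in spirit: one operand of
    blocks_per_lun * erasesize is widened to 64 bits before the multiply so the
    product cannot overflow a 32-bit integer on large chips.
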
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 586b981f0e61..6ebd869993aa 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1092,7 +1092,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
/**
* verify_bbt_descr - verify the bad block description
- * @bd: the table to verify
+ * @mtd: MTD device structure
+ * @bd: the table to verify
*
* This functions performs a few sanity checks on the bad block description
* table.
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a6a73aab1253..a5aa99f014ba 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -210,12 +210,12 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
-#define STATE_CMD_PAGEPROG 0x00000004 /* start page programm */
+#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
#define STATE_CMD_STATUS 0x00000007 /* read status */
#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
-#define STATE_CMD_SEQIN 0x00000009 /* sequential data imput */
+#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
#define STATE_CMD_READID 0x0000000A /* read ID */
#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
#define STATE_CMD_RESET 0x0000000C /* reset */
@@ -230,7 +230,7 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
#define STATE_ADDR_MASK 0x00000070 /* address states mask */
-/* Durind data input/output the simulator is in these states */
+/* During data input/output the simulator is in these states */
#define STATE_DATAIN 0x00000100 /* waiting for data input */
#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
@@ -248,7 +248,7 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
/* Simulator's actions bit masks */
#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
-#define ACTION_PRGPAGE 0x00200000 /* programm the internal buffer to flash */
+#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
#define ACTION_SECERASE 0x00300000 /* erase sector */
#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
#define ACTION_HALFOFF 0x00500000 /* add to address half of page */
@@ -263,18 +263,18 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR 0x00000020 /* page number auto inctimentation is possible */
+#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
-/* Remove action bits ftom state */
+/* Remove action bits from state */
#define NS_STATE(x) ((x) & ~ACTION_MASK)
/*
* Maximum previous states which need to be saved. Currently saving is
- * only needed for page programm operation with preceeded read command
+ * only needed for page program operation with preceded read command
* (which is only valid for 512-byte pages).
*/
#define NS_MAX_PREVSTATES 1
@@ -380,16 +380,16 @@ static struct nandsim_operations {
/* Read OOB */
{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
- /* Programm page starting from the beginning */
+ /* Program page starting from the beginning */
{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
- /* Programm page starting from the beginning */
+ /* Program page starting from the beginning */
{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
- /* Programm page starting from the second half */
+ /* Program page starting from the second half */
{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
- /* Programm OOB */
+ /* Program OOB */
{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
/* Erase sector */
@@ -470,7 +470,7 @@ static int alloc_device(struct nandsim *ns)
err = -EINVAL;
goto err_close;
}
- ns->pages_written = vmalloc(ns->geom.pgnum);
+ ns->pages_written = vzalloc(ns->geom.pgnum);
if (!ns->pages_written) {
NS_ERR("alloc_device: unable to allocate pages written array\n");
err = -ENOMEM;
@@ -483,7 +483,6 @@ static int alloc_device(struct nandsim *ns)
goto err_free;
}
ns->cfile = cfile;
- memset(ns->pages_written, 0, ns->geom.pgnum);
return 0;
}
@@ -1171,9 +1170,9 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
* of supported operations.
*
* Operation can be unknown because of the following.
- * 1. New command was accepted and this is the firs call to find the
+ * 1. New command was accepted and this is the first call to find the
* correspondent states chain. In this case ns->npstates = 0;
- * 2. There is several operations which begin with the same command(s)
+ * 2. There are several operations which begin with the same command(s)
* (for example program from the second half and read from the
* second half operations both begin with the READ1 command). In this
* case the ns->pstates[] array contains previous states.
@@ -1186,7 +1185,7 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
* ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is
* zeroed).
*
- * If there are several maches, the current state is pushed to the
+ * If there are several matches, the current state is pushed to the
* ns->pstates.
*
* The operation can be unknown only while commands are input to the chip.
@@ -1195,10 +1194,10 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
* operation is searched using the following pattern:
* ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
*
- * It is supposed that this pattern must either match one operation on
+ * It is supposed that this pattern must either match one operation or
* none. There can't be ambiguity in that case.
*
- * If no matches found, the functions does the following:
+ * If no matches found, the function does the following:
* 1. if there are saved states present, try to ignore them and search
* again only using the last command. If nothing was found, switch
* to the STATE_READY state.
@@ -1668,7 +1667,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
case ACTION_PRGPAGE:
/*
- * Programm page - move internal buffer data to the page.
+ * Program page - move internal buffer data to the page.
*/
if (ns->lines.wp) {
@@ -1933,7 +1932,7 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
NS_DBG("read_byte: all bytes were read\n");
/*
- * The OPT_AUTOINCR allows to read next conseqitive pages without
+ * The OPT_AUTOINCR allows to read next consecutive pages without
* new read operation cycle.
*/
if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 6ddb2461d740..bb277a54986f 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -107,7 +107,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev,
if (pasemi_nand_mtd)
return -ENODEV;
- pr_debug("pasemi_nand at %llx-%llx\n", res.start, res.end);
+ pr_debug("pasemi_nand at %pR\n", &res);
/* Allocate memory for MTD device structure and private data */
pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 17f8518cc5eb..ea2c288df3f6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -885,6 +885,7 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
/* set info fields needed to __readid */
info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
info->reg_ndcr = ndcr;
+ info->cmdset = &default_cmdset;
if (__readid(info, &id))
return -ENODEV;
@@ -915,7 +916,6 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
- info->cmdset = &default_cmdset;
return 0;
}
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 054a41c0ef4a..ca270a4881a4 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -277,8 +277,9 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
ret = nand_scan_ident(mtd, 1, NULL);
if (!ret) {
if (mtd->writesize >= 512) {
- chip->ecc.size = mtd->writesize;
- chip->ecc.bytes = 3 * (mtd->writesize / 256);
+			/* Hardware ECC: 6 ECC bytes per 512 bytes of data */
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
}
ret = nand_scan_tail(mtd);
}
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index d0894ca7798b..ac31f461cc1c 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -35,6 +35,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
#include <asm/mach/flash.h>
#include <plat/gpmc.h>
@@ -63,8 +64,13 @@ struct omap2_onenand {
int dma_channel;
int freq;
int (*setup)(void __iomem *base, int freq);
+ struct regulator *regulator;
};
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL, };
+#endif
+
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
struct omap2_onenand *c = data;
@@ -108,8 +114,9 @@ static void wait_warn(char *msg, int state, unsigned int ctrl,
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
unsigned int intr = 0;
- unsigned int ctrl;
+ unsigned int ctrl, ctrl_mask;
unsigned long timeout;
u32 syscfg;
@@ -180,7 +187,8 @@ retry:
if (result == 0) {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
- if (ctrl & ONENAND_CTRL_ONGO) {
+ if (ctrl & ONENAND_CTRL_ONGO &&
+ !this->ongoing) {
/*
* The operation seems to be still going
* so give it some more time.
@@ -269,7 +277,11 @@ retry:
return -EIO;
}
- if (ctrl & 0xFE9F)
+ ctrl_mask = 0xFE9F;
+ if (this->ongoing)
+ ctrl_mask &= ~0x8000;
+
+ if (ctrl & ctrl_mask)
wait_warn("unexpected controller status", state, ctrl, intr);
return 0;
@@ -591,6 +603,30 @@ static void omap2_onenand_shutdown(struct platform_device *pdev)
memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
+static int omap2_onenand_enable(struct mtd_info *mtd)
+{
+ int ret;
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+ ret = regulator_enable(c->regulator);
+ if (ret != 0)
+		dev_err(&c->pdev->dev, "can't enable regulator\n");
+
+ return ret;
+}
+
+static int omap2_onenand_disable(struct mtd_info *mtd)
+{
+ int ret;
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+ ret = regulator_disable(c->regulator);
+ if (ret != 0)
+		dev_err(&c->pdev->dev, "can't disable regulator\n");
+
+ return ret;
+}
+
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
struct omap_onenand_platform_data *pdata;
@@ -705,8 +741,18 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
}
+ if (pdata->regulator_can_sleep) {
+ c->regulator = regulator_get(&pdev->dev, "vonenand");
+ if (IS_ERR(c->regulator)) {
+ dev_err(&pdev->dev, "Failed to get regulator\n");
+ goto err_release_dma;
+ }
+ c->onenand.enable = omap2_onenand_enable;
+ c->onenand.disable = omap2_onenand_disable;
+ }
+
if ((r = onenand_scan(&c->mtd, 1)) < 0)
- goto err_release_dma;
+ goto err_release_regulator;
switch ((c->onenand.version_id >> 4) & 0xf) {
case 0:
@@ -727,13 +773,15 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
#ifdef CONFIG_MTD_PARTITIONS
- if (pdata->parts != NULL)
- r = add_mtd_partitions(&c->mtd, pdata->parts,
- pdata->nr_parts);
+ r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
+ if (r > 0)
+ r = add_mtd_partitions(&c->mtd, c->parts, r);
+ else if (pdata->parts != NULL)
+ r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
else
#endif
r = add_mtd_device(&c->mtd);
- if (r < 0)
+ if (r)
goto err_release_onenand;
platform_set_drvdata(pdev, c);
@@ -742,6 +790,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
err_release_onenand:
onenand_release(&c->mtd);
+err_release_regulator:
+ regulator_put(c->regulator);
err_release_dma:
if (c->dma_channel != -1)
omap_free_dma(c->dma_channel);
@@ -757,6 +807,7 @@ err_release_mem_region:
err_free_cs:
gpmc_cs_free(c->gpmc_cs);
err_kfree:
+ kfree(c->parts);
kfree(c);
return r;
@@ -766,18 +817,8 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
- BUG_ON(c == NULL);
-
-#ifdef CONFIG_MTD_PARTITIONS
- if (c->parts)
- del_mtd_partitions(&c->mtd);
- else
- del_mtd_device(&c->mtd);
-#else
- del_mtd_device(&c->mtd);
-#endif
-
onenand_release(&c->mtd);
+ regulator_put(c->regulator);
if (c->dma_channel != -1)
omap_free_dma(c->dma_channel);
omap2_onenand_shutdown(pdev);
@@ -789,6 +830,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
iounmap(c->onenand.base);
release_mem_region(c->phys_base, ONENAND_IO_SIZE);
gpmc_cs_free(c->gpmc_cs);
+ kfree(c->parts);
kfree(c);
return 0;
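
    The probe rework above settles on a common partition-registration order:
    command-line partitions win, platform-data partitions are the fallback, and a
    bare MTD device is registered when neither is available. A minimal sketch of
    that order, with example_* names as placeholders:

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    #ifdef CONFIG_MTD_PARTITIONS
    static const char *example_part_probes[] = { "cmdlinepart", NULL, };
    #endif

    static int example_register_mtd(struct mtd_info *mtd,
                                    struct mtd_partition **dyn_parts,
                                    struct mtd_partition *pdata_parts,
                                    int nr_pdata_parts)
    {
    #ifdef CONFIG_MTD_PARTITIONS
            int r = parse_mtd_partitions(mtd, example_part_probes, dyn_parts, 0);

            if (r > 0)
                    return add_mtd_partitions(mtd, *dyn_parts, r);
            if (pdata_parts)
                    return add_mtd_partitions(mtd, pdata_parts, nr_pdata_parts);
    #endif
            return add_mtd_device(mtd);
    }
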
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 6b3a875647c9..bac41caa8df7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -400,8 +400,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) ||
- ONENAND_IS_4KB_PAGE(this))
+ if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@@ -430,7 +429,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
- if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+ if (ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
dataram = ONENAND_SET_BUFFERRAM0(this);
else
@@ -949,6 +948,8 @@ static int onenand_get_device(struct mtd_info *mtd, int new_state)
if (this->state == FL_READY) {
this->state = new_state;
spin_unlock(&this->chip_lock);
+ if (new_state != FL_PM_SUSPENDED && this->enable)
+ this->enable(mtd);
break;
}
if (new_state == FL_PM_SUSPENDED) {
@@ -975,6 +976,8 @@ static void onenand_release_device(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
+ if (this->state != FL_PM_SUSPENDED && this->disable)
+ this->disable(mtd);
/* Release the chip */
spin_lock(&this->chip_lock);
this->state = FL_READY;
@@ -1353,7 +1356,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
stats = mtd->ecc_stats;
- readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
while (read < len) {
cond_resched();
@@ -1429,7 +1432,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
onenand_get_device(mtd, FL_READING);
- ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+ ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
@@ -1464,7 +1467,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
- ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+ ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, ops) :
onenand_read_ops_nolock(mtd, from, ops);
else
@@ -1485,8 +1488,7 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
{
struct onenand_chip *this = mtd->priv;
unsigned long timeout;
- unsigned int interrupt;
- unsigned int ctrl;
+ unsigned int interrupt, ctrl, ecc, addr1, addr8;
/* The 20 msec is enough */
timeout = jiffies + msecs_to_jiffies(20);
@@ -1498,25 +1500,28 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
/* To get correct interrupt status in timeout case */
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+ addr1 = this->read_word(this->base + ONENAND_REG_START_ADDRESS1);
+ addr8 = this->read_word(this->base + ONENAND_REG_START_ADDRESS8);
if (interrupt & ONENAND_INT_READ) {
- int ecc = onenand_read_ecc(this);
+ ecc = onenand_read_ecc(this);
if (ecc & ONENAND_ECC_2BIT_ALL) {
- printk(KERN_WARNING "%s: ecc error = 0x%04x, "
- "controller error 0x%04x\n",
- __func__, ecc, ctrl);
+ printk(KERN_DEBUG "%s: ecc 0x%04x ctrl 0x%04x "
+ "intr 0x%04x addr1 %#x addr8 %#x\n",
+ __func__, ecc, ctrl, interrupt, addr1, addr8);
return ONENAND_BBT_READ_ECC_ERROR;
}
} else {
- printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
- __func__, ctrl, interrupt);
+ printk(KERN_ERR "%s: read timeout! ctrl 0x%04x "
+ "intr 0x%04x addr1 %#x addr8 %#x\n",
+ __func__, ctrl, interrupt, addr1, addr8);
return ONENAND_BBT_READ_FATAL_ERROR;
}
/* Initial bad block case: 0x2400 or 0x0400 */
if (ctrl & ONENAND_CTRL_ERROR) {
- printk(KERN_DEBUG "%s: controller error = 0x%04x\n",
- __func__, ctrl);
+ printk(KERN_DEBUG "%s: ctrl 0x%04x intr 0x%04x addr1 %#x "
+ "addr8 %#x\n", __func__, ctrl, interrupt, addr1, addr8);
return ONENAND_BBT_READ_ERROR;
}
@@ -1558,7 +1563,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
column = from & (mtd->oobsize - 1);
- readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
while (read < len) {
cond_resched();
@@ -1612,7 +1617,7 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
u_char *oob_buf = this->oob_buf;
int status, i, readcmd;
- readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
this->command(mtd, readcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
@@ -1845,7 +1850,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
const u_char *buf = ops->datbuf;
const u_char *oob = ops->oobbuf;
u_char *oobbuf;
- int ret = 0;
+ int ret = 0, cmd;
DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
__func__, (unsigned int) to, (int) len);
@@ -1954,7 +1959,19 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
ONENAND_SET_NEXT_BUFFERRAM(this);
}
- this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+ this->ongoing = 0;
+ cmd = ONENAND_CMD_PROG;
+
+ /* Exclude 1st OTP and OTP blocks for cache program feature */
+ if (ONENAND_IS_CACHE_PROGRAM(this) &&
+ likely(onenand_block(this, to) != 0) &&
+ ONENAND_IS_4KB_PAGE(this) &&
+ ((written + thislen) < len)) {
+ cmd = ONENAND_CMD_2X_CACHE_PROG;
+ this->ongoing = 1;
+ }
+
+ this->command(mtd, cmd, to, mtd->writesize);
/*
* 2 PLANE, MLC, and Flex-OneNAND wait here
@@ -2067,7 +2084,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
oobbuf = this->oob_buf;
- oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+ oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
/* Loop until all data write */
while (written < len) {
@@ -2086,7 +2103,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
- if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) {
+ if (ONENAND_IS_4KB_PAGE(this)) {
/* Set main area of DataRAM to 0xff*/
memset(this->page_buf, 0xff, mtd->writesize);
this->write_bufferram(mtd, ONENAND_DATARAM,
@@ -2481,7 +2498,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_ERASING);
- if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
+ if (ONENAND_IS_4KB_PAGE(this) || region ||
+ instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
/* region is set for Flex-OneNAND (no mb erase) */
ret = onenand_block_by_block_erase(mtd, instr,
region, block_size);
@@ -3029,7 +3047,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+ ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
@@ -3377,8 +3395,10 @@ static void onenand_check_features(struct mtd_info *mtd)
case ONENAND_DEVICE_DENSITY_4Gb:
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
- else if (numbufs == 1)
+ else if (numbufs == 1) {
this->options |= ONENAND_HAS_4KB_PAGE;
+ this->options |= ONENAND_HAS_CACHE_PROGRAM;
+ }
case ONENAND_DEVICE_DENSITY_2Gb:
/* 2Gb DDP does not have 2 plane */
@@ -3399,7 +3419,11 @@ static void onenand_check_features(struct mtd_info *mtd)
break;
}
- if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+ /* The MLC has 4KiB pagesize. */
+ if (ONENAND_IS_MLC(this))
+ this->options |= ONENAND_HAS_4KB_PAGE;
+
+ if (ONENAND_IS_4KB_PAGE(this))
this->options &= ~ONENAND_HAS_2PLANE;
if (FLEXONENAND(this)) {
@@ -3415,6 +3439,8 @@ static void onenand_check_features(struct mtd_info *mtd)
printk(KERN_DEBUG "Chip has 2 plane\n");
if (this->options & ONENAND_HAS_4KB_PAGE)
printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
+ if (this->options & ONENAND_HAS_CACHE_PROGRAM)
+ printk(KERN_DEBUG "Chip has cache program feature\n");
}
/**
@@ -3831,7 +3857,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
/* We use the full BufferRAM */
- if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+ if (ONENAND_IS_4KB_PAGE(this))
mtd->writesize <<= 1;
mtd->oobsize = mtd->writesize >> 5;
@@ -4054,6 +4080,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->block_isbad = onenand_block_isbad;
mtd->block_markbad = onenand_block_markbad;
mtd->owner = THIS_MODULE;
+ mtd->writebufsize = mtd->writesize;
/* Unlock whole block */
this->unlock_all(mtd);
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 01ab5b3c453b..fc2c16a0fd1c 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -91,16 +91,18 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
for (j = 0; j < len; j++) {
/* No need to read pages fully,
* just read required OOB bytes */
- ret = onenand_bbt_read_oob(mtd, from + j * mtd->writesize + bd->offs, &ops);
+ ret = onenand_bbt_read_oob(mtd,
+ from + j * this->writesize + bd->offs, &ops);
/* If it is a initial bad block, just ignore it */
if (ret == ONENAND_BBT_READ_FATAL_ERROR)
return -EIO;
- if (ret || check_short_pattern(&buf[j * scanlen], scanlen, mtd->writesize, bd)) {
+ if (ret || check_short_pattern(&buf[j * scanlen],
+ scanlen, this->writesize, bd)) {
bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
- printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
- i >> 1, (unsigned int) from);
+ printk(KERN_INFO "OneNAND eraseblock %d is an "
+ "initial bad block\n", i >> 1);
mtd->ecc_stats.badblocks++;
break;
}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 0de7a05e6de0..a4c74a9ba430 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -651,7 +651,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
void __iomem *p;
void *buf = (void *) buffer;
dma_addr_t dma_src, dma_dst;
- int err, page_dma = 0;
+ int err, ofs, page_dma = 0;
struct device *dev = &onenand->pdev->dev;
p = this->base + area;
@@ -677,10 +677,13 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
if (!page)
goto normal;
+ /* Page offset */
+ ofs = ((size_t) buf & ~PAGE_MASK);
page_dma = 1;
+
/* DMA routine */
dma_src = onenand->phys_base + (p - this->base);
- dma_dst = dma_map_page(dev, page, 0, count, DMA_FROM_DEVICE);
+ dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
} else {
/* DMA routine */
dma_src = onenand->phys_base + (p - this->base);
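
    The one-line fix above matters because dma_map_page() maps a whole struct page;
    when the destination buffer is not page-aligned, its offset inside the page has
    to be passed along or the device DMAs to the start of the page. A hedged sketch
    of the mapping step (vmalloc_to_page() is only illustrative of how the page may
    be obtained; "dev", "buf" and "count" stand in for the driver's variables):

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static dma_addr_t example_map_destination(struct device *dev, void *buf,
                                              size_t count)
    {
            struct page *page = vmalloc_to_page(buf);
            unsigned long ofs = (unsigned long)buf & ~PAGE_MASK;

            /* passing the in-page offset keeps the device writing into "buf" */
            return dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
    }
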
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 5ebe280225d6..f49e49dc5928 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -672,7 +672,33 @@ static int io_init(struct ubi_device *ubi)
ubi->nor_flash = 1;
}
- ubi->min_io_size = ubi->mtd->writesize;
+ /*
+ * Set UBI min. I/O size (@ubi->min_io_size). We use @mtd->writebufsize
+ * for these purposes, not @mtd->writesize. At the moment this does not
+ * matter for NAND, because currently @mtd->writebufsize is equivalent to
+ * @mtd->writesize for all NANDs. However, some CFI NOR flashes may
+	 * have @mtd->writebufsize which is a multiple of @mtd->writesize.
+ *
+ * The reason we use @mtd->writebufsize for @ubi->min_io_size is that
+	 * UBI and UBIFS recovery algorithms rely on the fact that if there was
+	 * an unclean power cut, then we can find the offset of the last corrupted
+	 * node, align the offset to @ubi->min_io_size, read the rest of the
+	 * eraseblock starting from this offset, and check whether there are
+	 * only 0xFF bytes. If yes, then we are probably dealing with a
+	 * corruption caused by a power cut; if not, then this is probably some
+	 * severe corruption.
+ *
+ * Thus, we have to use the maximum write unit size of the flash, which
+ * is @mtd->writebufsize, because @mtd->writesize is the minimum write
+ * size, not the maximum.
+ */
+ if (ubi->mtd->type == MTD_NANDFLASH)
+ ubi_assert(ubi->mtd->writebufsize == ubi->mtd->writesize);
+ else if (ubi->mtd->type == MTD_NORFLASH)
+ ubi_assert(ubi->mtd->writebufsize % ubi->mtd->writesize == 0);
+
+ ubi->min_io_size = ubi->mtd->writebufsize;
+
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
/*
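
    To illustrate the recovery property the new comment relies on (this is not UBI
    code): after an unclean power cut, everything from the min_io_size-aligned end
    of the last good node up to the end of the eraseblock is expected to read back
    as 0xFF, which is why min_io_size must be the largest write unit of the flash.

    #include <linux/kernel.h>
    #include <linux/types.h>

    static int example_tail_is_erased(const u8 *peb, size_t peb_size,
                                      size_t last_node_end, size_t min_io_size)
    {
            size_t offs = ALIGN(last_node_end, min_io_size);

            for (; offs < peb_size; offs++)
                    if (peb[offs] != 0xFF)
                            return 0;       /* real corruption, not just a power cut */

            return 1;                       /* looks like an interrupted write */
    }
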
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index fcdb7f65fe0b..0b8141fc5c26 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -425,12 +425,11 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
/* Read both LEB 0 and LEB 1 into memory */
ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- leb[seb->lnum] = vmalloc(ubi->vtbl_size);
+ leb[seb->lnum] = vzalloc(ubi->vtbl_size);
if (!leb[seb->lnum]) {
err = -ENOMEM;
goto out_free;
}
- memset(leb[seb->lnum], 0, ubi->vtbl_size);
err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
ubi->vtbl_size);
@@ -516,10 +515,9 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
int i;
struct ubi_vtbl_record *vtbl;
- vtbl = vmalloc(ubi->vtbl_size);
+ vtbl = vzalloc(ubi->vtbl_size);
if (!vtbl)
return ERR_PTR(-ENOMEM);
- memset(vtbl, 0, ubi->vtbl_size);
for (i = 0; i < ubi->vtbl_slots; i++)
memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
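
    The vzalloc() conversions above (and the one in nandsim.c earlier) follow a
    single pattern: one call that returns zeroed, virtually contiguous memory
    replaces a vmalloc()+memset() pair. A minimal before/after sketch:

    #include <linux/string.h>
    #include <linux/vmalloc.h>

    static void *example_alloc_old(size_t size)
    {
            void *p = vmalloc(size);

            if (p)
                    memset(p, 0, size);
            return p;
    }

    static void *example_alloc_new(size_t size)
    {
            return vzalloc(size);   /* zeroed by the allocator */
    }
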
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4c8bfc97fb4c..16fe4f9b719b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3389,8 +3389,7 @@ config NETCONSOLE
config NETCONSOLE_DYNAMIC
bool "Dynamic reconfiguration of logging targets"
- depends on NETCONSOLE && SYSFS
- select CONFIGFS_FS
+ depends on NETCONSOLE && SYSFS && CONFIGFS_FS
help
This option enables the ability to dynamically reconfigure target
parameters (interface, IP addresses, port numbers, MAC addresses)
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 54c6d849cf25..62d6f88cbab5 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -854,12 +854,12 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value)
}
/**
- * ks8695_get_settings - Get device-specific settings.
+ * ks8695_wan_get_settings - Get device-specific settings.
* @ndev: The network device to read settings from
* @cmd: The ethtool structure to read into
*/
static int
-ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
@@ -870,69 +870,50 @@ ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
SUPPORTED_TP | SUPPORTED_MII);
cmd->transceiver = XCVR_INTERNAL;
- /* Port specific extras */
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- cmd->phy_address = 0;
- /* not supported for HPNA */
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ cmd->port = PORT_MII;
+ cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
+ cmd->phy_address = 0;
- /* BUG: Erm, dtype hpna implies no phy regs */
- /*
- ctrl = readl(KS8695_MISC_VA + KS8695_HMC);
- cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10;
- cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF;
- */
- return -EOPNOTSUPP;
- case KS8695_DTYPE_WAN:
- cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
- cmd->port = PORT_MII;
- cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
- cmd->phy_address = 0;
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ if ((ctrl & WMC_WAND) == 0) {
+ /* auto-negotiation is enabled */
+ cmd->advertising |= ADVERTISED_Autoneg;
+ if (ctrl & WMC_WANA100F)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ if (ctrl & WMC_WANA100H)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (ctrl & WMC_WANA10F)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (ctrl & WMC_WANA10H)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (ctrl & WMC_WANAP)
+ cmd->advertising |= ADVERTISED_Pause;
+ cmd->autoneg = AUTONEG_ENABLE;
+
+ cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
+ cmd->duplex = (ctrl & WMC_WDS) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ /* auto-negotiation is disabled */
+ cmd->autoneg = AUTONEG_DISABLE;
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- if ((ctrl & WMC_WAND) == 0) {
- /* auto-negotiation is enabled */
- cmd->advertising |= ADVERTISED_Autoneg;
- if (ctrl & WMC_WANA100F)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- if (ctrl & WMC_WANA100H)
- cmd->advertising |= ADVERTISED_100baseT_Half;
- if (ctrl & WMC_WANA10F)
- cmd->advertising |= ADVERTISED_10baseT_Full;
- if (ctrl & WMC_WANA10H)
- cmd->advertising |= ADVERTISED_10baseT_Half;
- if (ctrl & WMC_WANAP)
- cmd->advertising |= ADVERTISED_Pause;
- cmd->autoneg = AUTONEG_ENABLE;
-
- cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
- cmd->duplex = (ctrl & WMC_WDS) ?
- DUPLEX_FULL : DUPLEX_HALF;
- } else {
- /* auto-negotiation is disabled */
- cmd->autoneg = AUTONEG_DISABLE;
-
- cmd->speed = (ctrl & WMC_WANF100) ?
- SPEED_100 : SPEED_10;
- cmd->duplex = (ctrl & WMC_WANFF) ?
- DUPLEX_FULL : DUPLEX_HALF;
- }
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
+ cmd->speed = (ctrl & WMC_WANF100) ?
+ SPEED_100 : SPEED_10;
+ cmd->duplex = (ctrl & WMC_WANFF) ?
+ DUPLEX_FULL : DUPLEX_HALF;
}
return 0;
}
/**
- * ks8695_set_settings - Set device-specific settings.
+ * ks8695_wan_set_settings - Set device-specific settings.
* @ndev: The network device to configure
* @cmd: The settings to configure
*/
static int
-ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
@@ -956,171 +937,85 @@ ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
ADVERTISED_100baseT_Full)) == 0)
return -EINVAL;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* HPNA does not support auto-negotiation. */
- return -EINVAL;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
- WMC_WANA10F | WMC_WANA10H);
- if (cmd->advertising & ADVERTISED_100baseT_Full)
- ctrl |= WMC_WANA100F;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
- ctrl |= WMC_WANA100H;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
- ctrl |= WMC_WANA10F;
- if (cmd->advertising & ADVERTISED_10baseT_Half)
- ctrl |= WMC_WANA10H;
-
- /* force a re-negotiation */
- ctrl |= WMC_WANR;
- writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
+ WMC_WANA10F | WMC_WANA10H);
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ ctrl |= WMC_WANA100F;
+ if (cmd->advertising & ADVERTISED_100baseT_Half)
+ ctrl |= WMC_WANA100H;
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ ctrl |= WMC_WANA10F;
+ if (cmd->advertising & ADVERTISED_10baseT_Half)
+ ctrl |= WMC_WANA10H;
+
+ /* force a re-negotiation */
+ ctrl |= WMC_WANR;
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
} else {
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* BUG: dtype_hpna implies no phy registers */
- /*
- ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);
-
- ctrl &= ~(HMC_HSS | HMC_HDS);
- if (cmd->speed == SPEED_100)
- ctrl |= HMC_HSS;
- if (cmd->duplex == DUPLEX_FULL)
- ctrl |= HMC_HDS;
-
- __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
- */
- return -EOPNOTSUPP;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- /* disable auto-negotiation */
- ctrl |= WMC_WAND;
- ctrl &= ~(WMC_WANF100 | WMC_WANFF);
-
- if (cmd->speed == SPEED_100)
- ctrl |= WMC_WANF100;
- if (cmd->duplex == DUPLEX_FULL)
- ctrl |= WMC_WANFF;
-
- writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ /* disable auto-negotiation */
+ ctrl |= WMC_WAND;
+ ctrl &= ~(WMC_WANF100 | WMC_WANFF);
+
+ if (cmd->speed == SPEED_100)
+ ctrl |= WMC_WANF100;
+ if (cmd->duplex == DUPLEX_FULL)
+ ctrl |= WMC_WANFF;
+
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
}
return 0;
}
/**
- * ks8695_nwayreset - Restart the autonegotiation on the port.
+ * ks8695_wan_nwayreset - Restart the autonegotiation on the port.
* @ndev: The network device to restart autoneotiation on
*/
static int
-ks8695_nwayreset(struct net_device *ndev)
+ks8695_wan_nwayreset(struct net_device *ndev)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* No phy means no autonegotiation on hpna */
- return -EINVAL;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- if ((ctrl & WMC_WAND) == 0)
- writel(ctrl | WMC_WANR,
- ksp->phyiface_regs + KS8695_WMC);
- else
- /* auto-negotiation not enabled */
- return -EINVAL;
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-/**
- * ks8695_get_link - Retrieve link status of network interface
- * @ndev: The network interface to retrive the link status of.
- */
-static u32
-ks8695_get_link(struct net_device *ndev)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- u32 ctrl;
+ if ((ctrl & WMC_WAND) == 0)
+ writel(ctrl | WMC_WANR,
+ ksp->phyiface_regs + KS8695_WMC);
+ else
+ /* auto-negotiation not enabled */
+ return -EINVAL;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* HPNA always has link */
- return 1;
- case KS8695_DTYPE_WAN:
- /* WAN we can read the PHY for */
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- return ctrl & WMC_WLS;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
return 0;
}
/**
- * ks8695_get_pause - Retrieve network pause/flow-control advertising
+ * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
* @ndev: The device to retrieve settings from
* @param: The structure to fill out with the information
*/
static void
-ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
+ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* No phy link on hpna to configure */
- return;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- /* advertise Pause */
- param->autoneg = (ctrl & WMC_WANAP);
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- /* current Rx Flow-control */
- ctrl = ks8695_readreg(ksp, KS8695_DRXC);
- param->rx_pause = (ctrl & DRXC_RFCE);
+ /* advertise Pause */
+ param->autoneg = (ctrl & WMC_WANAP);
- /* current Tx Flow-control */
- ctrl = ks8695_readreg(ksp, KS8695_DTXC);
- param->tx_pause = (ctrl & DTXC_TFCE);
- break;
- case KS8695_DTYPE_LAN:
- /* The LAN's "phy" is a direct-attached switch */
- return;
- }
-}
+ /* current Rx Flow-control */
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+ param->rx_pause = (ctrl & DRXC_RFCE);
-/**
- * ks8695_set_pause - Configure pause/flow-control
- * @ndev: The device to configure
- * @param: The pause parameters to set
- *
- * TODO: Implement this
- */
-static int
-ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
-{
- return -EOPNOTSUPP;
+ /* current Tx Flow-control */
+ ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+ param->tx_pause = (ctrl & DTXC_TFCE);
}
/**
@@ -1140,12 +1035,17 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
static const struct ethtool_ops ks8695_ethtool_ops = {
.get_msglevel = ks8695_get_msglevel,
.set_msglevel = ks8695_set_msglevel,
- .get_settings = ks8695_get_settings,
- .set_settings = ks8695_set_settings,
- .nway_reset = ks8695_nwayreset,
- .get_link = ks8695_get_link,
- .get_pauseparam = ks8695_get_pause,
- .set_pauseparam = ks8695_set_pause,
+ .get_drvinfo = ks8695_get_drvinfo,
+};
+
+static const struct ethtool_ops ks8695_wan_ethtool_ops = {
+ .get_msglevel = ks8695_get_msglevel,
+ .set_msglevel = ks8695_set_msglevel,
+ .get_settings = ks8695_wan_get_settings,
+ .set_settings = ks8695_wan_set_settings,
+ .nway_reset = ks8695_wan_nwayreset,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = ks8695_wan_get_pause,
.get_drvinfo = ks8695_get_drvinfo,
};
@@ -1541,7 +1441,6 @@ ks8695_probe(struct platform_device *pdev)
/* driver system setup */
ndev->netdev_ops = &ks8695_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
@@ -1608,12 +1507,15 @@ ks8695_probe(struct platform_device *pdev)
if (ksp->phyiface_regs && ksp->link_irq == -1) {
ks8695_init_switch(ksp);
ksp->dtype = KS8695_DTYPE_LAN;
+ SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
ks8695_init_wan_phy(ksp);
ksp->dtype = KS8695_DTYPE_WAN;
+ SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
} else {
/* No initialisation since HPNA does not have a PHY */
ksp->dtype = KS8695_DTYPE_HPNA;
+ SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
}
/* And bring up the net_device with the net core */
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 0b9fc5173aef..22abfb39d813 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1284,19 +1284,12 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
{
u32 emac_hashhi, emac_hashlo;
struct netdev_hw_addr *ha;
- char *addrs;
u32 crc;
emac_hashhi = emac_hashlo = 0;
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- /* skip non-multicast addresses */
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc(ETH_ALEN, addrs);
+ crc = ether_crc(ETH_ALEN, ha->addr);
crc >>= 26;
if (crc & 0x20)
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 99be5ae91991..142d6047da27 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -275,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
- memset(ioc_attr, 0, sizeof(*ioc_attr));
spin_lock_irqsave(&bnad->bna_lock, flags);
bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 7206ab2cbbf8..3437613f0454 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3203,7 +3203,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
int mac_off = 0;
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
const unsigned char *addr;
#endif
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
if (found & VPD_FOUND_MAC)
goto done;
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
addr = of_get_property(cp->of_node, "local-mac-address", NULL);
if (addr != NULL) {
memcpy(dev_addr, addr, 6);
@@ -5031,7 +5031,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
cassini_debug;
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
cp->of_node = pci_device_to_OF_node(pdev);
#endif
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index de69c54301c1..bfab14092d2c 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3478,9 +3478,17 @@ static irqreturn_t e1000_intr(int irq, void *data)
struct e1000_hw *hw = &adapter->hw;
u32 icr = er32(ICR);
- if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
+ if (unlikely((!icr)))
return IRQ_NONE; /* Not our interrupt */
+ /*
+	 * We might have caused the interrupt, but the above read cleared
+	 * it. If the driver is going down, there is nothing left to do,
+	 * so return handled.
+ */
+ if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
+ return IRQ_HANDLED;
+
if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
/* guard against interrupt when we're going down */
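
    The hunk above changes what the handler returns, not what it does: reading ICR
    clears it, so a non-zero value means the interrupt was ours and must be reported
    as handled even when the driver is going down, while IRQ_NONE is reserved for
    interrupts raised by another device sharing the line. A self-contained sketch
    with hypothetical example_* state standing in for the adapter:

    #include <linux/interrupt.h>
    #include <linux/types.h>

    static bool example_going_down;
    static u32 example_icr;

    static u32 example_read_and_clear_icr(void)
    {
            u32 icr = example_icr;

            example_icr = 0;        /* mimics the read-to-clear ICR register */
            return icr;
    }

    static irqreturn_t example_isr(int irq, void *data)
    {
            u32 icr = example_read_and_clear_icr();

            if (!icr)
                    return IRQ_NONE;        /* not our interrupt */

            if (example_going_down)
                    return IRQ_HANDLED;     /* ours, but nothing left to do */

            /* normal Rx/Tx and link handling would go here */
            return IRQ_HANDLED;
    }
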
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 1397da118f0d..89a69035e538 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -1310,7 +1310,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
* apply workaround for hardware errata documented in errata
* docs Fixes issue where some error prone or unreliable PCIe
* completions are occurring, particularly with ASPM enabled.
- * Without fix, issue can cause tx timeouts.
+ * Without fix, issue can cause Tx timeouts.
*/
reg = er32(GCR2);
reg |= 1;
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
index 360c91369f35..28519acacd2d 100644
--- a/drivers/net/e1000e/Makefile
+++ b/drivers/net/e1000e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2008 Intel Corporation.
+# Copyright(c) 1999 - 2011 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 7245dc2e0b7c..13149983d07e 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 5255be753746..e610e1369053 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index e45a61c8930a..2fefa820302b 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index f8ed03dab9b1..fa08b6336cfb 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index e774380c7cec..bc0860a598c9 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -102,7 +102,7 @@ enum e1e_registers {
E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
- E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
+ E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
/* Convenience macros
*
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 5bb65b7382db..fb46974cfec1 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index ff2872153b21..68aa1749bf66 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -533,7 +533,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
mac->autoneg_failed = 1;
return 0;
}
- e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+ e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -556,7 +556,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
*/
- e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -598,7 +598,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
mac->autoneg_failed = 1;
return 0;
}
- e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+ e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -621,7 +621,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
*/
- e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -800,9 +800,9 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
+ * but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames but we
- * do not support receiving pause frames).
+ * do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
*/
switch (hw->fc.current_mode) {
@@ -1031,9 +1031,9 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause
- * frames but not send pause frames).
+ * frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * frames but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
@@ -1189,7 +1189,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
e_dbg("Flow Control = "
- "RX PAUSE frames only.\r\n");
+ "Rx PAUSE frames only.\r\n");
}
}
/*
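The lib.c hunks above only reword the comments that enumerate the hw->fc.current_mode values (0 = disabled, 1 = Rx pause only, 2 = Tx pause only, 3 = symmetric). A minimal standalone C sketch of that mapping, with made-up enum and helper names rather than the driver's:

#include <stdio.h>

enum fc_mode { FC_NONE = 0, FC_RX_PAUSE = 1, FC_TX_PAUSE = 2, FC_FULL = 3 };

static const char *fc_describe(enum fc_mode mode)
{
	switch (mode) {
	case FC_NONE:     return "flow control disabled";
	case FC_RX_PAUSE: return "Rx pause only: receive pause frames, never send them";
	case FC_TX_PAUSE: return "Tx pause only: send pause frames, ignore received ones";
	case FC_FULL:     return "symmetric Rx/Tx flow control";
	}
	return "unknown";
}

int main(void)
{
	for (int m = FC_NONE; m <= FC_FULL; m++)
		printf("%d: %s\n", m, fc_describe((enum fc_mode)m));
	return 0;
}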
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fa5b60452547..1c18f26b0812 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -77,17 +77,17 @@ struct e1000_reg_info {
char *name;
};
-#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
-#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
@@ -99,7 +99,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
/* Interrupt Registers */
{E1000_ICR, "ICR"},
- /* RX Registers */
+ /* Rx Registers */
{E1000_RCTL, "RCTL"},
{E1000_RDLEN, "RDLEN"},
{E1000_RDH, "RDH"},
@@ -115,7 +115,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{E1000_RDFTS, "RDFTS"},
{E1000_RDFPC, "RDFPC"},
- /* TX Registers */
+ /* Tx Registers */
{E1000_TCTL, "TCTL"},
{E1000_TDBAL, "TDBAL"},
{E1000_TDBAH, "TDBAH"},
@@ -160,7 +160,7 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
break;
default:
printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, __er32(hw, reginfo->ofs));
+ reginfo->name, __er32(hw, reginfo->ofs));
return;
}
@@ -171,9 +171,8 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
printk(KERN_CONT "\n");
}
-
/*
- * e1000e_dump - Print registers, tx-ring and rx-ring
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
*/
static void e1000e_dump(struct e1000_adapter *adapter)
{
@@ -182,12 +181,20 @@ static void e1000e_dump(struct e1000_adapter *adapter)
struct e1000_reg_info *reginfo;
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_tx_desc *tx_desc;
- struct my_u0 { u64 a; u64 b; } *u0;
+ struct my_u0 {
+ u64 a;
+ u64 b;
+ } *u0;
struct e1000_buffer *buffer_info;
struct e1000_ring *rx_ring = adapter->rx_ring;
union e1000_rx_desc_packet_split *rx_desc_ps;
struct e1000_rx_desc *rx_desc;
- struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+ struct my_u1 {
+ u64 a;
+ u64 b;
+ u64 c;
+ u64 d;
+ } *u1;
u32 staterr;
int i = 0;
@@ -198,12 +205,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
+ "trans_start last_rx\n");
printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name,
- netdev->state,
- netdev->trans_start,
- netdev->last_rx);
+ netdev->name, netdev->state, netdev->trans_start,
+ netdev->last_rx);
}
/* Print Registers */
@@ -214,26 +219,26 @@ static void e1000e_dump(struct e1000_adapter *adapter)
e1000_regdump(hw, reginfo);
}
- /* Print TX Ring Summary */
+ /* Print Tx Ring Summary */
if (!netdev || !netif_running(netdev))
goto exit;
- dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ " leng ntw timestamp\n");
buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
- 0, tx_ring->next_to_use, tx_ring->next_to_clean,
- (unsigned long long)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp);
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp);
- /* Print TX Rings */
+ /* Print Tx Ring */
if (!netif_msg_tx_done(adapter))
goto rx_ring_summary;
- dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+ dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
*
@@ -263,22 +268,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
* 63 48 47 40 39 36 35 32 31 24 23 20 19 0
*/
printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Legacy format\n");
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Legacy format\n");
printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Context format\n");
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Context format\n");
printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Data format\n");
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Data format\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
- "%04X %3X %016llX %p",
- (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+ "%04X %3X %016llX %p",
+ (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
(unsigned long long)le64_to_cpu(u0->a),
(unsigned long long)le64_to_cpu(u0->b),
(unsigned long long)buffer_info->dma,
@@ -296,22 +301,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
- buffer_info->length, true);
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
}
- /* Print RX Rings Summary */
+ /* Print Rx Ring Summary */
rx_ring_summary:
- dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
printk(KERN_INFO "Queue [NTU] [NTC]\n");
printk(KERN_INFO " %5d %5X %5X\n", 0,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ rx_ring->next_to_use, rx_ring->next_to_clean);
- /* Print RX Rings */
+ /* Print Rx Ring */
if (!netif_msg_rx_status(adapter))
goto exit;
- dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
switch (adapter->rx_ps_pages) {
case 1:
case 2:
@@ -329,7 +334,7 @@ rx_ring_summary:
* +-----------------------------------------------------+
*/
printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
- "[buffer 1 63:0 ] "
+ "[buffer 1 63:0 ] "
"[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
"[bi->skb] <-- Ext Pkt Split format\n");
/* [Extended] Receive Descriptor (Write-Back) Format
@@ -344,7 +349,7 @@ rx_ring_summary:
* 63 48 47 32 31 20 19 0
*/
printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
- "[vl l0 ee es] "
+ "[vl l0 ee es] "
"[ l3 l2 l1 hs] [reserved ] ---------------- "
"[bi->skb] <-- Ext Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
@@ -352,26 +357,26 @@ rx_ring_summary:
rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc_ps;
staterr =
- le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX %016llX %016llX "
- "---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- buffer_info->skb);
+ "%016llX %016llX %016llX "
+ "---------------- %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ buffer_info->skb);
} else {
printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ "%016llX %016llX %016llX %016llX %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -400,18 +405,18 @@ rx_ring_summary:
* 63 48 47 40 39 32 31 16 15 0
*/
printk(KERN_INFO "Rl[desc] [address 63:0 ] "
- "[vl er S cks ln] [bi->dma ] [bi->skb] "
- "<-- Legacy format\n");
+ "[vl er S cks ln] [bi->dma ] [bi->skb] "
+ "<-- Legacy format\n");
for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
rx_desc = E1000_RX_DESC(*rx_ring, i);
buffer_info = &rx_ring->buffer_info[i];
u0 = (struct my_u0 *)rx_desc;
printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
- "%016llX %p", i,
- (unsigned long long)le64_to_cpu(u0->a),
- (unsigned long long)le64_to_cpu(u0->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ "%016llX %p", i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
if (i == rx_ring->next_to_use)
printk(KERN_CONT " NTU\n");
else if (i == rx_ring->next_to_clean)
@@ -421,9 +426,10 @@ rx_ring_summary:
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
- adapter->rx_buffer_len, true);
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(buffer_info->dma),
+ adapter->rx_buffer_len, true);
}
}
@@ -450,8 +456,7 @@ static int e1000_desc_unused(struct e1000_ring *ring)
* @skb: pointer to sk_buff to be indicated to stack
**/
static void e1000_receive_skb(struct e1000_adapter *adapter,
- struct net_device *netdev,
- struct sk_buff *skb,
+ struct net_device *netdev, struct sk_buff *skb,
u8 status, __le16 vlan)
{
skb->protocol = eth_type_trans(skb, netdev);
@@ -464,7 +469,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
}
/**
- * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * e1000_rx_checksum - Receive Checksum Offload
* @adapter: board private structure
* @status_err: receive descriptor status and error fields
* @csum: receive descriptor csum field
@@ -548,7 +553,7 @@ map_skb:
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- dev_err(&pdev->dev, "RX DMA map failed\n");
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
adapter->rx_dma_failed++;
break;
}
@@ -601,7 +606,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
ps_page = &buffer_info->ps_pages[j];
if (j >= adapter->rx_ps_pages) {
/* all unused desc entries get hw null ptr */
- rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
+ rx_desc->read.buffer_addr[j + 1] =
+ ~cpu_to_le64(0);
continue;
}
if (!ps_page->page) {
@@ -617,7 +623,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
if (dma_mapping_error(&pdev->dev,
ps_page->dma)) {
dev_err(&adapter->pdev->dev,
- "RX DMA page map failed\n");
+ "Rx DMA page map failed\n");
adapter->rx_dma_failed++;
goto no_buffers;
}
@@ -627,8 +633,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
* didn't change because each write-back
* erases this info.
*/
- rx_desc->read.buffer_addr[j+1] =
- cpu_to_le64(ps_page->dma);
+ rx_desc->read.buffer_addr[j + 1] =
+ cpu_to_le64(ps_page->dma);
}
skb = netdev_alloc_skb_ip_align(netdev,
@@ -644,7 +650,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
adapter->rx_ps_bsize0,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- dev_err(&pdev->dev, "RX DMA map failed\n");
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
dev_kfree_skb_any(skb);
@@ -662,7 +668,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
* such as IA-64).
*/
wmb();
- writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+ writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
}
i++;
@@ -1106,11 +1112,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_ps_bsize0,
- DMA_FROM_DEVICE);
+ adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
buffer_info->dma = 0;
- /* see !EOP comment in other rx routine */
+ /* see !EOP comment in other Rx routine */
if (!(staterr & E1000_RXD_STAT_EOP))
adapter->flags2 |= FLAG2_IS_DISCARDING;
@@ -2610,7 +2615,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
}
/**
- * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * e1000_configure_tx - Configure Transmit Unit after Reset
* @adapter: board private structure
*
* Configure the Tx unit of the MAC after a reset.
@@ -2663,7 +2668,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
* hthresh = 1 ==> prefetch when one or more available
* pthresh = 0x1f ==> prefetch if internal cache 31 or less
* BEWARE: this seems to work but should be considered first if
- * there are tx hangs or other tx related bugs
+ * there are Tx hangs or other Tx related bugs
*/
txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
ew32(TXDCTL(0), txdctl);
@@ -2877,7 +2882,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->rx_ps_pages) {
/* this is a 32 byte descriptor */
rdlen = rx_ring->count *
- sizeof(union e1000_rx_desc_packet_split);
+ sizeof(union e1000_rx_desc_packet_split);
adapter->clean_rx = e1000_clean_rx_irq_ps;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
@@ -2900,7 +2905,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/*
* set the writeback threshold (only takes effect if the RDTR
* is set). set GRAN=1 and write back up to 0x4 worth, and
- * enable prefetching of 0x20 rx descriptors
+ * enable prefetching of 0x20 Rx descriptors
* granularity = 01
* wthresh = 04,
* hthresh = 04,
@@ -2981,12 +2986,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* excessive C-state transition latencies result in
* dropped transactions.
*/
- pm_qos_update_request(
- &adapter->netdev->pm_qos_req, 55);
+ pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
} else {
- pm_qos_update_request(
- &adapter->netdev->pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(&adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
}
@@ -3152,7 +3155,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
/*
- * the Tx fifo also stores 16 bytes of information about the tx
+ * the Tx fifo also stores 16 bytes of information about the Tx
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (adapter->max_frame_size +
@@ -3175,7 +3178,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
pba -= min_tx_space - tx_space;
/*
- * if short on Rx space, Rx wins and must trump tx
+ * if short on Rx space, Rx wins and must trump Tx
* adjustment or use Early Receive if available
*/
if ((pba < min_rx_space) &&
@@ -4039,11 +4042,11 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
adapter->netdev->name,
adapter->link_speed,
(adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex",
+ "Full Duplex" : "Half Duplex",
((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
- "RX/TX" :
- ((ctrl & E1000_CTRL_RFCE) ? "RX" :
- ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
+ "Rx/Tx" :
+ ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -4338,7 +4341,7 @@ link_up:
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = 1;
- /* flush partial descriptors to memory before detecting tx hang */
+ /* flush partial descriptors to memory before detecting Tx hang */
if (adapter->flags2 & FLAG2_DMA_BURST) {
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
@@ -4529,7 +4532,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->next_to_watch = i;
buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, DMA_TO_DEVICE);
+ size, DMA_TO_DEVICE);
buffer_info->mapped_as_page = false;
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -4576,7 +4579,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
}
- segs = skb_shinfo(skb)->gso_segs ?: 1;
+ segs = skb_shinfo(skb)->gso_segs ? : 1;
/* multiply data chunks by size of headers */
bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
@@ -4588,13 +4591,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
return count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(&pdev->dev, "Tx DMA map failed\n");
buffer_info->dma = 0;
if (count)
count--;
while (count--) {
- if (i==0)
+ if (i == 0)
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
@@ -6193,7 +6196,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a9612b0e4bca..4dd9b63273f6 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -62,10 +62,9 @@ MODULE_PARM_DESC(copybreak,
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
-
/*
* Transmit Interrupt Delay in units of 1.024 microseconds
- * Tx interrupt delay needs to typically be set to something non zero
+ * Tx interrupt delay needs to typically be set to something non-zero
*
* Valid Range: 0-65535
*/
@@ -112,6 +111,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
#define MAX_ITR 100000
#define MIN_ITR 100
+
/* IntMode (Interrupt Mode)
*
* Valid Range: 0 - 2
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 00f89e8a9fa0..6bea051b134b 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -640,7 +640,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- /* Enable CRS on TX. This must be set for half-duplex operation. */
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
if (ret_val)
goto out;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6de4675016b5..119aa2000c24 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -434,7 +434,6 @@ static void gfar_init_mac(struct net_device *ndev)
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct netdev_queue *txq;
unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
unsigned long tx_packets = 0, tx_bytes = 0;
int i = 0;
@@ -450,9 +449,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
dev->stats.rx_dropped = rx_dropped;
for (i = 0; i < priv->num_tx_queues; i++) {
- txq = netdev_get_tx_queue(dev, i);
- tx_bytes += txq->tx_bytes;
- tx_packets += txq->tx_packets;
+ tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+ tx_packets += priv->tx_queue[i]->stats.tx_packets;
}
dev->stats.tx_bytes = tx_bytes;
@@ -2109,8 +2107,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Update transmit stats */
- txq->tx_bytes += skb->len;
- txq->tx_packets ++;
+ tx_queue->stats.tx_bytes += skb->len;
+ tx_queue->stats.tx_packets++;
txbdp = txbdp_start = tx_queue->cur_tx;
lstatus = txbdp->lstatus;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 68984eb88ae0..54de4135e932 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -907,12 +907,21 @@ enum {
MQ_MG_MODE
};
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
/**
* struct gfar_priv_tx_q - per tx queue structure
* @txlock: per queue tx spin lock
* @tx_skbuff:skb pointers
* @skb_curtx: to be used skb pointer
* @skb_dirtytx:the last used skb pointer
+ * @stats: bytes/packets stats
* @qindex: index of this queue
* @dev: back pointer to the dev structure
* @grp: back pointer to the group to which this queue belongs
@@ -934,6 +943,7 @@ struct gfar_priv_tx_q {
struct txbd8 *tx_bd_base;
struct txbd8 *cur_tx;
struct txbd8 *dirty_tx;
+ struct tx_q_stats stats;
struct net_device *dev;
struct gfar_priv_grp *grp;
u16 skb_curtx;
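The gianfar hunks above stop using the generic netdev_queue tx_bytes/tx_packets counters and instead keep a small per-queue stats struct in the driver, summed on demand in gfar_get_stats(). A reduced userspace sketch of that aggregation pattern; the struct layout and helper names below are illustrative only:

#include <stdio.h>

#define NUM_TX_QUEUES 4

struct tx_q_stats {
	unsigned long tx_packets;
	unsigned long tx_bytes;
};

struct fake_priv {
	struct tx_q_stats txq[NUM_TX_QUEUES];
	unsigned long tx_packets;	/* aggregated on demand */
	unsigned long tx_bytes;
};

static void fake_xmit(struct fake_priv *priv, int queue, unsigned long len)
{
	priv->txq[queue].tx_packets++;	/* hot path touches only its queue */
	priv->txq[queue].tx_bytes += len;
}

static void fake_get_stats(struct fake_priv *priv)
{
	unsigned long packets = 0, bytes = 0;

	for (int i = 0; i < NUM_TX_QUEUES; i++) {
		packets += priv->txq[i].tx_packets;
		bytes += priv->txq[i].tx_bytes;
	}
	priv->tx_packets = packets;
	priv->tx_bytes = bytes;
}

int main(void)
{
	struct fake_priv priv = { 0 };

	fake_xmit(&priv, 0, 1500);
	fake_xmit(&priv, 3, 60);
	fake_get_stats(&priv);
	printf("tx_packets=%lu tx_bytes=%lu\n", priv.tx_packets, priv.tx_bytes);
	return 0;
}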
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 27d6960ce09e..fdb0333f5cb6 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
/*
* Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
*
- * 2005-2009 (c) Aeroflex Gaisler AB
+ * 2005-2010 (c) Aeroflex Gaisler AB
*
* This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
* available in the GRLIB VHDL IP core library.
@@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev)
dev_dbg(&dev->dev, " starting queue\n");
netif_start_queue(dev);
+ GRETH_REGSAVE(greth->regs->status, 0xFF);
+
napi_enable(&greth->napi);
greth_enable_irqs(greth);
@@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev)
napi_disable(&greth->napi);
+ greth_disable_irqs(greth);
greth_disable_tx(greth);
+ greth_disable_rx(greth);
netif_stop_queue(dev);
@@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
int err = NETDEV_TX_OK;
- u32 status, dma_addr;
+ u32 status, dma_addr, ctrl;
+ unsigned long flags;
- bdp = greth->tx_bd_base + greth->tx_next;
+ /* Clean TX Ring */
+ greth_clean_tx(greth->netdev);
if (unlikely(greth->tx_free <= 0)) {
+ spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Enable TX IRQ only if not already in poll() routine */
+ if (ctrl & GRETH_RXI)
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
netif_stop_queue(dev);
+ spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_BUSY;
}
@@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ bdp = greth->tx_bd_base + greth->tx_next;
dma_addr = greth_read_bd(&bdp->addr);
memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
- status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+ status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
/* Wrap around descriptor ring */
if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
greth->tx_next = NEXT_TX(greth->tx_next);
greth->tx_free--;
- /* No more descriptors */
- if (unlikely(greth->tx_free == 0)) {
-
- /* Free transmitted descriptors */
- greth_clean_tx(dev);
-
- /* If nothing was cleaned, stop queue & wait for irq */
- if (unlikely(greth->tx_free == 0)) {
- status |= GRETH_BD_IE;
- netif_stop_queue(dev);
- }
- }
-
/* Write descriptor control word and enable transmission */
greth_write_bd(&bdp->stat, status);
+ spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
greth_enable_tx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
out:
dev_kfree_skb(skb);
@@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
- u32 status = 0, dma_addr;
+ u32 status = 0, dma_addr, ctrl;
int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+ unsigned long flags;
nr_frags = skb_shinfo(skb)->nr_frags;
+ /* Clean TX Ring */
+ greth_clean_tx_gbit(dev);
+
if (greth->tx_free < nr_frags + 1) {
+ spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Enable TX IRQ only if not already in poll() routine */
+ if (ctrl & GRETH_RXI)
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
netif_stop_queue(dev);
+ spin_unlock_irqrestore(&greth->devlock, flags);
err = NETDEV_TX_BUSY;
goto out;
}
@@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
greth->tx_skbuff[curr_tx] = NULL;
bdp = greth->tx_bd_base + curr_tx;
- status = GRETH_TXBD_CSALL;
+ status = GRETH_TXBD_CSALL | GRETH_BD_EN;
status |= frag->size & GRETH_BD_LEN;
/* Wrap around descriptor ring */
@@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
/* More fragments left */
if (i < nr_frags - 1)
status |= GRETH_TXBD_MORE;
-
- /* ... last fragment, check if out of descriptors */
- else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
-
- /* Enable interrupts and stop queue */
- status |= GRETH_BD_IE;
- netif_stop_queue(dev);
- }
+ else
+ status |= GRETH_BD_IE; /* enable IRQ on last fragment */
greth_write_bd(&bdp->stat, status);
@@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
wmb();
- /* Enable the descriptors that we configured ... */
- for (i = 0; i < nr_frags + 1; i++) {
- bdp = greth->tx_bd_base + greth->tx_next;
- greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
- greth->tx_next = NEXT_TX(greth->tx_next);
- greth->tx_free--;
- }
+ /* Enable the descriptor chain by enabling the first descriptor */
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+ greth->tx_next = curr_tx;
+ greth->tx_free -= nr_frags + 1;
+ wmb();
+
+ spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
greth_enable_tx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_OK;
frag_map_error:
- /* Unmap SKB mappings that succeeded */
+ /* Unmap SKB mappings that succeeded and disable descriptor */
for (i = 0; greth->tx_next + i != curr_tx; i++) {
bdp = greth->tx_bd_base + greth->tx_next + i;
dma_unmap_single(greth->dev,
greth_read_bd(&bdp->addr),
greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
DMA_TO_DEVICE);
+ greth_write_bd(&bdp->stat, 0);
}
map_error:
if (net_ratelimit())
@@ -565,12 +574,11 @@ out:
return err;
}
-
static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct greth_private *greth;
- u32 status;
+ u32 status, ctrl;
irqreturn_t retval = IRQ_NONE;
greth = netdev_priv(dev);
@@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
/* Get the interrupt events that caused us to be here. */
status = GRETH_REGLOAD(greth->regs->status);
- /* Handle rx and tx interrupts through poll */
- if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
-
- /* Clear interrupt status */
- GRETH_REGORIN(greth->regs->status,
- status & (GRETH_INT_RX | GRETH_INT_TX));
+ /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
+ * set regardless of whether IRQ is enabled or not. Especially
+ * important when shared IRQ.
+ */
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Handle rx and tx interrupts through poll */
+ if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+ ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
retval = IRQ_HANDLED;
/* Disable interrupts and schedule poll() */
@@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev)
while (1) {
bdp = greth->tx_bd_base + greth->tx_last;
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+ mb();
stat = greth_read_bd(&bdp->stat);
if (unlikely(stat & GRETH_BD_EN))
@@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev)
/* We only clean fully completed SKBs */
bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
- stat = bdp_last_frag->stat;
+
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+ mb();
+ stat = greth_read_bd(&bdp_last_frag->stat);
if (stat & GRETH_BD_EN)
break;
@@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev)
greth->tx_free += nr_frags+1;
dev_kfree_skb(skb);
}
- if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
- netif_wake_queue(dev);
- }
-}
-static int greth_pending_packets(struct greth_private *greth)
-{
- struct greth_bd *bdp;
- u32 status;
- bdp = greth->rx_bd_base + greth->rx_cur;
- status = greth_read_bd(&bdp->stat);
- if (status & GRETH_BD_EN)
- return 0;
- else
- return 1;
+ if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+ netif_wake_queue(dev);
}
static int greth_rx(struct net_device *dev, int limit)
@@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit)
int pkt_len;
int bad, count;
u32 status, dma_addr;
+ unsigned long flags;
greth = netdev_priv(dev);
for (count = 0; count < limit; ++count) {
bdp = greth->rx_bd_base + greth->rx_cur;
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+ mb();
status = greth_read_bd(&bdp->stat);
- dma_addr = greth_read_bd(&bdp->addr);
- bad = 0;
if (unlikely(status & GRETH_BD_EN)) {
break;
}
+ dma_addr = greth_read_bd(&bdp->addr);
+ bad = 0;
+
/* Check status for errors. */
if (unlikely(status & GRETH_RXBD_STATUS)) {
if (status & GRETH_RXBD_ERR_FT) {
@@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit)
dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+ spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
greth_enable_rx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
greth->rx_cur = NEXT_RX(greth->rx_cur);
}
@@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
int pkt_len;
int bad, count = 0;
u32 status, dma_addr;
+ unsigned long flags;
greth = netdev_priv(dev);
@@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
bdp = greth->rx_bd_base + greth->rx_cur;
skb = greth->rx_skbuff[greth->rx_cur];
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+ mb();
status = greth_read_bd(&bdp->stat);
bad = 0;
@@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
}
}
- /* Allocate new skb to replace current */
- newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
-
- if (!bad && newskb) {
+ /* Allocate new skb to replace current, not needed if the
+ * current skb can be reused */
+ if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
skb_reserve(newskb, NET_IP_ALIGN);
dma_addr = dma_map_single(greth->dev,
@@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
if (net_ratelimit())
dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
dev_kfree_skb(newskb);
+ /* reusing current skb, so it is a drop */
dev->stats.rx_dropped++;
}
+ } else if (bad) {
+ /* Bad Frame transfer, the skb is reused */
+ dev->stats.rx_dropped++;
} else {
+ /* Failed Allocating a new skb. This is rather stupid
+ * but the current "filled" skb is reused, as if
+ * transfer failure. One could argue that RX descriptor
+ * table handling should be divided into cleaning and
+ * filling as the TX part of the driver
+ */
if (net_ratelimit())
dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+ /* reusing current skb, so it is a drop */
dev->stats.rx_dropped++;
}
@@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
wmb();
greth_write_bd(&bdp->stat, status);
+ spin_lock_irqsave(&greth->devlock, flags);
greth_enable_rx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
greth->rx_cur = NEXT_RX(greth->rx_cur);
}
@@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget)
{
struct greth_private *greth;
int work_done = 0;
+ unsigned long flags;
+ u32 mask, ctrl;
greth = container_of(napi, struct greth_private, napi);
- if (greth->gbit_mac) {
- greth_clean_tx_gbit(greth->netdev);
- } else {
- greth_clean_tx(greth->netdev);
+restart_txrx_poll:
+ if (netif_queue_stopped(greth->netdev)) {
+ if (greth->gbit_mac)
+ greth_clean_tx_gbit(greth->netdev);
+ else
+ greth_clean_tx(greth->netdev);
}
-restart_poll:
if (greth->gbit_mac) {
work_done += greth_rx_gbit(greth->netdev, budget - work_done);
} else {
@@ -949,15 +976,29 @@ restart_poll:
if (work_done < budget) {
- napi_complete(napi);
+ spin_lock_irqsave(&greth->devlock, flags);
+
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ if (netif_queue_stopped(greth->netdev)) {
+ GRETH_REGSAVE(greth->regs->control,
+ ctrl | GRETH_TXI | GRETH_RXI);
+ mask = GRETH_INT_RX | GRETH_INT_RE |
+ GRETH_INT_TX | GRETH_INT_TE;
+ } else {
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+ mask = GRETH_INT_RX | GRETH_INT_RE;
+ }
- if (greth_pending_packets(greth)) {
- napi_reschedule(napi);
- goto restart_poll;
+ if (GRETH_REGLOAD(greth->regs->status) & mask) {
+ GRETH_REGSAVE(greth->regs->control, ctrl);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ goto restart_txrx_poll;
+ } else {
+ __napi_complete(napi);
+ spin_unlock_irqrestore(&greth->devlock, flags);
}
}
- greth_enable_irqs(greth);
return work_done;
}
@@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = {
};
static struct net_device_ops greth_netdev_ops = {
- .ndo_open = greth_open,
- .ndo_stop = greth_close,
- .ndo_start_xmit = greth_start_xmit,
- .ndo_set_mac_address = greth_set_mac_add,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_open = greth_open,
+ .ndo_stop = greth_close,
+ .ndo_start_xmit = greth_start_xmit,
+ .ndo_set_mac_address = greth_set_mac_add,
+ .ndo_validate_addr = eth_validate_addr,
};
static inline int wait_for_mdio(struct greth_private *greth)
@@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev)
struct greth_private *greth = netdev_priv(dev);
struct phy_device *phydev = greth->phy;
unsigned long flags;
-
int status_change = 0;
+ u32 ctrl;
spin_lock_irqsave(&greth->devlock, flags);
if (phydev->link) {
if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
-
- GRETH_REGANDIN(greth->regs->control,
- ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
+ ctrl = GRETH_REGLOAD(greth->regs->control) &
+ ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
if (phydev->duplex)
- GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);
-
- if (phydev->speed == SPEED_100) {
-
- GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
- }
+ ctrl |= GRETH_CTRL_FD;
+ if (phydev->speed == SPEED_100)
+ ctrl |= GRETH_CTRL_SP;
else if (phydev->speed == SPEED_1000)
- GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);
+ ctrl |= GRETH_CTRL_GB;
+ GRETH_REGSAVE(greth->regs->control, ctrl);
greth->speed = phydev->speed;
greth->duplex = phydev->duplex;
status_change = 1;
@@ -1600,6 +1638,9 @@ static struct of_device_id greth_of_match[] = {
{
.name = "GAISLER_ETHMAC",
},
+ {
+ .name = "01_01d",
+ },
{},
};
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 03ad903cd676..be0f2062bd14 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -23,6 +23,7 @@
#define GRETH_BD_LEN 0x7FF
#define GRETH_TXEN 0x1
+#define GRETH_INT_TE 0x2
#define GRETH_INT_TX 0x8
#define GRETH_TXI 0x4
#define GRETH_TXBD_STATUS 0x0001C000
@@ -35,6 +36,7 @@
#define GRETH_TXBD_ERR_UE 0x4000
#define GRETH_TXBD_ERR_AL 0x8000
+#define GRETH_INT_RE 0x1
#define GRETH_INT_RX 0x4
#define GRETH_RXEN 0x2
#define GRETH_RXI 0x8
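The reworked greth poll() above re-arms interrupts first and only then re-reads the status register, restarting the poll loop if new work raced in instead of completing NAPI. A single-threaded C model of that re-check-after-arming idea; the status word, bit values and helpers here are stand-ins, not the driver's registers:

#include <stdio.h>

#define INT_RX 0x1
#define INT_TX 0x2

static unsigned int hw_status;		/* pretend device status register */
static unsigned int irq_mask;		/* pretend interrupt enable bits */

static void process_work(void)
{
	if (hw_status & (INT_RX | INT_TX))
		hw_status = 0;		/* "handle" whatever is pending */
}

static void poll(void)
{
	for (;;) {
		process_work();

		irq_mask = INT_RX | INT_TX;	/* re-arm interrupts ... */
		if (hw_status & irq_mask) {	/* ... then look again */
			irq_mask = 0;		/* work raced in: disarm, loop */
			continue;
		}
		break;				/* truly idle: leave poll mode */
	}
}

int main(void)
{
	hw_status = INT_RX;	/* packet arrived before poll() ran */
	poll();
	printf("status=%#x mask=%#x\n", hw_status, irq_mask);
	return 0;
}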
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a060610a42db..602078b84892 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -6667,8 +6667,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
- struct net_device *netdev = tx_ring->netdev;
- struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
u8 hdr_len = 0;
@@ -6765,9 +6763,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
/* add the ATR filter if ATR is on */
if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
ixgbe_atr(tx_ring, skb, tx_flags, protocol);
- txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
- txq->tx_bytes += skb->len;
- txq->tx_packets++;
ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
@@ -6925,8 +6920,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
- /* accurate rx/tx bytes/packets stats */
- dev_txq_stats_fold(netdev, stats);
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
@@ -6943,6 +6936,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
stats->rx_bytes += bytes;
}
}
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+ }
rcu_read_unlock();
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = netdev->stats.multicast;
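The ixgbe hunk above reads each ring's packet/byte counters inside a u64_stats_fetch_begin_bh()/_retry_bh() loop so 64-bit counters are read consistently without locking the Tx path. Below is a rough userspace approximation of that seqcount read loop using C11 atomics; it sketches the idea only and is not the kernel's u64_stats API:

#include <stdatomic.h>
#include <stdio.h>

struct ring_stats {
	atomic_uint seq;	/* even = stable, odd = writer in progress */
	unsigned long long packets;
	unsigned long long bytes;
};

static void ring_stats_update(struct ring_stats *s, unsigned long long len)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);	/* odd */
	s->packets++;
	s->bytes += len;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);	/* even */
}

static void ring_stats_read(const struct ring_stats *s,
			    unsigned long long *packets,
			    unsigned long long *bytes)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*packets = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct ring_stats s = { 0 };
	unsigned long long p, b;

	ring_stats_update(&s, 1500);
	ring_stats_read(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n", p, b);
	return 0;
}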
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 21845affea13..5933621ac3ff 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -585,7 +585,7 @@ err:
rcu_read_lock_bh();
vlan = rcu_dereference(q->vlan);
if (vlan)
- netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+ vlan->dev->stats.tx_dropped++;
rcu_read_unlock_bh();
return err;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index bb8645ab247c..bde7d61f1930 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -554,6 +554,8 @@ struct rtl8169_private {
struct mii_if_info mii;
struct rtl8169_counters counters;
u32 saved_wolopts;
+
+ const struct firmware *fw;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -1766,6 +1768,29 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
}
}
+static void rtl_release_firmware(struct rtl8169_private *tp)
+{
+ release_firmware(tp->fw);
+ tp->fw = NULL;
+}
+
+static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
+{
+ const struct firmware **fw = &tp->fw;
+ int rc = !*fw;
+
+ if (rc) {
+ rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
+ if (rc < 0)
+ goto out;
+ }
+
+ /* TODO: release firmware once rtl_phy_write_fw signals failures. */
+ rtl_phy_write_fw(tp, *fw);
+out:
+ return rc;
+}
+
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -2139,7 +2164,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x0d, 0xf880 }
};
void __iomem *ioaddr = tp->mmio_addr;
- const struct firmware *fw;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
@@ -2203,11 +2227,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x001b);
- if (rtl_readphy(tp, 0x06) == 0xbf00 &&
- request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) {
- rtl_phy_write_fw(tp, fw);
- release_firmware(fw);
- } else {
+ if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
+ (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
}
@@ -2257,7 +2278,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x0d, 0xf880 }
};
void __iomem *ioaddr = tp->mmio_addr;
- const struct firmware *fw;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
@@ -2312,11 +2332,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x001b);
- if (rtl_readphy(tp, 0x06) == 0xb300 &&
- request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) {
- rtl_phy_write_fw(tp, fw);
- release_firmware(fw);
- } else {
+ if ((rtl_readphy(tp, 0x06) != 0xb300) ||
+ (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
}
@@ -3200,6 +3217,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&tp->task);
+ rtl_release_firmware(tp);
+
unregister_netdev(dev);
if (pci_dev_run_wake(pdev))
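The r8169 hunks above introduce rtl_apply_firmware(): the firmware image is requested once, cached in tp->fw, reused on later PHY reconfigurations, and released only in remove(). A sketch of that load-once/cache/release-on-teardown pattern, using plain file I/O in place of request_firmware(); every name below is a stand-in:

#include <stdio.h>
#include <stdlib.h>

struct fake_priv {
	unsigned char *fw;	/* cached blob, NULL until first use */
	long fw_len;
};

static int fake_apply_firmware(struct fake_priv *p, const char *path)
{
	if (!p->fw) {			/* first call: load and cache */
		FILE *f = fopen(path, "rb");

		if (!f)
			return -1;
		fseek(f, 0, SEEK_END);
		p->fw_len = ftell(f);
		rewind(f);
		p->fw = malloc(p->fw_len);
		if (!p->fw ||
		    fread(p->fw, 1, p->fw_len, f) != (size_t)p->fw_len) {
			fclose(f);
			free(p->fw);
			p->fw = NULL;
			return -1;
		}
		fclose(f);
	}
	/* ... write p->fw to the device here ... */
	return 0;
}

static void fake_release_firmware(struct fake_priv *p)
{
	free(p->fw);			/* done once, at teardown */
	p->fw = NULL;
}

int main(void)
{
	struct fake_priv p = { 0 };

	fake_apply_firmware(&p, "/tmp/fake_fw.bin");	/* loads (if present) */
	fake_apply_firmware(&p, "/tmp/fake_fw.bin");	/* reuses the cache */
	fake_release_firmware(&p);
	return 0;
}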
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 711449c6e675..002bac743843 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1153,6 +1153,9 @@ static int efx_wanted_channels(void)
int count;
int cpu;
+ if (rss_cpus)
+ return rss_cpus;
+
if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
"sfc: RSS disabled due to allocation failure\n");
@@ -1266,27 +1269,18 @@ static void efx_remove_interrupts(struct efx_nic *efx)
efx->legacy_irq = 0;
}
-struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
-{
- unsigned tx_channel_offset =
- separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
- EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
- type >= EFX_TXQ_TYPES);
- return &efx->channel[tx_channel_offset + index]->tx_queue[type];
-}
-
static void efx_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
- unsigned tx_channel_offset =
+
+ efx->tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
/* Channel pointers were set in efx_init_struct() but we now
* need to clear them for TX queues in any RX-only channels. */
efx_for_each_channel(channel, efx) {
- if (channel->channel - tx_channel_offset >=
+ if (channel->channel - efx->tx_channel_offset >=
efx->n_tx_channels) {
efx_for_each_channel_tx_queue(tx_queue, channel)
tx_queue->channel = NULL;
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index bdce66ddf93a..28df8665256a 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -735,6 +735,7 @@ struct efx_nic {
unsigned next_buffer_table;
unsigned n_channels;
unsigned n_rx_channels;
+ unsigned tx_channel_offset;
unsigned n_tx_channels;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
@@ -929,8 +930,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
_channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
(_efx)->channel[_channel->channel + 1] : NULL)
-extern struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type);
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+ type >= EFX_TXQ_TYPES);
+ return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
+}
static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
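The sfc hunks above cache the Tx channel offset in struct efx_nic at setup time and turn efx_get_tx_queue() into a static inline instead of recomputing the offset from module parameters on every call. A stripped-down model of that lookup, with invented names and sizes:

#include <assert.h>
#include <stdio.h>

#define N_CHANNELS	8
#define N_TX_CHANNELS	4
#define TXQ_TYPES	2

struct fake_channel {
	int txq[TXQ_TYPES];
};

struct fake_nic {
	struct fake_channel channel[N_CHANNELS];
	unsigned int tx_channel_offset;	/* computed once at setup */
};

static inline int *fake_get_tx_queue(struct fake_nic *nic, unsigned int index,
				     unsigned int type)
{
	assert(index < N_TX_CHANNELS && type < TXQ_TYPES);
	return &nic->channel[nic->tx_channel_offset + index].txq[type];
}

int main(void)
{
	struct fake_nic nic = { .tx_channel_offset = N_CHANNELS - N_TX_CHANNELS };

	*fake_get_tx_queue(&nic, 0, 1) = 42;
	printf("%d\n", nic.channel[nic.tx_channel_offset].txq[1]);
	return 0;
}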
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 0e6bac5ec65b..7cb301da7474 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -142,14 +142,6 @@
MODULE_AUTHOR("Tilera");
MODULE_LICENSE("GPL");
-
-#define IS_MULTICAST(mac_addr) \
- (((u8 *)(mac_addr))[0] & 0x01)
-
-#define IS_BROADCAST(mac_addr) \
- (((u16 *)(mac_addr))[0] == 0xffff)
-
-
/*
* Queue of incoming packets for a specific cpu and device.
*
@@ -795,7 +787,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
/*
* FIXME: Implement HW multicast filter.
*/
- if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) {
+ if (is_unicast_ether_addr(buf)) {
/* Filter packets not for our address. */
const u8 *mine = dev->dev_addr;
filter = compare_ether_addr(mine, buf);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 73a3e0d93237..715e7b47e7e9 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2032,7 +2032,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now.
*/
- if (!(ha->addr[0] & 1))
+ if (!is_multicast_ether_addr(ha->addr))
continue;
/* Ask CPM to run CRC and set bit in
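The tilepro and ucc_geth hunks above replace open-coded address tests with the generic ether-address helpers. The underlying checks are simple: the multicast/group bit is bit 0 of the first octet, broadcast is the all-ones address, and unicast is "group bit clear". A standalone illustration whose helper names merely mirror the kernel ones:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool addr_is_multicast(const unsigned char *addr)
{
	return addr[0] & 0x01;			/* group bit */
}

static bool addr_is_broadcast(const unsigned char *addr)
{
	static const unsigned char bcast[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(addr, bcast, ETH_ALEN) == 0;
}

static bool addr_is_unicast(const unsigned char *addr)
{
	return !addr_is_multicast(addr);	/* broadcast is multicast too */
}

int main(void)
{
	const unsigned char uni[ETH_ALEN]   = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };
	const unsigned char multi[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("uni: unicast=%d  multi: multicast=%d broadcast=%d\n",
	       addr_is_unicast(uni), addr_is_multicast(multi),
	       addr_is_broadcast(multi));
	return 0;
}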
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 593c104ab199..d776c4a8d3c1 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1021,13 +1021,15 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
(temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
pr_debug("invalid frame detected (ignored)"
"offset[%u]=%u, length=%u, skb=%p\n",
- x, offset, temp, skb);
+ x, offset, temp, skb_in);
if (!x)
goto error;
break;
} else {
skb = skb_clone(skb_in, GFP_ATOMIC);
+ if (!skb)
+ goto error;
skb->len = temp;
skb->data = ((u8 *)skb_in->data) + offset;
skb_set_tail_pointer(skb, temp);
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 1ac9b568f1b0..c81a6512c683 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4120,6 +4120,7 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
"hotplug event.\n");
out:
+ release_firmware(fw);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 01880aa13e36..ea2e7d714bda 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -954,6 +954,9 @@ static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
&adc_dc_cal_multi_sample;
}
ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+
+ if (AR_SREV_9287(ah))
+ ah->supp_cals &= ~ADC_GAIN_CAL;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 088f141f2006..749a93608664 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -226,6 +226,10 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
eep->baseEepHeader.pwdclkind == 0)
ah->need_an_top2_fixup = 1;
+ if ((common->bus_ops->ath_bus_type == ATH_USB) &&
+ (AR_SREV_9280(ah)))
+ eep->modalHeader[0].xpaBiasLvl = 0;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index a099b3e87ed3..1ce506f23110 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -433,6 +433,7 @@ void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
enum htc_endpoint_id ep_id, bool txok);
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
void ath9k_htc_station_work(struct work_struct *work);
void ath9k_htc_aggr_work(struct work_struct *work);
void ath9k_ani_work(struct work_struct *work);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 845b4c938d16..f4d576bc3ccd 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -301,6 +301,16 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
priv->nstations++;
+ /*
+ * Set chainmask etc. on the target.
+ */
+ ret = ath9k_htc_update_cap_target(priv);
+ if (ret)
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Failed to update capability in target\n");
+
+ priv->ah->is_monitoring = true;
+
return 0;
err_vif:
@@ -328,6 +338,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
}
priv->nstations--;
+ priv->ah->is_monitoring = false;
return 0;
}
@@ -419,7 +430,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
return 0;
}
-static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
{
struct ath9k_htc_cap_target tcap;
int ret;
@@ -1186,6 +1197,20 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ /*
+ * Monitor interface should be added before
+ * IEEE80211_CONF_CHANGE_CHANNEL is handled.
+ */
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ if (ath9k_htc_add_monitor_interface(priv))
+ ath_err(common, "Failed to set monitor mode\n");
+ else
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
@@ -1221,16 +1246,6 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
ath_update_txpow(priv);
}
- if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- if (conf->flags & IEEE80211_CONF_MONITOR) {
- if (ath9k_htc_add_monitor_interface(priv))
- ath_err(common, "Failed to set monitor mode\n");
- else
- ath_dbg(common, ATH_DBG_CONFIG,
- "HW opmode set to Monitor mode\n");
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
mutex_lock(&priv->htc_pm_lock);
if (!priv->ps_idle) {
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fde978665e07..1afb8bb85756 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -436,9 +436,10 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
static int ath9k_hw_post_init(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
int ecode;
- if (!AR_SREV_9271(ah)) {
+ if (common->bus_ops->ath_bus_type != ATH_USB) {
if (!ath9k_hw_chip_test(ah))
return -ENODEV;
}
@@ -1213,7 +1214,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
- if (!ah->chip_fullsleep) {
+ if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
ath9k_hw_abortpcurecv(ah);
if (!ath9k_hw_stopdmarecv(ah)) {
ath_dbg(common, ATH_DBG_XMIT,
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index bd8a4134edeb..2176edede39b 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -518,22 +518,21 @@ static int prism2_config(struct pcmcia_device *link)
hw_priv->link = link;
/*
- * Make sure the IRQ handler cannot proceed until at least
- * dev->base_addr is initialized.
+ * We enable IRQ here, but IRQ handler will not proceed
+ * until dev->base_addr is set below. This protects us from
+ * receive interrupts while the driver is not yet initialized.
*/
- spin_lock_irqsave(&local->irq_init_lock, flags);
-
ret = pcmcia_request_irq(link, prism2_interrupt);
if (ret)
- goto failed_unlock;
+ goto failed;
ret = pcmcia_enable_device(link);
if (ret)
- goto failed_unlock;
+ goto failed;
+ spin_lock_irqsave(&local->irq_init_lock, flags);
dev->irq = link->irq;
dev->base_addr = link->resource[0]->start;
-
spin_unlock_irqrestore(&local->irq_init_lock, flags);
local->shutdown = 0;
@@ -546,8 +545,6 @@ static int prism2_config(struct pcmcia_device *link)
return ret;
- failed_unlock:
- spin_unlock_irqrestore(&local->irq_init_lock, flags);
failed:
kfree(hw_priv);
prism2_release((u_long)link);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d6ed5f6f46f..ae438ed80c2f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1973,6 +1973,13 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
inta = ipw_read32(priv, IPW_INTA_RW);
inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
+
+ if (inta == 0xFFFFFFFF) {
+ /* Hardware disappeared */
+ IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
+ /* Only handle the cached INTA values */
+ inta = 0;
+ }
inta &= (IPW_INTA_MASK_ALL & inta_mask);
/* Add any cached INTA values that need to be handled */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 76b2318a7dc7..f618b9623e5a 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -618,7 +618,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
else
*burst_possible = false;
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index 401c44b6eadb..bae647264dd6 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -69,7 +69,7 @@ struct pn544_info {
struct mutex read_mutex; /* Serialize read_irq access */
struct mutex mutex; /* Serialize info struct access */
u8 *buf;
- unsigned int buflen;
+ size_t buflen;
};
static const char reg_vdd_io[] = "Vdd_IO";
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b65e65aa07eb..e56730214c05 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -990,30 +990,51 @@ out:
static void set_media_not_present(struct scsi_disk *sdkp)
{
- sdkp->media_present = 0;
- sdkp->capacity = 0;
- sdkp->device->changed = 1;
+ if (sdkp->media_present)
+ sdkp->device->changed = 1;
+
+ if (sdkp->device->removable) {
+ sdkp->media_present = 0;
+ sdkp->capacity = 0;
+ }
+}
+
+static int media_not_present(struct scsi_disk *sdkp,
+ struct scsi_sense_hdr *sshdr)
+{
+ if (!scsi_sense_valid(sshdr))
+ return 0;
+
+ /* not invoked for commands that could return deferred errors */
+ switch (sshdr->sense_key) {
+ case UNIT_ATTENTION:
+ case NOT_READY:
+ /* medium not present */
+ if (sshdr->asc == 0x3A) {
+ set_media_not_present(sdkp);
+ return 1;
+ }
+ }
+ return 0;
}
/**
- * sd_media_changed - check if our medium changed
- * @disk: kernel device descriptor
+ * sd_check_events - check media events
+ * @disk: kernel device descriptor
+ * @clearing: disk events currently being cleared
*
- * Returns 0 if not applicable or no change; 1 if change
+ * Returns mask of DISK_EVENT_*.
*
* Note: this function is invoked from the block subsystem.
**/
-static int sd_media_changed(struct gendisk *disk)
+static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
- SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
-
- if (!sdp->removable)
- return 0;
+ SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
/*
* If the device is offline, don't send any commands - just pretend as
@@ -1043,40 +1064,32 @@ static int sd_media_changed(struct gendisk *disk)
sshdr);
}
- if (retval) {
+ /* failed to execute TUR, assume media not present */
+ if (host_byte(retval)) {
set_media_not_present(sdkp);
goto out;
}
+ if (media_not_present(sdkp, sshdr))
+ goto out;
+
/*
* For removable scsi disk we have to recognise the presence
- * of a disk in the drive. This is kept in the struct scsi_disk
- * struct and tested at open ! Daniel Roche (dan@lectra.fr)
+ * of a disk in the drive.
*/
+ if (!sdkp->media_present)
+ sdp->changed = 1;
sdkp->media_present = 1;
-
out:
/*
- * Report a media change under the following conditions:
- *
- * Medium is present now and wasn't present before.
- * Medium wasn't present before and is present now.
- * Medium was present at all times, but it changed while
- * we weren't looking (sdp->changed is set).
+ * sdp->changed is set under the following conditions:
*
- * If there was no medium before and there is no medium now then
- * don't report a change, even if a medium was inserted and removed
- * while we weren't looking.
+ * Medium present state has changed in either direction.
+ * Device has indicated UNIT_ATTENTION.
*/
- retval = (sdkp->media_present != sdkp->previous_state ||
- (sdkp->media_present && sdp->changed));
- if (retval)
- sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
- sdkp->previous_state = sdkp->media_present;
-
- /* sdp->changed indicates medium was changed or is not present */
- sdp->changed = !sdkp->media_present;
kfree(sshdr);
+ retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+ sdp->changed = 0;
return retval;
}
@@ -1169,7 +1182,7 @@ static const struct block_device_operations sd_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = sd_compat_ioctl,
#endif
- .media_changed = sd_media_changed,
+ .check_events = sd_check_events,
.revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
};
@@ -1312,23 +1325,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
return good_bytes;
}
-static int media_not_present(struct scsi_disk *sdkp,
- struct scsi_sense_hdr *sshdr)
-{
-
- if (!scsi_sense_valid(sshdr))
- return 0;
- /* not invoked for commands that could return deferred errors */
- if (sshdr->sense_key != NOT_READY &&
- sshdr->sense_key != UNIT_ATTENTION)
- return 0;
- if (sshdr->asc != 0x3A) /* medium not present */
- return 0;
-
- set_media_not_present(sdkp);
- return 1;
-}
-
/*
* spinup disk - called only in sd_revalidate_disk()
*/
@@ -1503,7 +1499,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
*/
if (sdp->removable &&
sense_valid && sshdr->sense_key == NOT_READY)
- sdp->changed = 1;
+ set_media_not_present(sdkp);
/*
* We used to set media_present to 0 here to indicate no media
@@ -2389,8 +2385,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
gd->driverfs_dev = &sdp->sdev_gendev;
gd->flags = GENHD_FL_EXT_DEVT;
- if (sdp->removable)
+ if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
+ gd->events |= DISK_EVENT_MEDIA_CHANGE;
+ }
add_disk(gd);
sd_dif_config_host(sdkp);
@@ -2472,7 +2470,6 @@ static int sd_probe(struct device *dev)
sdkp->disk = gd;
sdkp->index = index;
atomic_set(&sdkp->openers, 0);
- sdkp->previous_state = 1;
if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 55488faf0815..c9d8f6ca49e2 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -55,7 +55,6 @@ struct scsi_disk {
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
- unsigned previous_state : 1;
unsigned ATO : 1; /* state of disk ATO bit */
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index be6baf8ad704..aefadc6a1607 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -249,10 +249,6 @@ skip_tur:
cd->device->changed = 0;
}
- /* for backward compatibility */
- if (events & DISK_EVENT_MEDIA_CHANGE)
- sdev_evt_send_simple(cd->device, SDEV_EVT_MEDIA_CHANGE,
- GFP_KERNEL);
return events;
}
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index 7ac2bf5167cd..2335edafe903 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -883,10 +883,10 @@ static struct uart_ops s3c24xx_serial_ops = {
static struct uart_driver s3c24xx_uart_drv = {
.owner = THIS_MODULE,
- .dev_name = "s3c2410_serial",
+ .driver_name = "s3c2410_serial",
.nr = CONFIG_SERIAL_SAMSUNG_UARTS,
.cons = S3C24XX_SERIAL_CONSOLE,
- .driver_name = S3C24XX_SERIAL_NAME,
+ .dev_name = S3C24XX_SERIAL_NAME,
.major = S3C24XX_SERIAL_MAJOR,
.minor = S3C24XX_SERIAL_MINOR,
};
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1906840c1113..13bfa9d48082 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -156,10 +156,10 @@ config SPI_IMX_VER_0_4
def_bool y if ARCH_MX31
config SPI_IMX_VER_0_7
- def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51
+ def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 || ARCH_MX53
config SPI_IMX_VER_2_3
- def_bool y if ARCH_MX51
+ def_bool y if ARCH_MX51 || ARCH_MX53
config SPI_IMX
tristate "Freescale i.MX SPI controllers"
@@ -310,8 +310,8 @@ config SPI_S3C24XX_GPIO
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on ARCH_S3C64XX && EXPERIMENTAL
- select S3C64XX_DMA
+ depends on (ARCH_S3C64XX || ARCH_S5P64X0)
+ select S3C64XX_DMA if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index a2a5921c730a..71a1219a995d 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1795,7 +1795,7 @@ static int pl022_setup(struct spi_device *spi)
{
struct pl022_config_chip const *chip_info;
struct chip_data *chip;
- struct ssp_clock_params clk_freq;
+ struct ssp_clock_params clk_freq = {0, };
int status = 0;
struct pl022 *pl022 = spi_master_get_devdata(spi->master);
unsigned int bits = spi->bits_per_word;
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c
index db35bd9c1b24..2fa012c109bc 100644
--- a/drivers/spi/dw_spi_mmio.c
+++ b/drivers/spi/dw_spi_mmio.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -68,8 +69,8 @@ static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
}
dwsmmio->clk = clk_get(&pdev->dev, NULL);
- if (!dwsmmio->clk) {
- ret = -ENODEV;
+ if (IS_ERR(dwsmmio->clk)) {
+ ret = PTR_ERR(dwsmmio->clk);
goto err_irq;
}
clk_enable(dwsmmio->clk);
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 9469564e6888..1cf9d5faabf4 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -743,6 +743,12 @@ static struct platform_device_id spi_imx_devtype[] = {
.name = "imx51-ecspi",
.driver_data = SPI_IMX_VER_2_3,
}, {
+ .name = "imx53-cspi",
+ .driver_data = SPI_IMX_VER_0_7,
+ }, {
+ .name = "imx53-ecspi",
+ .driver_data = SPI_IMX_VER_2_3,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c
index bb7df02a5472..891e5909038c 100644
--- a/drivers/spi/spi_tegra.c
+++ b/drivers/spi/spi_tegra.c
@@ -513,7 +513,7 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
}
tspi->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR_OR_NULL(tspi->clk)) {
+ if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
goto err2;
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 5a0985d4ce15..29884c00c4d5 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -420,6 +420,16 @@ int ssb_bus_scan(struct ssb_bus *bus,
bus->pcicore.dev = dev;
#endif /* CONFIG_SSB_DRIVER_PCICORE */
break;
+ case SSB_DEV_ETHERNET:
+ if (bus->bustype == SSB_BUSTYPE_PCI) {
+ if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM &&
+ (bus->host_pci->device & 0xFF00) == 0x4300) {
+ /* This is a dangling ethernet core on a
+ * wireless device. Ignore it. */
+ continue;
+ }
+ }
+ break;
default:
break;
}
diff --git a/drivers/staging/autofs/dirhash.c b/drivers/staging/autofs/dirhash.c
index d3f42c8325f7..a08bd7355035 100644
--- a/drivers/staging/autofs/dirhash.c
+++ b/drivers/staging/autofs/dirhash.c
@@ -88,14 +88,13 @@ struct autofs_dir_ent *autofs_expire(struct super_block *sb,
}
path.mnt = mnt;
path_get(&path);
- if (!follow_down(&path)) {
+ if (!follow_down_one(&path)) {
path_put(&path);
DPRINTK(("autofs: not expirable\
(not a mounted directory): %s\n", ent->name));
continue;
}
- while (d_mountpoint(path.dentry) && follow_down(&path))
- ;
+ follow_down(&path, false); // TODO: need to check error
umount_ok = may_umount(path.mnt);
path_put(&path);
diff --git a/drivers/staging/smbfs/dir.c b/drivers/staging/smbfs/dir.c
index 87a3a9bd5842..f204d33910ec 100644
--- a/drivers/staging/smbfs/dir.c
+++ b/drivers/staging/smbfs/dir.c
@@ -283,7 +283,7 @@ static int smb_compare_dentry(const struct dentry *,
unsigned int, const char *, const struct qstr *);
static int smb_delete_dentry(const struct dentry *);
-static const struct dentry_operations smbfs_dentry_operations =
+const struct dentry_operations smbfs_dentry_operations =
{
.d_revalidate = smb_lookup_validate,
.d_hash = smb_hash_dentry,
@@ -291,7 +291,7 @@ static const struct dentry_operations smbfs_dentry_operations =
.d_delete = smb_delete_dentry,
};
-static const struct dentry_operations smbfs_dentry_operations_case =
+const struct dentry_operations smbfs_dentry_operations_case =
{
.d_revalidate = smb_lookup_validate,
.d_delete = smb_delete_dentry,
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
new file mode 100644
index 000000000000..2fac3be209ac
--- /dev/null
+++ b/drivers/target/Kconfig
@@ -0,0 +1,32 @@
+
+menuconfig TARGET_CORE
+ tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
+ depends on SCSI && BLOCK
+ select CONFIGFS_FS
+ default n
+ help
+ Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
+ control path for target_core_mod. This includes built-in TCM RAMDISK
+ subsystem logic for virtual LUN 0 access
+
+if TARGET_CORE
+
+config TCM_IBLOCK
+ tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+ help
+ Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
+ access to Linux/Block devices using BIO
+
+config TCM_FILEIO
+ tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
+ help
+ Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
+ access to Linux/VFS struct file or struct block_device
+
+config TCM_PSCSI
+ tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+ help
+ Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
+ passthrough access to Linux/SCSI device
+
+endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
new file mode 100644
index 000000000000..5cfd70819f08
--- /dev/null
+++ b/drivers/target/Makefile
@@ -0,0 +1,24 @@
+EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/
+
+target_core_mod-y := target_core_configfs.o \
+ target_core_device.o \
+ target_core_fabric_configfs.o \
+ target_core_fabric_lib.o \
+ target_core_hba.o \
+ target_core_pr.o \
+ target_core_alua.o \
+ target_core_scdb.o \
+ target_core_tmr.o \
+ target_core_tpg.o \
+ target_core_transport.o \
+ target_core_cdb.o \
+ target_core_ua.o \
+ target_core_rd.o \
+ target_core_mib.o
+
+obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
+
+# Subsystem modules
+obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
+obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
+obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
new file mode 100644
index 000000000000..2c5fcfed5934
--- /dev/null
+++ b/drivers/target/target_core_alua.c
@@ -0,0 +1,1991 @@
+/*******************************************************************************
+ * Filename: target_core_alua.c
+ *
+ * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
+ *
+ * Copyright (c) 2009-2010 Rising Tide Systems
+ * Copyright (c) 2009-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_ua.h"
+
+static int core_alua_check_transition(int state, int *primary);
+static int core_alua_set_tg_pt_secondary_state(
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+ struct se_port *port, int explict, int offline);
+
+/*
+ * REPORT_TARGET_PORT_GROUPS
+ *
+ * See spc4r17 section 6.27
+ */
+int core_emulate_report_target_port_groups(struct se_cmd *cmd)
+{
+ struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+ struct se_port *port;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
+ Target port group descriptor */
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ tg_pt_gp_list) {
+ /*
+ * PREF: Preferred target port bit, determine if this
+ * bit should be set for port group.
+ */
+ if (tg_pt_gp->tg_pt_gp_pref)
+ buf[off] = 0x80;
+ /*
+ * Set the ASYMMETRIC ACCESS State
+ */
+ buf[off++] |= (atomic_read(
+ &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+ /*
+ * Set supported ASYMMETRIC ACCESS State bits
+ */
+ buf[off] = 0x80; /* T_SUP */
+ buf[off] |= 0x40; /* O_SUP */
+ buf[off] |= 0x8; /* U_SUP */
+ buf[off] |= 0x4; /* S_SUP */
+ buf[off] |= 0x2; /* AN_SUP */
+ buf[off++] |= 0x1; /* AO_SUP */
+ /*
+ * TARGET PORT GROUP
+ */
+ buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
+ buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
+
+ off++; /* Skip over Reserved */
+ /*
+ * STATUS CODE
+ */
+ buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
+ /*
+ * Vendor Specific field
+ */
+ buf[off++] = 0x00;
+ /*
+ * TARGET PORT COUNT
+ */
+ buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
+ rd_len += 8;
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+ tg_pt_gp_mem_list) {
+ port = tg_pt_gp_mem->tg_pt;
+ /*
+ * Start Target Port descriptor format
+ *
+ * See spc4r17 section 6.2.7 Table 247
+ */
+ off += 2; /* Skip over Obsolete */
+ /*
+ * Set RELATIVE TARGET PORT IDENTIFIER
+ */
+ buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+ buf[off++] = (port->sep_rtpi & 0xff);
+ rd_len += 4;
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ }
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ /*
+ * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
+ */
+ buf[0] = ((rd_len >> 24) & 0xff);
+ buf[1] = ((rd_len >> 16) & 0xff);
+ buf[2] = ((rd_len >> 8) & 0xff);
+ buf[3] = (rd_len & 0xff);
+
+ return 0;
+}
+
+/*
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
+ *
+ * See spc4r17 section 6.35
+ */
+int core_emulate_set_target_port_groups(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+ struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
+ struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+ struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
+ u32 len = 4; /* Skip over RESERVED area in header */
+ int alua_access_state, primary = 0, rc;
+ u16 tg_pt_id, rtpi;
+
+ if (!(l_port))
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ /*
+ * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
+ * for the local tg_pt_gp.
+ */
+ l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
+ if (!(l_tg_pt_gp_mem)) {
+ printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
+ if (!(l_tg_pt_gp)) {
+ spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
+ spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ if (!(rc)) {
+ printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+ " while TPGS_EXPLICT_ALUA is disabled\n");
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+
+ while (len < cmd->data_length) {
+ alua_access_state = (ptr[0] & 0x0f);
+ /*
+ * Check the received ALUA access state, and determine if
+ * the state is a primary or secondary target port asymmetric
+ * access state.
+ */
+ rc = core_alua_check_transition(alua_access_state, &primary);
+ if (rc != 0) {
+ /*
+ * If the SET TARGET PORT GROUPS attempts to establish
+ * an invalid combination of target port asymmetric
+ * access states or attempts to establish an
+ * unsupported target port asymmetric access state,
+ * then the command shall be terminated with CHECK
+ * CONDITION status, with the sense key set to ILLEGAL
+ * REQUEST, and the additional sense code set to INVALID
+ * FIELD IN PARAMETER LIST.
+ */
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ rc = -1;
+ /*
+ * If the ASYMMETRIC ACCESS STATE field (see table 267)
+ * specifies a primary target port asymmetric access state,
+ * then the TARGET PORT GROUP OR TARGET PORT field specifies
+ * a primary target port group for which the primary target
+ * port asymmetric access state shall be changed. If the
+ * ASYMMETRIC ACCESS STATE field specifies a secondary target
+ * port asymmetric access state, then the TARGET PORT GROUP OR
+ * TARGET PORT field specifies the relative target port
+ * identifier (see 3.1.120) of the target port for which the
+ * secondary target port asymmetric access state shall be
+ * changed.
+ */
+ if (primary) {
+ tg_pt_id = ((ptr[2] << 8) & 0xff00);
+ tg_pt_id |= (ptr[3] & 0xff);
+ /*
+ * Locate the matching target port group ID from
+ * the global tg_pt_gp list
+ */
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp,
+ &T10_ALUA(su_dev)->tg_pt_gps_list,
+ tg_pt_gp_list) {
+ if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ continue;
+
+ if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
+ continue;
+
+ atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+ rc = core_alua_do_port_transition(tg_pt_gp,
+ dev, l_port, nacl,
+ alua_access_state, 1);
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_dec();
+ break;
+ }
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ /*
+ * If no matching target port group ID can be located,
+ * throw an exception with ASCQ: INVALID_PARAMETER_LIST
+ */
+ if (rc != 0)
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ } else {
+ /*
+ * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
+ * the Target Port in question for the incoming
+ * SET_TARGET_PORT_GROUPS op.
+ */
+ rtpi = ((ptr[2] << 8) & 0xff00);
+ rtpi |= (ptr[3] & 0xff);
+ /*
+ * Locate the matching relative target port identifier
+ * for the struct se_device storage object.
+ */
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry(port, &dev->dev_sep_list,
+ sep_list) {
+ if (port->sep_rtpi != rtpi)
+ continue;
+
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ spin_unlock(&dev->se_port_lock);
+
+ rc = core_alua_set_tg_pt_secondary_state(
+ tg_pt_gp_mem, port, 1, 1);
+
+ spin_lock(&dev->se_port_lock);
+ break;
+ }
+ spin_unlock(&dev->se_port_lock);
+ /*
+ * If no matching relative target port identifier can
+ * be located, throw an exception with ASCQ:
+ * INVALID_PARAMETER_LIST
+ */
+ if (rc != 0)
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+
+ ptr += 4;
+ len += 4;
+ }
+
+ return 0;
+}
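+
+/*
+ * Illustrative sketch (not from the original patch): the TARGET PORT GROUP
+ * and RELATIVE TARGET PORT IDENTIFIER fields parsed above are 16-bit
+ * big-endian values in the parameter data, so the extraction reduces to the
+ * helper below. The helper name is hypothetical; the kernel's
+ * get_unaligned_be16() from <asm/unaligned.h> can be used to the same effect.
+ */
+static inline u16 core_alua_get_be16(const unsigned char *p)
+{
+ return (u16)((p[0] << 8) | p[1]);
+}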
+
+static inline int core_alua_state_nonoptimized(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ int nonop_delay_msecs,
+ u8 *alua_ascq)
+{
+ /*
+ * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
+ * later to determine if processing of this cmd needs to be
+ * temporarily delayed for the Active/NonOptimized primary access state.
+ */
+ cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
+ cmd->alua_nonop_delay = nonop_delay_msecs;
+ return 0;
+}
+
+static inline int core_alua_state_standby(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u8 *alua_ascq)
+{
+ /*
+ * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
+ * spc4r17 section 5.9.2.4.4
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ case LOG_SELECT:
+ case LOG_SENSE:
+ case MODE_SELECT:
+ case MODE_SENSE:
+ case REPORT_LUNS:
+ case RECEIVE_DIAGNOSTIC:
+ case SEND_DIAGNOSTIC:
+ case MAINTENANCE_IN:
+ switch (cdb[1]) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ return 1;
+ }
+ case MAINTENANCE_OUT:
+ switch (cdb[1]) {
+ case MO_SET_TARGET_PGS:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ return 1;
+ }
+ case REQUEST_SENSE:
+ case PERSISTENT_RESERVE_IN:
+ case PERSISTENT_RESERVE_OUT:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int core_alua_state_unavailable(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u8 *alua_ascq)
+{
+ /*
+ * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
+ * spc4r17 section 5.9.2.4.5
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
+ case MAINTENANCE_IN:
+ switch (cdb[1]) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ return 1;
+ }
+ case MAINTENANCE_OUT:
+ switch (cdb[1]) {
+ case MO_SET_TARGET_PGS:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ return 1;
+ }
+ case REQUEST_SENSE:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int core_alua_state_transition(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u8 *alua_ascq)
+{
+ /*
+ * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
+ * spc4r17 section 5.9.2.5
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
+ case MAINTENANCE_IN:
+ switch (cdb[1]) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+ return 1;
+ }
+ case REQUEST_SENSE:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
+ * in transport_cmd_sequencer(). This function is assigned to
+ * struct t10_alua *->state_check() in core_setup_alua()
+ */
+static int core_alua_state_check_nop(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u8 *alua_ascq)
+{
+ return 0;
+}
+
+/*
+ * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
+ * This function is assigned to struct t10_alua *->state_check() in
+ * core_setup_alua()
+ *
+ * Also, this function can return three different return codes to
+ * signal transport_generic_cmd_sequencer()
+ *
+ * return 1: Used to signal LUN not accessible, and check condition/not ready
+ * return 0: Used to signal success
+ * return -1: Used to signal failure, and invalid cdb field
+ * (see the illustrative usage sketch following this function)
+ */
+static int core_alua_state_check(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u8 *alua_ascq)
+{
+ struct se_lun *lun = SE_LUN(cmd);
+ struct se_port *port = lun->lun_sep;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ int out_alua_state, nonop_delay_msecs;
+
+ if (!(port))
+ return 0;
+ /*
+ * First, check for a struct se_port specific secondary ALUA target port
+ * access state: OFFLINE
+ */
+ if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
+ *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+ printk(KERN_INFO "ALUA: Got secondary offline status for local"
+ " target port\n");
+ return 1;
+ }
+ /*
+ * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
+ * ALUA target port group, to obtain current ALUA access state.
+ * Otherwise look for the underlying struct se_device association with
+ * a ALUA logical unit group.
+ */
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ /*
+ * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
+ * statement so the compiler knows explicitly to check this case first.
+ * For the Optimized ALUA access state case, we want to process the
+ * incoming fabric cmd ASAP.
+ */
+ if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
+ return 0;
+
+ switch (out_alua_state) {
+ case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ return core_alua_state_nonoptimized(cmd, cdb,
+ nonop_delay_msecs, alua_ascq);
+ case ALUA_ACCESS_STATE_STANDBY:
+ return core_alua_state_standby(cmd, cdb, alua_ascq);
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+ case ALUA_ACCESS_STATE_TRANSITION:
+ return core_alua_state_transition(cmd, cdb, alua_ascq);
+ /*
+ * OFFLINE is a secondary ALUA target port group access state, that is
+ * handled above with struct se_port->sep_tg_pt_secondary_offline=1
+ */
+ case ALUA_ACCESS_STATE_OFFLINE:
+ default:
+ printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+ out_alua_state);
+ return -1;
+ }
+
+ return 0;
+}
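+
+/*
+ * A minimal usage sketch for the return codes documented above, assuming a
+ * hypothetical dispatch site and label names; the real handling lives in the
+ * transport command sequencer:
+ *
+ *	switch (T10_ALUA(su_dev)->state_check(cmd, cdb, &alua_ascq)) {
+ *	case 0:  break;                       // command allowed
+ *	case 1:  goto out_check_condition;    // NOT READY + *alua_ascq
+ *	default: goto out_invalid_cdb_field;  // negative: invalid CDB field
+ *	}
+ */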
+
+/*
+ * Check implicit and explicit ALUA state change requests.
+ */
+static int core_alua_check_transition(int state, int *primary)
+{
+ switch (state) {
+ case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ case ALUA_ACCESS_STATE_STANDBY:
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ /*
+ * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+ * defined as primary target port asymmetric access states.
+ */
+ *primary = 1;
+ break;
+ case ALUA_ACCESS_STATE_OFFLINE:
+ /*
+ * OFFLINE state is defined as a secondary target port
+ * asymmetric access state.
+ */
+ *primary = 0;
+ break;
+ default:
+ printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
+ return -1;
+ }
+
+ return 0;
+}
+
+static char *core_alua_dump_state(int state)
+{
+ switch (state) {
+ case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ return "Active/Optimized";
+ case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ return "Active/NonOptimized";
+ case ALUA_ACCESS_STATE_STANDBY:
+ return "Standby";
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ return "Unavailable";
+ case ALUA_ACCESS_STATE_OFFLINE:
+ return "Offline";
+ default:
+ return "Unknown";
+ }
+
+ return NULL;
+}
+
+char *core_alua_dump_status(int status)
+{
+ switch (status) {
+ case ALUA_STATUS_NONE:
+ return "None";
+ case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
+ return "Altered by Explict STPG";
+ case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
+ return "Altered by Implict ALUA";
+ default:
+ return "Unknown";
+ }
+
+ return NULL;
+}
+
+/*
+ * Used by fabric modules to determine when we need to delay processing
+ * for the Active/NonOptimized paths.
+ */
+int core_alua_check_nonop_delay(
+ struct se_cmd *cmd)
+{
+ if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
+ return 0;
+ if (in_interrupt())
+ return 0;
+ /*
+ * The ALUA Active/NonOptimized access state delay can be disabled
+ * via configfs with a value of zero.
+ */
+ if (!(cmd->alua_nonop_delay))
+ return 0;
+ /*
+ * struct se_cmd->alua_nonop_delay gets set by a target port group
+ * defined interval in core_alua_state_nonoptimized()
+ */
+ msleep_interruptible(cmd->alua_nonop_delay);
+ return 0;
+}
+EXPORT_SYMBOL(core_alua_check_nonop_delay);
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex or port->sep_tg_pt_md_mutex held
+ */
+static int core_alua_write_tpg_metadata(
+ const char *path,
+ unsigned char *md_buf,
+ u32 md_buf_len)
+{
+ mm_segment_t old_fs;
+ struct file *file;
+ struct iovec iov[1];
+ int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
+
+ memset(iov, 0, sizeof(struct iovec));
+
+ file = filp_open(path, flags, 0600);
+ if (IS_ERR(file) || !file || !file->f_dentry) {
+ printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+ path);
+ return -ENODEV;
+ }
+
+ iov[0].iov_base = &md_buf[0];
+ iov[0].iov_len = md_buf_len;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+ set_fs(old_fs);
+
+ if (ret < 0) {
+ printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+ filp_close(file, NULL);
+ return -EIO;
+ }
+ filp_close(file, NULL);
+
+ return 0;
+}
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ */
+static int core_alua_update_tpg_primary_metadata(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ int primary_state,
+ unsigned char *md_buf)
+{
+ struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct t10_wwn *wwn = &su_dev->t10_wwn;
+ char path[ALUA_METADATA_PATH_LEN];
+ int len;
+
+ memset(path, 0, ALUA_METADATA_PATH_LEN);
+
+ len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+ "tg_pt_gp_id=%hu\n"
+ "alua_access_state=0x%02x\n"
+ "alua_access_status=0x%02x\n",
+ tg_pt_gp->tg_pt_gp_id, primary_state,
+ tg_pt_gp->tg_pt_gp_alua_access_status);
+
+ snprintf(path, ALUA_METADATA_PATH_LEN,
+ "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
+ config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+
+ return core_alua_write_tpg_metadata(path, md_buf, len);
+}
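+
+/*
+ * For illustration only (values made up): with the snprintf format above, the
+ * primary metadata file written to
+ * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp name> contains key=value
+ * lines such as:
+ *
+ *	tg_pt_gp_id=0
+ *	alua_access_state=0x00
+ *	alua_access_status=0x00
+ */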
+
+static int core_alua_do_transition_tg_pt(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ struct se_port *l_port,
+ struct se_node_acl *nacl,
+ unsigned char *md_buf,
+ int new_state,
+ int explict)
+{
+ struct se_dev_entry *se_deve;
+ struct se_lun_acl *lacl;
+ struct se_port *port;
+ struct t10_alua_tg_pt_gp_member *mem;
+ int old_state = 0;
+ /*
+ * Save the old primary ALUA access state, and set the current state
+ * to ALUA_ACCESS_STATE_TRANSITION.
+ */
+ old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+ ALUA_ACCESS_STATE_TRANSITION);
+ tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+ /*
+ * Check for the optional ALUA primary state transition delay
+ */
+ if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+ msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
+ tg_pt_gp_mem_list) {
+ port = mem->tg_pt;
+ /*
+ * After an implicit target port asymmetric access state
+ * change, a device server shall establish a unit attention
+ * condition for the initiator port associated with every I_T
+ * nexus with the additional sense code set to ASYMMETRIC
+ * ACCESS STATE CHANGED.
+ *
+ * After an explicit target port asymmetric access state
+ * change, a device server shall establish a unit attention
+ * condition with the additional sense code set to ASYMMETRIC
+ * ACCESS STATE CHANGED for the initiator port associated with
+ * every I_T nexus other than the I_T nexus on which the SET
+ * TARGET PORT GROUPS command was received.
+ */
+ atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+ spin_lock_bh(&port->sep_alua_lock);
+ list_for_each_entry(se_deve, &port->sep_alua_list,
+ alua_port_list) {
+ lacl = se_deve->se_lun_acl;
+ /*
+ * se_deve->se_lun_acl pointer may be NULL for an
+ * entry created without explicit Node+MappedLUN ACLs
+ */
+ if (!(lacl))
+ continue;
+
+ if (explict &&
+ (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
+ (l_port != NULL) && (l_port == port))
+ continue;
+
+ core_scsi3_ua_allocate(lacl->se_lun_nacl,
+ se_deve->mapped_lun, 0x2A,
+ ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
+ }
+ spin_unlock_bh(&port->sep_alua_lock);
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ /*
+ * Update the ALUA metadata buf that has been allocated in
+ * core_alua_do_port_transition(), this metadata will be written
+ * to struct file.
+ *
+ * Note that there is a case where we do not want to update the
+ * metadata when the saved metadata is being parsed in userspace
+ * when setting the existing port access state and access status.
+ *
+ * Also note that the failure to write out the ALUA metadata to
+ * struct file does NOT affect the actual ALUA transition.
+ */
+ if (tg_pt_gp->tg_pt_gp_write_metadata) {
+ mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
+ core_alua_update_tpg_primary_metadata(tg_pt_gp,
+ new_state, md_buf);
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
+ }
+ /*
+ * Set the current primary ALUA access state to the requested new state
+ */
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+
+ printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ " from primary access state %s to %s\n", (explict) ? "explict" :
+ "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
+ core_alua_dump_state(new_state));
+
+ return 0;
+}
+
+int core_alua_do_port_transition(
+ struct t10_alua_tg_pt_gp *l_tg_pt_gp,
+ struct se_device *l_dev,
+ struct se_port *l_port,
+ struct se_node_acl *l_nacl,
+ int new_state,
+ int explict)
+{
+ struct se_device *dev;
+ struct se_port *port;
+ struct se_subsystem_dev *su_dev;
+ struct se_node_acl *nacl;
+ struct t10_alua_lu_gp *lu_gp;
+ struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ unsigned char *md_buf;
+ int primary;
+
+ if (core_alua_check_transition(new_state, &primary) != 0)
+ return -EINVAL;
+
+ md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
+ if (!(md_buf)) {
+ printk("Unable to allocate buf for ALUA metadata\n");
+ return -ENOMEM;
+ }
+
+ local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
+ spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = local_lu_gp_mem->lu_gp;
+ atomic_inc(&lu_gp->lu_gp_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
+ /*
+ * For storage objects that are members of the 'default_lu_gp',
+ * we only do the transition on the passed *l_tg_pt_gp, and not
+ * on all of the matching target port group IDs in default_lu_gp.
+ */
+ if (!(lu_gp->lu_gp_id)) {
+ /*
+ * core_alua_do_transition_tg_pt() will always return
+ * success.
+ */
+ core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
+ md_buf, new_state, explict);
+ atomic_dec(&lu_gp->lu_gp_ref_cnt);
+ smp_mb__after_atomic_dec();
+ kfree(md_buf);
+ return 0;
+ }
+ /*
+ * For all other LU groups aside from 'default_lu_gp', walk all of
+ * the associated storage objects looking for a matching target port
+ * group ID from the local target port group.
+ */
+ spin_lock(&lu_gp->lu_gp_lock);
+ list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
+ lu_gp_mem_list) {
+
+ dev = lu_gp_mem->lu_gp_mem_dev;
+ su_dev = dev->se_sub_dev;
+ atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&lu_gp->lu_gp_lock);
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp,
+ &T10_ALUA(su_dev)->tg_pt_gps_list,
+ tg_pt_gp_list) {
+
+ if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ continue;
+ /*
+ * If the target port asymmetric access state is changed for
+ * any target port group accessible via a logical unit within
+ * a LU group, the target port asymmetric access states for
+ * the same target port group accessible via other logical units
+ * in that LU group will also change.
+ */
+ if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
+ continue;
+
+ if (l_tg_pt_gp == tg_pt_gp) {
+ port = l_port;
+ nacl = l_nacl;
+ } else {
+ port = NULL;
+ nacl = NULL;
+ }
+ atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ /*
+ * core_alua_do_transition_tg_pt() will always return
+ * success.
+ */
+ core_alua_do_transition_tg_pt(tg_pt_gp, port,
+ nacl, md_buf, new_state, explict);
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+ spin_lock(&lu_gp->lu_gp_lock);
+ atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&lu_gp->lu_gp_lock);
+
+ printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+ " Group IDs: %hu %s transition to primary state: %s\n",
+ config_item_name(&lu_gp->lu_gp_group.cg_item),
+ l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explicit" : "implicit",
+ core_alua_dump_state(new_state));
+
+ atomic_dec(&lu_gp->lu_gp_ref_cnt);
+ smp_mb__after_atomic_dec();
+ kfree(md_buf);
+ return 0;
+}
+
+/*
+ * Called with port->sep_tg_pt_md_mutex held
+ */
+static int core_alua_update_tpg_secondary_metadata(
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+ struct se_port *port,
+ unsigned char *md_buf,
+ u32 md_buf_len)
+{
+ struct se_portal_group *se_tpg = port->sep_tpg;
+ char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+ int len;
+
+ memset(path, 0, ALUA_METADATA_PATH_LEN);
+ memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
+
+ len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
+ TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
+
+ if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
+ snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
+ TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+
+ len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+ "alua_tg_pt_status=0x%02x\n",
+ atomic_read(&port->sep_tg_pt_secondary_offline),
+ port->sep_tg_pt_secondary_stat);
+
+ snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
+ TPG_TFO(se_tpg)->get_fabric_name(), wwn,
+ port->sep_lun->unpacked_lun);
+
+ return core_alua_write_tpg_metadata(path, md_buf, len);
+}
+
+static int core_alua_set_tg_pt_secondary_state(
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+ struct se_port *port,
+ int explict,
+ int offline)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ unsigned char *md_buf;
+ u32 md_buf_len;
+ int trans_delay_msecs;
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if (!(tg_pt_gp)) {
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ printk(KERN_ERR "Unable to complete secondary state"
+ " transition\n");
+ return -1;
+ }
+ trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
+ /*
+ * Set the secondary ALUA target port access state to OFFLINE
+ * or release the previously secondary state for struct se_port
+ */
+ if (offline)
+ atomic_set(&port->sep_tg_pt_secondary_offline, 1);
+ else
+ atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+
+ md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
+ port->sep_tg_pt_secondary_stat = (explict) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+
+ printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ " to secondary access state: %s\n", (explict) ? "explict" :
+ "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
+
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ /*
+ * Do the optional transition delay after we set the secondary
+ * ALUA access state.
+ */
+ if (trans_delay_msecs != 0)
+ msleep_interruptible(trans_delay_msecs);
+ /*
+ * See if we need to update the ALUA fabric port metadata for
+ * secondary state and status
+ */
+ if (port->sep_tg_pt_secondary_write_md) {
+ md_buf = kzalloc(md_buf_len, GFP_KERNEL);
+ if (!(md_buf)) {
+ printk(KERN_ERR "Unable to allocate md_buf for"
+ " secondary ALUA access metadata\n");
+ return -1;
+ }
+ mutex_lock(&port->sep_tg_pt_md_mutex);
+ core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
+ md_buf, md_buf_len);
+ mutex_unlock(&port->sep_tg_pt_md_mutex);
+
+ kfree(md_buf);
+ }
+
+ return 0;
+}
+
+struct t10_alua_lu_gp *
+core_alua_allocate_lu_gp(const char *name, int def_group)
+{
+ struct t10_alua_lu_gp *lu_gp;
+
+ lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
+ if (!(lu_gp)) {
+ printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+ INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
+ spin_lock_init(&lu_gp->lu_gp_lock);
+ atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
+
+ if (def_group) {
+ lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+ lu_gp->lu_gp_valid_id = 1;
+ se_global->alua_lu_gps_count++;
+ }
+
+ return lu_gp;
+}
+
+int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
+{
+ struct t10_alua_lu_gp *lu_gp_tmp;
+ u16 lu_gp_id_tmp;
+ /*
+ * The lu_gp->lu_gp_id may only be set once.
+ */
+ if (lu_gp->lu_gp_valid_id) {
+ printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+ " ignoring request\n");
+ return -1;
+ }
+
+ spin_lock(&se_global->lu_gps_lock);
+ if (se_global->alua_lu_gps_count == 0x0000ffff) {
+ printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+ " 0x0000ffff reached\n");
+ spin_unlock(&se_global->lu_gps_lock);
+ kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+ return -1;
+ }
+again:
+ lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
+ se_global->alua_lu_gps_counter++;
+
+ list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+ if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
+ if (!(lu_gp_id))
+ goto again;
+
+ printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+ " already exists, ignoring request\n",
+ lu_gp_id);
+ spin_unlock(&se_global->lu_gps_lock);
+ return -1;
+ }
+ }
+
+ lu_gp->lu_gp_id = lu_gp_id_tmp;
+ lu_gp->lu_gp_valid_id = 1;
+ list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
+ se_global->alua_lu_gps_count++;
+ spin_unlock(&se_global->lu_gps_lock);
+
+ return 0;
+}
+
+static struct t10_alua_lu_gp_member *
+core_alua_allocate_lu_gp_mem(struct se_device *dev)
+{
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+
+ lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
+ if (!(lu_gp_mem)) {
+ printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
+ spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
+ atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
+
+ lu_gp_mem->lu_gp_mem_dev = dev;
+ dev->dev_alua_lu_gp_mem = lu_gp_mem;
+
+ return lu_gp_mem;
+}
+
+void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
+{
+ struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
+ /*
+ * Once we have reached this point, config_item_put() has
+ * already been called from target_core_alua_drop_lu_gp().
+ *
+ * Here, we remove the *lu_gp from the global list so that
+ * no associations can be made while we are releasing
+ * struct t10_alua_lu_gp.
+ */
+ spin_lock(&se_global->lu_gps_lock);
+ atomic_set(&lu_gp->lu_gp_shutdown, 1);
+ list_del(&lu_gp->lu_gp_list);
+ se_global->alua_lu_gps_count--;
+ spin_unlock(&se_global->lu_gps_lock);
+ /*
+ * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
+ * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
+ * released with core_alua_put_lu_gp_from_name()
+ */
+ while (atomic_read(&lu_gp->lu_gp_ref_cnt))
+ cpu_relax();
+ /*
+ * Release reference to struct t10_alua_lu_gp * from all associated
+ * struct se_device.
+ */
+ spin_lock(&lu_gp->lu_gp_lock);
+ list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
+ &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+ if (lu_gp_mem->lu_gp_assoc) {
+ list_del(&lu_gp_mem->lu_gp_mem_list);
+ lu_gp->lu_gp_members--;
+ lu_gp_mem->lu_gp_assoc = 0;
+ }
+ spin_unlock(&lu_gp->lu_gp_lock);
+ /*
+ * lu_gp_mem is associated with a single
+ * struct se_device->dev_alua_lu_gp_mem, and is released when
+ * struct se_device is released via core_alua_free_lu_gp_mem().
+ *
+ * If the passed lu_gp does NOT match the default_lu_gp, assume
+ * we want to re-associate a given lu_gp_mem with default_lu_gp.
+ */
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ if (lu_gp != se_global->default_lu_gp)
+ __core_alua_attach_lu_gp_mem(lu_gp_mem,
+ se_global->default_lu_gp);
+ else
+ lu_gp_mem->lu_gp = NULL;
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ spin_lock(&lu_gp->lu_gp_lock);
+ }
+ spin_unlock(&lu_gp->lu_gp_lock);
+
+ kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+}
+
+void core_alua_free_lu_gp_mem(struct se_device *dev)
+{
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua_lu_gp *lu_gp;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+
+ if (alua->alua_type != SPC3_ALUA_EMULATED)
+ return;
+
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!(lu_gp_mem))
+ return;
+
+ while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
+ cpu_relax();
+
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = lu_gp_mem->lu_gp;
+ if ((lu_gp)) {
+ spin_lock(&lu_gp->lu_gp_lock);
+ if (lu_gp_mem->lu_gp_assoc) {
+ list_del(&lu_gp_mem->lu_gp_mem_list);
+ lu_gp->lu_gp_members--;
+ lu_gp_mem->lu_gp_assoc = 0;
+ }
+ spin_unlock(&lu_gp->lu_gp_lock);
+ lu_gp_mem->lu_gp = NULL;
+ }
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
+}
+
+struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
+{
+ struct t10_alua_lu_gp *lu_gp;
+ struct config_item *ci;
+
+ spin_lock(&se_global->lu_gps_lock);
+ list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
+ if (!(lu_gp->lu_gp_valid_id))
+ continue;
+ ci = &lu_gp->lu_gp_group.cg_item;
+ if (!(strcmp(config_item_name(ci), name))) {
+ atomic_inc(&lu_gp->lu_gp_ref_cnt);
+ spin_unlock(&se_global->lu_gps_lock);
+ return lu_gp;
+ }
+ }
+ spin_unlock(&se_global->lu_gps_lock);
+
+ return NULL;
+}
+
+void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
+{
+ spin_lock(&se_global->lu_gps_lock);
+ atomic_dec(&lu_gp->lu_gp_ref_cnt);
+ spin_unlock(&se_global->lu_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
+ */
+void __core_alua_attach_lu_gp_mem(
+ struct t10_alua_lu_gp_member *lu_gp_mem,
+ struct t10_alua_lu_gp *lu_gp)
+{
+ spin_lock(&lu_gp->lu_gp_lock);
+ lu_gp_mem->lu_gp = lu_gp;
+ lu_gp_mem->lu_gp_assoc = 1;
+ list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
+ lu_gp->lu_gp_members++;
+ spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
+ */
+void __core_alua_drop_lu_gp_mem(
+ struct t10_alua_lu_gp_member *lu_gp_mem,
+ struct t10_alua_lu_gp *lu_gp)
+{
+ spin_lock(&lu_gp->lu_gp_lock);
+ list_del(&lu_gp_mem->lu_gp_mem_list);
+ lu_gp_mem->lu_gp = NULL;
+ lu_gp_mem->lu_gp_assoc = 0;
+ lu_gp->lu_gp_members--;
+ spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+ struct se_subsystem_dev *su_dev,
+ const char *name,
+ int def_group)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+ tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
+ if (!(tg_pt_gp)) {
+ printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
+ INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
+ mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+ spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
+ atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+ tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+ tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+ ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
+ /*
+ * Enable both explicit and implicit ALUA support by default
+ */
+ tg_pt_gp->tg_pt_gp_alua_access_type =
+ TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
+ /*
+ * Set the default Active/NonOptimized Delay in milliseconds
+ */
+ tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
+ tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+
+ if (def_group) {
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ tg_pt_gp->tg_pt_gp_id =
+ T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+ tg_pt_gp->tg_pt_gp_valid_id = 1;
+ T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+ list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+ &T10_ALUA(su_dev)->tg_pt_gps_list);
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ }
+
+ return tg_pt_gp;
+}
+
+int core_alua_set_tg_pt_gp_id(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ u16 tg_pt_gp_id)
+{
+ struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
+ u16 tg_pt_gp_id_tmp;
+ /*
+ * The tg_pt_gp->tg_pt_gp_id may only be set once.
+ */
+ if (tg_pt_gp->tg_pt_gp_valid_id) {
+ printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+ " ignoring request\n");
+ return -1;
+ }
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
+ printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+ " 0x0000ffff reached\n");
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+ return -1;
+ }
+again:
+ tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
+ T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+
+ list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ tg_pt_gp_list) {
+ if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
+ if (!(tg_pt_gp_id))
+ goto again;
+
+ printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+ " exists, ignoring request\n", tg_pt_gp_id);
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ return -1;
+ }
+ }
+
+ tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
+ tg_pt_gp->tg_pt_gp_valid_id = 1;
+ list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+ &T10_ALUA(su_dev)->tg_pt_gps_list);
+ T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+ return 0;
+}
+
+struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+ struct se_port *port)
+{
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+ tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
+ GFP_KERNEL);
+ if (!(tg_pt_gp_mem)) {
+ printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+ spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
+
+ tg_pt_gp_mem->tg_pt = port;
+ port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
+ atomic_set(&port->sep_tg_pt_gp_active, 1);
+
+ return tg_pt_gp_mem;
+}
+
+void core_alua_free_tg_pt_gp(
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+ /*
+ * Once we have reached this point, config_item_put() has already
+ * been called from target_core_alua_drop_tg_pt_gp().
+ *
+ * Here we remove *tg_pt_gp from the global list so that
+ * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
+ * can be made while we are releasing struct t10_alua_tg_pt_gp.
+ */
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ list_del(&tg_pt_gp->tg_pt_gp_list);
+ T10_ALUA(su_dev)->alua_tg_pt_gps_count--;
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ /*
+ * Allow a struct t10_alua_tg_pt_gp * referenced by
+ * core_alua_get_tg_pt_gp_by_name() in
+ * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
+ * to be released with core_alua_put_tg_pt_gp_from_name().
+ */
+ while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
+ cpu_relax();
+ /*
+ * Release reference to struct t10_alua_tg_pt_gp from all associated
+ * struct se_port.
+ */
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
+ &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
+ if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+ list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+ tg_pt_gp->tg_pt_gp_members--;
+ tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ /*
+ * tg_pt_gp_mem is associated with a single
+ * se_port->sep_alua_tg_pt_gp_mem, and is released via
+ * core_alua_free_tg_pt_gp_mem().
+ *
+ * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
+ * assume we want to re-associate a given tg_pt_gp_mem with
+ * default_tg_pt_gp.
+ */
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
+ __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+ T10_ALUA(su_dev)->default_tg_pt_gp);
+ } else
+ tg_pt_gp_mem->tg_pt_gp = NULL;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+ kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+}
+
+void core_alua_free_tg_pt_gp_mem(struct se_port *port)
+{
+ struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+ struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+ if (alua->alua_type != SPC3_ALUA_EMULATED)
+ return;
+
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!(tg_pt_gp_mem))
+ return;
+
+ while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
+ cpu_relax();
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if ((tg_pt_gp)) {
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+ list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+ tg_pt_gp->tg_pt_gp_members--;
+ tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ tg_pt_gp_mem->tg_pt_gp = NULL;
+ }
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
+}
+
+static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
+ struct se_subsystem_dev *su_dev,
+ const char *name)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct config_item *ci;
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ tg_pt_gp_list) {
+ if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ continue;
+ ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+ if (!(strcmp(config_item_name(ci), name))) {
+ atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ return tg_pt_gp;
+ }
+ }
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+ return NULL;
+}
+
+static void core_alua_put_tg_pt_gp_from_name(
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+}
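+
+/*
+ * Note on the reference pattern above: core_alua_get_tg_pt_gp_by_name()
+ * takes tg_pt_gps_lock and bumps tg_pt_gp_ref_cnt, and the matching
+ * core_alua_put_tg_pt_gp_from_name() drops it under the same lock. Since
+ * core_alua_free_tg_pt_gp() unhooks the group from tg_pt_gps_list before
+ * busy-waiting on the counter, no new by-name references can appear once
+ * that wait starts.
+ */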
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+void __core_alua_attach_tg_pt_gp_mem(
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
+ tg_pt_gp_mem->tg_pt_gp_assoc = 1;
+ list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
+ &tg_pt_gp->tg_pt_gp_mem_list);
+ tg_pt_gp->tg_pt_gp_members++;
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+static void __core_alua_drop_tg_pt_gp_mem(
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+ tg_pt_gp_mem->tg_pt_gp = NULL;
+ tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+ tg_pt_gp->tg_pt_gp_members--;
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
+{
+ struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+ struct config_item *tg_pt_ci;
+ struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ ssize_t len = 0;
+
+ if (alua->alua_type != SPC3_ALUA_EMULATED)
+ return len;
+
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!(tg_pt_gp_mem))
+ return len;
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if ((tg_pt_gp)) {
+ tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+ len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
+ " %hu\nTG Port Primary Access State: %s\nTG Port "
+ "Primary Access Status: %s\nTG Port Secondary Access"
+ " State: %s\nTG Port Secondary Access Status: %s\n",
+ config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
+ core_alua_dump_state(atomic_read(
+ &tg_pt_gp->tg_pt_gp_alua_access_state)),
+ core_alua_dump_status(
+ tg_pt_gp->tg_pt_gp_alua_access_status),
+ (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
+ "Offline" : "None",
+ core_alua_dump_status(port->sep_tg_pt_secondary_stat));
+ }
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ return len;
+}
+
+ssize_t core_alua_store_tg_pt_gp_info(
+ struct se_port *port,
+ const char *page,
+ size_t count)
+{
+ struct se_portal_group *tpg;
+ struct se_lun *lun;
+ struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+ struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ unsigned char buf[TG_PT_GROUP_NAME_BUF];
+ int move = 0;
+
+ tpg = port->sep_tpg;
+ lun = port->sep_lun;
+
+ if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+ printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
+ " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ config_item_name(&lun->lun_group.cg_item));
+ return -EINVAL;
+ }
+
+ if (count > TG_PT_GROUP_NAME_BUF) {
+ printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+ return -EINVAL;
+ }
+ memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+ memcpy(buf, page, count);
+ /*
+ * Any ALUA target port group alias besides "NULL" means we will be
+ * making a new group association.
+ */
+ if (strcmp(strstrip(buf), "NULL")) {
+ /*
+ * core_alua_get_tg_pt_gp_by_name() will increment reference to
+ * struct t10_alua_tg_pt_gp. This reference is released with
+ * core_alua_put_tg_pt_gp_from_name() below.
+ */
+ tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+ strstrip(buf));
+ if (!(tg_pt_gp_new))
+ return -ENODEV;
+ }
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!(tg_pt_gp_mem)) {
+ if (tg_pt_gp_new)
+ core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+ printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if ((tg_pt_gp)) {
+ /*
+ * Clearing an existing tg_pt_gp association, and replacing
+ * with the default_tg_pt_gp.
+ */
+ if (!(tg_pt_gp_new)) {
+ printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+ " %s/tpgt_%hu/%s from ALUA Target Port Group:"
+ " alua/%s, ID: %hu back to"
+ " default_tg_pt_gp\n",
+ TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ config_item_name(&lun->lun_group.cg_item),
+ config_item_name(
+ &tg_pt_gp->tg_pt_gp_group.cg_item),
+ tg_pt_gp->tg_pt_gp_id);
+
+ __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+ __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+ T10_ALUA(su_dev)->default_tg_pt_gp);
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ return count;
+ }
+ /*
+ * Removing existing association of tg_pt_gp_mem with tg_pt_gp
+ */
+ __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+ move = 1;
+ }
+ /*
+ * Associate tg_pt_gp_mem with tg_pt_gp_new.
+ */
+ __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+ " Target Port Group: alua/%s, ID: %hu\n", (move) ?
+ "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ config_item_name(&lun->lun_group.cg_item),
+ config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
+ tg_pt_gp_new->tg_pt_gp_id);
+
+ core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+ return count;
+}
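+
+/*
+ * Example usage of the store handler above from userspace (illustrative
+ * only: the fabric, WWN and TPG names are hypothetical, and the per-LUN
+ * attribute name is assumed from target_core_store_alua_tg_pt_gp()):
+ *
+ *   echo some_group > \
+ *     /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_gp
+ *
+ * moves the port into the existing target port group "some_group", while
+ * writing the literal string "NULL" moves it back to default_tg_pt_gp.
+ */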
+
+ssize_t core_alua_show_access_type(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
+ (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
+ return sprintf(page, "Implict and Explict\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
+ return sprintf(page, "Implict\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
+ return sprintf(page, "Explict\n");
+ else
+ return sprintf(page, "None\n");
+}
+
+ssize_t core_alua_store_access_type(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract alua_access_type\n");
+ return -EINVAL;
+ }
+ if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
+ printk(KERN_ERR "Illegal value for alua_access_type:"
+ " %lu\n", tmp);
+ return -EINVAL;
+ }
+ if (tmp == 3)
+ tg_pt_gp->tg_pt_gp_alua_access_type =
+ TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
+ else if (tmp == 2)
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
+ else if (tmp == 1)
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
+ else
+ tg_pt_gp->tg_pt_gp_alua_access_type = 0;
+
+ return count;
+}
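+
+/*
+ * The values accepted by the store handler above map to:
+ *   0 = None, 1 = TPGS_IMPLICT_ALUA, 2 = TPGS_EXPLICT_ALUA,
+ *   3 = TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA
+ * so, for example, "echo 3 > alua_access_type" (attribute name assumed
+ * from the show/store pairing) enables both implicit and explicit ALUA
+ * for the target port group.
+ */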
+
+ssize_t core_alua_show_nonop_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
+}
+
+ssize_t core_alua_store_nonop_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+ return -EINVAL;
+ }
+ if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
+ printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+ " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
+ ALUA_MAX_NONOP_DELAY_MSECS);
+ return -EINVAL;
+ }
+ tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
+
+ return count;
+}
+
+ssize_t core_alua_show_trans_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+}
+
+ssize_t core_alua_store_trans_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+ return -EINVAL;
+ }
+ if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
+ printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+ " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
+ ALUA_MAX_TRANS_DELAY_MSECS);
+ return -EINVAL;
+ }
+ tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
+
+ return count;
+}
+
+ssize_t core_alua_show_preferred_bit(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
+}
+
+ssize_t core_alua_store_preferred_bit(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+ return -EINVAL;
+ }
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+ return -EINVAL;
+ }
+ tg_pt_gp->tg_pt_gp_pref = (int)tmp;
+
+ return count;
+}
+
+ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
+{
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return sprintf(page, "%d\n",
+ atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
+}
+
+ssize_t core_alua_store_offline_bit(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ unsigned long tmp;
+ int ret;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
+ return -EINVAL;
+ }
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
+ tmp);
+ return -EINVAL;
+ }
+ tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
+ if (!(tg_pt_gp_mem)) {
+ printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
+ return -EINVAL;
+ }
+
+ ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
+ lun->lun_sep, 0, (int)tmp);
+ if (ret < 0)
+ return -EINVAL;
+
+ return count;
+}
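+
+/*
+ * Writing 1 to the secondary offline attribute (a per-LUN configfs file,
+ * presumably named alua_tg_pt_offline per the printk above) requests an
+ * explicit secondary state transition to Offline via
+ * core_alua_set_tg_pt_secondary_state(); writing 0 brings the port back
+ * to the None secondary state.
+ */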
+
+ssize_t core_alua_show_secondary_status(
+ struct se_lun *lun,
+ char *page)
+{
+ return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
+}
+
+ssize_t core_alua_store_secondary_status(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
+ return -EINVAL;
+ }
+ if ((tmp != ALUA_STATUS_NONE) &&
+ (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+ (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
+ tmp);
+ return -EINVAL;
+ }
+ lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
+
+ return count;
+}
+
+ssize_t core_alua_show_secondary_write_metadata(
+ struct se_lun *lun,
+ char *page)
+{
+ return sprintf(page, "%d\n",
+ lun->lun_sep->sep_tg_pt_secondary_write_md);
+}
+
+ssize_t core_alua_store_secondary_write_metadata(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
+ return -EINVAL;
+ }
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
+ " %lu\n", tmp);
+ return -EINVAL;
+ }
+ lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
+
+ return count;
+}
+
+int core_setup_alua(struct se_device *dev, int force_pt)
+{
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ /*
+ * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
+ * of the underlying SCSI hardware. In Linux/SCSI terms, this can
+ * cause a problem because libata and some SATA RAID HBAs appear
+ * under Linux/SCSI, but emulate SCSI logic themselves.
+ */
+ if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
+ alua->alua_type = SPC_ALUA_PASSTHROUGH;
+ alua->alua_state_check = &core_alua_state_check_nop;
+ printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+ " emulation\n", TRANSPORT(dev)->name);
+ return 0;
+ }
+ /*
+ * If SPC-3 or above is reported by real or emulated struct se_device,
+ * use emulated ALUA.
+ */
+ if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+ printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
+ " device\n", TRANSPORT(dev)->name);
+ /*
+ * Associate this struct se_device with the default ALUA
+ * LUN Group.
+ */
+ lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
+ if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
+ return -1;
+
+ alua->alua_type = SPC3_ALUA_EMULATED;
+ alua->alua_state_check = &core_alua_state_check;
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ __core_alua_attach_lu_gp_mem(lu_gp_mem,
+ se_global->default_lu_gp);
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
+ " core/alua/lu_gps/default_lu_gp\n",
+ TRANSPORT(dev)->name);
+ } else {
+ alua->alua_type = SPC2_ALUA_DISABLED;
+ alua->alua_state_check = &core_alua_state_check_nop;
+ printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
+ " device\n", TRANSPORT(dev)->name);
+ }
+
+ return 0;
+}
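+
+/*
+ * Summary of the outcomes above:
+ *   - pSCSI passthrough with emulate_alua=0, or force_pt:
+ *       SPC_ALUA_PASSTHROUGH (no emulation)
+ *   - SPC-3 or newer device: SPC3_ALUA_EMULATED, attached to default_lu_gp
+ *   - older (SPC-2) device:  SPC2_ALUA_DISABLED
+ */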
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
new file mode 100644
index 000000000000..c86f97a081ed
--- /dev/null
+++ b/drivers/target/target_core_alua.h
@@ -0,0 +1,126 @@
+#ifndef TARGET_CORE_ALUA_H
+#define TARGET_CORE_ALUA_H
+
+/*
+ * INQUIRY response data, TPGS Field
+ *
+ * from spc4r17 section 6.4.2 Table 135
+ */
+#define TPGS_NO_ALUA 0x00
+#define TPGS_IMPLICT_ALUA 0x10
+#define TPGS_EXPLICT_ALUA 0x20
+
+/*
+ * ASYMMETRIC ACCESS STATE field
+ *
+ * from spc4r17 section 6.27 Table 245
+ */
+#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
+#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
+#define ALUA_ACCESS_STATE_STANDBY 0x2
+#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
+#define ALUA_ACCESS_STATE_OFFLINE 0xe
+#define ALUA_ACCESS_STATE_TRANSITION 0xf
+
+/*
+ * REPORT_TARGET_PORT_GROUP STATUS CODE
+ *
+ * from spc4r17 section 6.27 Table 246
+ */
+#define ALUA_STATUS_NONE 0x00
+#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_04H_ALUA_STATE_TRANSITION 0x0a
+#define ASCQ_04H_ALUA_TG_PT_STANDBY 0x0b
+#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE 0x0c
+#define ASCQ_04H_ALUA_OFFLINE 0x12
+
+/*
+ * Used as the default for Active/NonOptimized delay (in milliseconds).
+ * This can also be changed via configfs on a per target port group basis.
+ */
+#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
+#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
+/*
+ * Used for implicit and explicit ALUA transition delay, which is disabled
+ * by default and is intended for debugging client side ALUA code.
+ */
+#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
+#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
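+/*
+ * Values above the respective _MAX_ limits are rejected by
+ * core_alua_store_nonop_delay_msecs() and
+ * core_alua_store_trans_delay_msecs().
+ */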
+/*
+ * Used by core_alua_update_tpg_primary_metadata() and
+ * core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_METADATA_PATH_LEN 512
+/*
+ * Used by core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_SECONDARY_METADATA_WWN_LEN 256
+
+extern struct kmem_cache *t10_alua_lu_gp_cache;
+extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+extern int core_emulate_report_target_port_groups(struct se_cmd *);
+extern int core_emulate_set_target_port_groups(struct se_cmd *);
+extern int core_alua_check_nonop_delay(struct se_cmd *);
+extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
+ struct se_device *, struct se_port *,
+ struct se_node_acl *, int, int);
+extern char *core_alua_dump_status(int);
+extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
+extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
+extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
+extern void core_alua_free_lu_gp_mem(struct se_device *);
+extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
+extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
+extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
+ struct t10_alua_lu_gp *);
+extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
+ struct t10_alua_lu_gp *);
+extern void core_alua_drop_lu_gp_dev(struct se_device *);
+extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+ struct se_subsystem_dev *, const char *, int);
+extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
+extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+ struct se_port *);
+extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
+extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
+extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
+ struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
+ size_t);
+extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
+extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
+ const char *, size_t);
+extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+ char *);
+extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+ const char *, size_t);
+extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+ char *);
+extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+ const char *, size_t);
+extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
+ char *);
+extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
+ const char *, size_t);
+extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
+extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
+ size_t);
+extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
+extern ssize_t core_alua_store_secondary_status(struct se_lun *,
+ const char *, size_t);
+extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
+ char *);
+extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
+ const char *, size_t);
+extern int core_setup_alua(struct se_device *, int);
+
+#endif /* TARGET_CORE_ALUA_H */
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
new file mode 100644
index 000000000000..366080baf474
--- /dev/null
+++ b/drivers/target/target_core_cdb.c
@@ -0,0 +1,1131 @@
+/*
+ * CDB emulation for non-READ/WRITE commands.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include "target_core_ua.h"
+
+static void
+target_fill_alua_data(struct se_port *port, unsigned char *buf)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+ /*
+ * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
+ */
+ buf[5] = 0x80;
+
+ /*
+ * Set TPGS field for explicit and/or implicit ALUA access type
+ * and operation.
+ *
+ * See spc4r17 section 6.4.2 Table 135
+ */
+ if (!port)
+ return;
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!tg_pt_gp_mem)
+ return;
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if (tg_pt_gp)
+ buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+}
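+
+/*
+ * Example of the resulting standard INQUIRY byte 5: with both access types
+ * enabled on the port's target port group this becomes
+ * 0x80 | TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA == 0xb0, i.e. SCCS=1 and
+ * TPGS=11b.
+ */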
+
+static int
+target_emulate_inquiry_std(struct se_cmd *cmd)
+{
+ struct se_lun *lun = SE_LUN(cmd);
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *buf = cmd->t_task->t_task_buf;
+
+ /*
+ * Make sure we at least have 6 bytes of INQUIRY response
+ * payload going back for EVPD=0
+ */
+ if (cmd->data_length < 6) {
+ printk(KERN_ERR "SCSI Inquiry payload length: %u"
+ " too small for EVPD=0\n", cmd->data_length);
+ return -1;
+ }
+
+ buf[0] = dev->transport->get_device_type(dev);
+ if (buf[0] == TYPE_TAPE)
+ buf[1] = 0x80;
+ buf[2] = dev->transport->get_device_rev(dev);
+
+ /*
+ * Enable SCCS and TPGS fields for Emulated ALUA
+ */
+ if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+ target_fill_alua_data(lun->lun_sep, buf);
+
+ if (cmd->data_length < 8) {
+ buf[4] = 1; /* Set additional length to 1 */
+ return 0;
+ }
+
+ buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
+
+ /*
+ * Do not include vendor, product, or revision info in INQUIRY
+ * response payload for cdbs with a small allocation length.
+ */
+ if (cmd->data_length < 36) {
+ buf[4] = 3; /* Set additional length to 3 */
+ return 0;
+ }
+
+ snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
+ snprintf((unsigned char *)&buf[16], 16, "%s",
+ &DEV_T10_WWN(dev)->model[0]);
+ snprintf((unsigned char *)&buf[32], 4, "%s",
+ &DEV_T10_WWN(dev)->revision[0]);
+ buf[4] = 31; /* Set additional length to 31 */
+ return 0;
+}
+
+/* supported vital product data pages */
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+ buf[1] = 0x00;
+ if (cmd->data_length < 8)
+ return 0;
+
+ buf[4] = 0x0;
+ /*
+ * Only report the INQUIRY EVPD=1 pages after a valid NAA
+ * Registered Extended LUN WWN has been set via ConfigFS
+ * during device creation/restart.
+ */
+ if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
+ SDF_EMULATED_VPD_UNIT_SERIAL) {
+ buf[3] = 3;
+ buf[5] = 0x80;
+ buf[6] = 0x83;
+ buf[7] = 0x86;
+ }
+
+ return 0;
+}
+
+/* unit serial number */
+static int
+target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ u16 len = 0;
+
+ buf[1] = 0x80;
+ if (dev->se_sub_dev->su_dev_flags &
+ SDF_EMULATED_VPD_UNIT_SERIAL) {
+ u32 unit_serial_len;
+
+ unit_serial_len =
+ strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+ unit_serial_len++; /* For NULL Terminator */
+
+ if (((len + 4) + unit_serial_len) > cmd->data_length) {
+ len += unit_serial_len;
+ buf[2] = ((len >> 8) & 0xff);
+ buf[3] = (len & 0xff);
+ return 0;
+ }
+ len += sprintf((unsigned char *)&buf[4], "%s",
+ &DEV_T10_WWN(dev)->unit_serial[0]);
+ len++; /* Extra Byte for NULL Terminator */
+ buf[3] = len;
+ }
+ return 0;
+}
+
+/*
+ * Device identification VPD, for a complete list of
+ * DESIGNATOR TYPEs see spc4r17 Table 459.
+ */
+static int
+target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_lun *lun = SE_LUN(cmd);
+ struct se_port *port = NULL;
+ struct se_portal_group *tpg = NULL;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ unsigned char binary, binary_new;
+ unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
+ u32 prod_len;
+ u32 unit_serial_len, off = 0;
+ int i;
+ u16 len = 0, id_len;
+
+ buf[1] = 0x83;
+ off = 4;
+
+ /*
+ * NAA IEEE Registered Extended Assigned designator format, see
+ * spc4r17 section 7.7.3.6.5
+ *
+ * We depend upon a target_core_mod/ConfigFS provided
+ * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
+ * value in order to return the NAA id.
+ */
+ if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+ goto check_t10_vend_desc;
+
+ if (off + 20 > cmd->data_length)
+ goto check_t10_vend_desc;
+
+ /* CODE SET == Binary */
+ buf[off++] = 0x1;
+
+ /* Set ASSOCIATION == addressed logical unit: 00b */
+ buf[off] = 0x00;
+
+ /* Identifier/Designator type == NAA identifier */
+ buf[off++] = 0x3;
+ off++;
+
+ /* Identifier/Designator length */
+ buf[off++] = 0x10;
+
+ /*
+ * Start NAA IEEE Registered Extended Identifier/Designator
+ */
+ buf[off++] = (0x6 << 4);
+
+ /*
+ * Use OpenFabrics IEEE Company ID: 00 14 05
+ */
+ buf[off++] = 0x01;
+ buf[off++] = 0x40;
+ buf[off] = (0x5 << 4);
+
+ /*
+ * Return ConfigFS Unit Serial Number information for
+ * VENDOR_SPECIFIC_IDENTIFIER and
+ * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
+ */
+ binary = transport_asciihex_to_binaryhex(
+ &DEV_T10_WWN(dev)->unit_serial[0]);
+ buf[off++] |= (binary & 0xf0) >> 4;
+ for (i = 0; i < 24; i += 2) {
+ binary_new = transport_asciihex_to_binaryhex(
+ &DEV_T10_WWN(dev)->unit_serial[i+2]);
+ buf[off] = (binary & 0x0f) << 4;
+ buf[off++] |= (binary_new & 0xf0) >> 4;
+ binary = binary_new;
+ }
+ len = 20;
+ off = (len + 4);
+
+check_t10_vend_desc:
+ /*
+ * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
+ */
+ id_len = 8; /* For Vendor field */
+ prod_len = 4; /* For VPD Header */
+ prod_len += 8; /* For Vendor field */
+ prod_len += strlen(prod);
+ prod_len++; /* For : */
+
+ if (dev->se_sub_dev->su_dev_flags &
+ SDF_EMULATED_VPD_UNIT_SERIAL) {
+ unit_serial_len =
+ strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+ unit_serial_len++; /* For NULL Terminator */
+
+ if ((len + (id_len + 4) +
+ (prod_len + unit_serial_len)) >
+ cmd->data_length) {
+ len += (prod_len + unit_serial_len);
+ goto check_port;
+ }
+ id_len += sprintf((unsigned char *)&buf[off+12],
+ "%s:%s", prod,
+ &DEV_T10_WWN(dev)->unit_serial[0]);
+ }
+ buf[off] = 0x2; /* ASCII */
+ buf[off+1] = 0x1; /* T10 Vendor ID */
+ buf[off+2] = 0x0;
+ memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
+ /* Extra Byte for NULL Terminator */
+ id_len++;
+ /* Identifier Length */
+ buf[off+3] = id_len;
+ /* Header size for Designation descriptor */
+ len += (id_len + 4);
+ off += (id_len + 4);
+ /*
+ * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
+ */
+check_port:
+ port = lun->lun_sep;
+ if (port) {
+ struct t10_alua_lu_gp *lu_gp;
+ u32 padding, scsi_name_len;
+ u16 lu_gp_id = 0;
+ u16 tg_pt_gp_id = 0;
+ u16 tpgt;
+
+ tpg = port->sep_tpg;
+ /*
+ * Relative target port identifier, see spc4r17
+ * section 7.7.3.7
+ *
+ * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+ * section 7.5.1 Table 362
+ */
+ if (((len + 4) + 8) > cmd->data_length) {
+ len += 8;
+ goto check_tpgi;
+ }
+ buf[off] =
+ (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x1; /* CODE SET == Binary */
+ buf[off] = 0x80; /* Set PIV=1 */
+ /* Set ASSOCIATION == target port: 01b */
+ buf[off] |= 0x10;
+ /* DESIGNATOR TYPE == Relative target port identifier */
+ buf[off++] |= 0x4;
+ off++; /* Skip over Reserved */
+ buf[off++] = 4; /* DESIGNATOR LENGTH */
+ /* Skip over Obsolete field in RTPI payload
+ * in Table 472 */
+ off += 2;
+ buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+ buf[off++] = (port->sep_rtpi & 0xff);
+ len += 8; /* Header size + Designation descriptor */
+ /*
+ * Target port group identifier, see spc4r17
+ * section 7.7.3.8
+ *
+ * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+ * section 7.5.1 Table 362
+ */
+check_tpgi:
+ if (T10_ALUA(dev->se_sub_dev)->alua_type !=
+ SPC3_ALUA_EMULATED)
+ goto check_scsi_name;
+
+ if (((len + 4) + 8) > cmd->data_length) {
+ len += 8;
+ goto check_lu_gp;
+ }
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!tg_pt_gp_mem)
+ goto check_lu_gp;
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if (!(tg_pt_gp)) {
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ goto check_lu_gp;
+ }
+ tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ buf[off] =
+ (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x1; /* CODE SET == Binary */
+ buf[off] = 0x80; /* Set PIV=1 */
+ /* Set ASSOCIATION == target port: 01b */
+ buf[off] |= 0x10;
+ /* DESIGNATOR TYPE == Target port group identifier */
+ buf[off++] |= 0x5;
+ off++; /* Skip over Reserved */
+ buf[off++] = 4; /* DESIGNATOR LENGTH */
+ off += 2; /* Skip over Reserved Field */
+ buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
+ buf[off++] = (tg_pt_gp_id & 0xff);
+ len += 8; /* Header size + Designation descriptor */
+ /*
+ * Logical Unit Group identifier, see spc4r17
+ * section 7.7.3.8
+ */
+check_lu_gp:
+ if (((len + 4) + 8) > cmd->data_length) {
+ len += 8;
+ goto check_scsi_name;
+ }
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!(lu_gp_mem))
+ goto check_scsi_name;
+
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = lu_gp_mem->lu_gp;
+ if (!(lu_gp)) {
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+ goto check_scsi_name;
+ }
+ lu_gp_id = lu_gp->lu_gp_id;
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ buf[off++] |= 0x1; /* CODE SET == Binary */
+ /* DESIGNATOR TYPE == Logical Unit Group identifier */
+ buf[off++] |= 0x6;
+ off++; /* Skip over Reserved */
+ buf[off++] = 4; /* DESIGNATOR LENGTH */
+ off += 2; /* Skip over Reserved Field */
+ buf[off++] = ((lu_gp_id >> 8) & 0xff);
+ buf[off++] = (lu_gp_id & 0xff);
+ len += 8; /* Header size + Designation descriptor */
+ /*
+ * SCSI name string designator, see spc4r17
+ * section 7.7.3.11
+ *
+ * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+ * section 7.5.1 Table 362
+ */
+check_scsi_name:
+ scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
+ /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
+ scsi_name_len += 10;
+ /* Check for 4-byte padding */
+ padding = ((-scsi_name_len) & 3);
+ if (padding != 0)
+ scsi_name_len += padding;
+ /* Header size + Designation descriptor */
+ scsi_name_len += 4;
+
+ if (((len + 4) + scsi_name_len) > cmd->data_length) {
+ len += scsi_name_len;
+ goto set_len;
+ }
+ buf[off] =
+ (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+ buf[off] = 0x80; /* Set PIV=1 */
+ /* Set ASSOCIATION == target port: 01b */
+ buf[off] |= 0x10;
+ /* DESIGNATOR TYPE == SCSI name string */
+ buf[off++] |= 0x8;
+ off += 2; /* Skip over Reserved and length */
+ /*
+ * SCSI name string identifier containing $FABRIC_MOD
+ * dependent information. For LIO-Target and iSCSI
+ * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
+ * UTF-8 encoding.
+ */
+ tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+ scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
+ TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
+ scsi_name_len += 1 /* Include NULL terminator */;
+ /*
+ * The null-terminated, null-padded (see 4.4.2) SCSI
+ * NAME STRING field contains a UTF-8 format string.
+ * The number of bytes in the SCSI NAME STRING field
+ * (i.e., the value in the DESIGNATOR LENGTH field)
+ * shall be no larger than 256 and shall be a multiple
+ * of four.
+ */
+ if (padding)
+ scsi_name_len += padding;
+
+ buf[off-1] = scsi_name_len;
+ off += scsi_name_len;
+ /* Header size + Designation descriptor */
+ len += (scsi_name_len + 4);
+ }
+set_len:
+ buf[2] = ((len >> 8) & 0xff);
+ buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
+ return 0;
+}
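+
+/*
+ * Designator order emitted above, configuration and space permitting:
+ * NAA IEEE Registered Extended, T10 vendor identification, Relative
+ * target port identifier, Target port group, Logical unit group and
+ * SCSI name string. A descriptor that would overflow cmd->data_length
+ * is skipped, but (apart from the NAA designator) its size is still
+ * counted in the PAGE LENGTH returned in bytes 2-3.
+ */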
+
+/* Extended INQUIRY Data VPD Page */
+static int
+target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+{
+ if (cmd->data_length < 60)
+ return 0;
+
+ buf[1] = 0x86;
+ buf[2] = 0x3c;
+ /* Set HEADSUP, ORDSUP, SIMPSUP */
+ buf[5] = 0x07;
+
+ /* If WriteCache emulation is enabled, set V_SUP */
+ if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
+ buf[6] = 0x01;
+ return 0;
+}
+
+/* Block Limits VPD page */
+static int
+target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ int have_tp = 0;
+
+ /*
+ * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
+ * emulate_tpu=1 or emulate_tpws=1 we expect a
+ * different page length for Thin Provisioning.
+ */
+ if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ have_tp = 1;
+
+ if (cmd->data_length < (0x10 + 4)) {
+ printk(KERN_INFO "Received data_length: %u"
+ " too small for EVPD 0xb0\n",
+ cmd->data_length);
+ return -1;
+ }
+
+ if (have_tp && cmd->data_length < (0x3c + 4)) {
+ printk(KERN_INFO "Received data_length: %u"
+ " too small for TPE=1 EVPD 0xb0\n",
+ cmd->data_length);
+ have_tp = 0;
+ }
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[1] = 0xb0;
+ buf[3] = have_tp ? 0x3c : 0x10;
+
+ /*
+ * Set OPTIMAL TRANSFER LENGTH GRANULARITY
+ */
+ put_unaligned_be16(1, &buf[6]);
+
+ /*
+ * Set MAXIMUM TRANSFER LENGTH
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
+
+ /*
+ * Set OPTIMAL TRANSFER LENGTH
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
+
+ /*
+ * Exit now if we don't support TP or the initiator sent a too
+ * short buffer.
+ */
+ if (!have_tp || cmd->data_length < (0x3c + 4))
+ return 0;
+
+ /*
+ * Set MAXIMUM UNMAP LBA COUNT
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
+
+ /*
+ * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
+ &buf[24]);
+
+ /*
+ * Set OPTIMAL UNMAP GRANULARITY
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
+
+ /*
+ * UNMAP GRANULARITY ALIGNMENT
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
+ &buf[32]);
+ if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
+ buf[32] |= 0x80; /* Set the UGAVALID bit */
+
+ return 0;
+}
+
+/* Thin Provisioning VPD */
+static int
+target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = SE_DEV(cmd);
+
+ /*
+ * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
+ *
+ * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
+ * zero, then the page length shall be set to 0004h. If the DP bit
+ * is set to one, then the page length shall be set to the value
+ * defined in table 162.
+ */
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[1] = 0xb2;
+
+ /*
+ * Set Hardcoded length mentioned above for DP=0
+ */
+ put_unaligned_be16(0x0004, &buf[2]);
+
+ /*
+ * The THRESHOLD EXPONENT field indicates the threshold set size in
+ * LBAs as a power of 2 (i.e., the threshold set size is equal to
+ * 2^(threshold exponent)).
+ *
+ * Note that this is currently set to 0x00 as mkp says it will be
+ * changing again. We can enable this once it has settled in T10
+ * and is actually used by Linux/SCSI ML code.
+ */
+ buf[4] = 0x00;
+
+ /*
+ * A TPU bit set to one indicates that the device server supports
+ * the UNMAP command (see 5.25). A TPU bit set to zero indicates
+ * that the device server does not support the UNMAP command.
+ */
+ if (DEV_ATTRIB(dev)->emulate_tpu != 0)
+ buf[5] = 0x80;
+
+ /*
+ * A TPWS bit set to one indicates that the device server supports
+ * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
+ * A TPWS bit set to zero indicates that the device server does not
+ * support the use of the WRITE SAME (16) command to unmap LBAs.
+ */
+ if (DEV_ATTRIB(dev)->emulate_tpws != 0)
+ buf[5] |= 0x40;
+
+ return 0;
+}
+
+static int
+target_emulate_inquiry(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *buf = cmd->t_task->t_task_buf;
+ unsigned char *cdb = cmd->t_task->t_task_cdb;
+
+ if (!(cdb[1] & 0x1))
+ return target_emulate_inquiry_std(cmd);
+
+ /*
+ * Make sure we at least have 4 bytes of INQUIRY response
+ * payload for 0x00 going back for EVPD=1. Note that 0x80
+ * and 0x83 will check for enough payload data length and
+ * jump to set_len: label when there is not enough inquiry EVPD
+ * payload length left for the next outgoing EVPD metadata
+ */
+ if (cmd->data_length < 4) {
+ printk(KERN_ERR "SCSI Inquiry payload length: %u"
+ " too small for EVPD=1\n", cmd->data_length);
+ return -1;
+ }
+ buf[0] = dev->transport->get_device_type(dev);
+
+ switch (cdb[2]) {
+ case 0x00:
+ return target_emulate_evpd_00(cmd, buf);
+ case 0x80:
+ return target_emulate_evpd_80(cmd, buf);
+ case 0x83:
+ return target_emulate_evpd_83(cmd, buf);
+ case 0x86:
+ return target_emulate_evpd_86(cmd, buf);
+ case 0xb0:
+ return target_emulate_evpd_b0(cmd, buf);
+ case 0xb2:
+ return target_emulate_evpd_b2(cmd, buf);
+ default:
+ printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+target_emulate_readcapacity(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *buf = cmd->t_task->t_task_buf;
+ u32 blocks = dev->transport->get_blocks(dev);
+
+ buf[0] = (blocks >> 24) & 0xff;
+ buf[1] = (blocks >> 16) & 0xff;
+ buf[2] = (blocks >> 8) & 0xff;
+ buf[3] = blocks & 0xff;
+ buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
+ buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
+ buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
+ buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
+ /*
+ * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
+ */
+ if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ put_unaligned_be32(0xFFFFFFFF, &buf[0]);
+
+ return 0;
+}
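+
+/*
+ * Returning 0xFFFFFFFF in RETURNED LOGICAL BLOCK ADDRESS follows the
+ * sbc3 convention for "capacity information does not fit here": the
+ * initiator is expected to follow up with SERVICE ACTION IN /
+ * READ CAPACITY (16), which also carries the TPE bit set below.
+ */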
+
+static int
+target_emulate_readcapacity_16(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *buf = cmd->t_task->t_task_buf;
+ unsigned long long blocks = dev->transport->get_blocks(dev);
+
+ buf[0] = (blocks >> 56) & 0xff;
+ buf[1] = (blocks >> 48) & 0xff;
+ buf[2] = (blocks >> 40) & 0xff;
+ buf[3] = (blocks >> 32) & 0xff;
+ buf[4] = (blocks >> 24) & 0xff;
+ buf[5] = (blocks >> 16) & 0xff;
+ buf[6] = (blocks >> 8) & 0xff;
+ buf[7] = blocks & 0xff;
+ buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
+ buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
+ buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
+ buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
+ /*
+ * Set Thin Provisioning Enable bit following sbc3r22 in section
+ * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
+ */
+ if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ buf[14] = 0x80;
+
+ return 0;
+}
+
+static int
+target_modesense_rwrecovery(unsigned char *p)
+{
+ p[0] = 0x01;
+ p[1] = 0x0a;
+
+ return 12;
+}
+
+static int
+target_modesense_control(struct se_device *dev, unsigned char *p)
+{
+ p[0] = 0x0a;
+ p[1] = 0x0a;
+ p[2] = 2;
+ /*
+ * From spc4r17, section 7.4.6 Control mode Page
+ *
+ * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
+ *
+ * 00b: The logical unit shall clear any unit attention condition
+ * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+ * status and shall not establish a unit attention condition when a
+ * command is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
+ * status.
+ *
+ * 10b: The logical unit shall not clear any unit attention condition
+ * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+ * status and shall not establish a unit attention condition when
+ * a command is completed with BUSY, TASK SET FULL, or RESERVATION
+ * CONFLICT status.
+ *
+ * 11b: The logical unit shall not clear any unit attention condition
+ * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+ * status and shall establish a unit attention condition for the
+ * initiator port associated with the I_T nexus on which the BUSY,
+ * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
+ * Depending on the status, the additional sense code shall be set to
+ * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
+ * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
+ * command, a unit attention condition shall be established only once
+ * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
+ * to the number of commands completed with one of those status codes.
+ */
+ p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
+ (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ /*
+ * From spc4r17, section 7.4.6 Control mode Page
+ *
+ * Task Aborted Status (TAS) bit set to zero.
+ *
+ * A task aborted status (TAS) bit set to zero specifies that aborted
+ * tasks shall be terminated by the device server without any response
+ * to the application client. A TAS bit set to one specifies that tasks
+ * aborted by the actions of an I_T nexus other than the I_T nexus on
+ * which the command was received shall be completed with TASK ABORTED
+ * status (see SAM-4).
+ */
+ p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
+ p[8] = 0xff;
+ p[9] = 0xff;
+ p[11] = 30;
+
+ return 12;
+}
+
+static int
+target_modesense_caching(struct se_device *dev, unsigned char *p)
+{
+ p[0] = 0x08;
+ p[1] = 0x12;
+ if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
+ p[2] = 0x04; /* Write Cache Enable */
+ p[12] = 0x20; /* Disabled Read Ahead */
+
+ return 20;
+}
+
+static void
+target_modesense_write_protect(unsigned char *buf, int type)
+{
+ /*
+ * I believe that the WP bit (bit 7) in the mode header is the same for
+ * all device types.
+ */
+ switch (type) {
+ case TYPE_DISK:
+ case TYPE_TAPE:
+ default:
+ buf[0] |= 0x80; /* WP bit */
+ break;
+ }
+}
+
+static void
+target_modesense_dpofua(unsigned char *buf, int type)
+{
+ switch (type) {
+ case TYPE_DISK:
+ buf[0] |= 0x10; /* DPOFUA bit */
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+target_emulate_modesense(struct se_cmd *cmd, int ten)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ char *cdb = cmd->t_task->t_task_cdb;
+ unsigned char *rbuf = cmd->t_task->t_task_buf;
+ int type = dev->transport->get_device_type(dev);
+ int offset = (ten) ? 8 : 4;
+ int length = 0;
+ unsigned char buf[SE_MODE_PAGE_BUF];
+
+ memset(buf, 0, SE_MODE_PAGE_BUF);
+
+ switch (cdb[2] & 0x3f) {
+ case 0x01:
+ length = target_modesense_rwrecovery(&buf[offset]);
+ break;
+ case 0x08:
+ length = target_modesense_caching(dev, &buf[offset]);
+ break;
+ case 0x0a:
+ length = target_modesense_control(dev, &buf[offset]);
+ break;
+ case 0x3f:
+ length = target_modesense_rwrecovery(&buf[offset]);
+ length += target_modesense_caching(dev, &buf[offset+length]);
+ length += target_modesense_control(dev, &buf[offset+length]);
+ break;
+ default:
+ printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
+ cdb[2] & 0x3f);
+ return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+ }
+ offset += length;
+
+ if (ten) {
+ offset -= 2;
+ buf[0] = (offset >> 8) & 0xff;
+ buf[1] = offset & 0xff;
+
+ if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+ (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+ target_modesense_write_protect(&buf[3], type);
+
+ if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
+ (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+ target_modesense_dpofua(&buf[3], type);
+
+ if ((offset + 2) > cmd->data_length)
+ offset = cmd->data_length;
+
+ } else {
+ offset -= 1;
+ buf[0] = offset & 0xff;
+
+ if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+ (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+ target_modesense_write_protect(&buf[2], type);
+
+ if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
+ (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+ target_modesense_dpofua(&buf[2], type);
+
+ if ((offset + 1) > cmd->data_length)
+ offset = cmd->data_length;
+ }
+ memcpy(rbuf, buf, offset);
+
+ return 0;
+}
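+
+/*
+ * Worked example of the length math above: MODE SENSE(6) for the caching
+ * page (0x08) builds a 4-byte header plus a 20-byte page, so offset ends
+ * up at 24 and buf[0] (MODE DATA LENGTH, which excludes itself) is set to
+ * 24 - 1 = 23, with the copy-out capped at cmd->data_length.
+ */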
+
+static int
+target_emulate_request_sense(struct se_cmd *cmd)
+{
+ unsigned char *cdb = cmd->t_task->t_task_cdb;
+ unsigned char *buf = cmd->t_task->t_task_buf;
+ u8 ua_asc = 0, ua_ascq = 0;
+
+ if (cdb[1] & 0x01) {
+ printk(KERN_ERR "REQUEST_SENSE description emulation not"
+ " supported\n");
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+ if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
+ /*
+ * CURRENT ERROR, UNIT ATTENTION
+ */
+ buf[0] = 0x70;
+ buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+ /*
+ * Make sure request data length is enough for additional
+ * sense data.
+ */
+ if (cmd->data_length <= 18) {
+ buf[7] = 0x00;
+ return 0;
+ }
+ /*
+ * The Additional Sense Code (ASC) from the UNIT ATTENTION
+ */
+ buf[SPC_ASC_KEY_OFFSET] = ua_asc;
+ buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
+ buf[7] = 0x0A;
+ } else {
+ /*
+ * CURRENT ERROR, NO SENSE
+ */
+ buf[0] = 0x70;
+ buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
+ /*
+ * Make sure request data length is enough for additional
+ * sense data.
+ */
+ if (cmd->data_length <= 18) {
+ buf[7] = 0x00;
+ return 0;
+ }
+ /*
+ * NO ADDITIONAL SENSE INFORMATION
+ */
+ buf[SPC_ASC_KEY_OFFSET] = 0x00;
+ buf[7] = 0x0A;
+ }
+
+ return 0;
+}
+
+/*
+ * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note that this is not used for TCM/pSCSI passthrough.
+ */
+static int
+target_emulate_unmap(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
+ unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+ sector_t lba;
+ unsigned int size = cmd->data_length, range;
+ int ret, offset;
+ unsigned short dl, bd_dl;
+
+ /* First UNMAP block descriptor starts at 8 byte offset */
+ offset = 8;
+ size -= 8;
+ dl = get_unaligned_be16(&cdb[0]);
+ bd_dl = get_unaligned_be16(&cdb[2]);
+ ptr = &buf[offset];
+ printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
+ " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+ while (size) {
+ lba = get_unaligned_be64(&ptr[0]);
+ range = get_unaligned_be32(&ptr[8]);
+ printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
+ (unsigned long long)lba, range);
+
+ ret = dev->transport->do_discard(dev, lba, range);
+ if (ret < 0) {
+ printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
+ ret);
+ return -1;
+ }
+
+ ptr += 16;
+ size -= 16;
+ }
+
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ return 0;
+}
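+
+/*
+ * UNMAP parameter list layout assumed by the loop above (see sbc3): an
+ * 8-byte parameter list header followed by 16-byte block descriptors,
+ * each holding an 8-byte UNMAP LOGICAL BLOCK ADDRESS, a 4-byte NUMBER OF
+ * LOGICAL BLOCKS and 4 reserved bytes, hence the ptr += 16 / size -= 16
+ * walk.
+ */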
+
+/*
+ * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note that this is not used for TCM/pSCSI passthrough.
+ */
+static int
+target_emulate_write_same(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct se_device *dev = SE_DEV(cmd);
+ sector_t lba = cmd->t_task->t_task_lba;
+ unsigned int range;
+ int ret;
+
+ range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
+
+ printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
+ (unsigned long long)lba, range);
+
+ ret = dev->transport->do_discard(dev, lba, range);
+ if (ret < 0) {
+ printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
+ return -1;
+ }
+
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ return 0;
+}
+
+int
+transport_emulate_control_cdb(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned short service_action;
+ int ret = 0;
+
+ switch (cmd->t_task->t_task_cdb[0]) {
+ case INQUIRY:
+ ret = target_emulate_inquiry(cmd);
+ break;
+ case READ_CAPACITY:
+ ret = target_emulate_readcapacity(cmd);
+ break;
+ case MODE_SENSE:
+ ret = target_emulate_modesense(cmd, 0);
+ break;
+ case MODE_SENSE_10:
+ ret = target_emulate_modesense(cmd, 1);
+ break;
+ case SERVICE_ACTION_IN:
+ switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
+ case SAI_READ_CAPACITY_16:
+ ret = target_emulate_readcapacity_16(cmd);
+ break;
+ default:
+ printk(KERN_ERR "Unsupported SA: 0x%02x\n",
+ cmd->t_task->t_task_cdb[1] & 0x1f);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ break;
+ case REQUEST_SENSE:
+ ret = target_emulate_request_sense(cmd);
+ break;
+ case UNMAP:
+ if (!dev->transport->do_discard) {
+ printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
+ dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ ret = target_emulate_unmap(task);
+ break;
+ case WRITE_SAME_16:
+ if (!dev->transport->do_discard) {
+ printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
+ " for: %s\n", dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ ret = target_emulate_write_same(task);
+ break;
+ case VARIABLE_LENGTH_CMD:
+ service_action =
+ get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
+ switch (service_action) {
+ case WRITE_SAME_32:
+ if (!dev->transport->do_discard) {
+ printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
+ " supported for: %s\n",
+ dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ ret = target_emulate_write_same(task);
+ break;
+ default:
+ printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
+ " 0x%02x\n", service_action);
+ break;
+ }
+ break;
+ case SYNCHRONIZE_CACHE:
+ case 0x91: /* SYNCHRONIZE_CACHE_16: */
+ if (!dev->transport->do_sync_cache) {
+ printk(KERN_ERR
+ "SYNCHRONIZE_CACHE emulation not supported"
+ " for: %s\n", dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ dev->transport->do_sync_cache(task);
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ case ERASE:
+ case REZERO_UNIT:
+ case SEEK_10:
+ case SPACE:
+ case START_STOP:
+ case TEST_UNIT_READY:
+ case VERIFY:
+ case WRITE_FILEMARKS:
+ break;
+ default:
+ printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
+ cmd->t_task->t_task_cdb[0], dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+
+ if (ret < 0)
+ return ret;
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
new file mode 100644
index 000000000000..2764510798b0
--- /dev/null
+++ b/drivers/target/target_core_configfs.c
@@ -0,0 +1,3225 @@
+/*******************************************************************************
+ * Filename: target_core_configfs.c
+ *
+ * This file contains ConfigFS logic for the Generic Target Engine project.
+ *
+ * Copyright (c) 2008-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * based on configfs Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+#include <linux/proc_fs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_rd.h"
+
+static struct list_head g_tf_list;
+static struct mutex g_tf_lock;
+
+struct target_core_configfs_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(void *, char *);
+ ssize_t (*store)(void *, const char *, size_t);
+};
+
+static inline struct se_hba *
+item_to_hba(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct se_hba, hba_group);
+}
+
+/*
+ * Attributes for /sys/kernel/config/target/
+ */
+static ssize_t target_core_attr_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
+ " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
+ utsname()->sysname, utsname()->machine);
+}
+
+static struct configfs_item_operations target_core_fabric_item_ops = {
+ .show_attribute = target_core_attr_show,
+};
+
+static struct configfs_attribute target_core_item_attr_version = {
+ .ca_owner = THIS_MODULE,
+ .ca_name = "version",
+ .ca_mode = S_IRUGO,
+};
+
+static struct target_fabric_configfs *target_core_get_fabric(
+ const char *name)
+{
+ struct target_fabric_configfs *tf;
+
+ if (!(name))
+ return NULL;
+
+ mutex_lock(&g_tf_lock);
+ list_for_each_entry(tf, &g_tf_list, tf_list) {
+ if (!(strcmp(tf->tf_name, name))) {
+ atomic_inc(&tf->tf_access_cnt);
+ mutex_unlock(&g_tf_lock);
+ return tf;
+ }
+ }
+ mutex_unlock(&g_tf_lock);
+
+ return NULL;
+}
+
+/*
+ * Called from struct target_core_group_ops->make_group()
+ */
+static struct config_group *target_core_register_fabric(
+ struct config_group *group,
+ const char *name)
+{
+ struct target_fabric_configfs *tf;
+ int ret;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
+ " %s\n", group, name);
+ /*
+ * Ensure that TCM subsystem plugins are loaded at this point for
+ * using the RAMDISK_DR virtual LUN 0 and all other struct se_port
+ * LUN symlinks.
+ */
+ if (transport_subsystem_check_init() < 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Below are some hardcoded request_module() calls to automatically
+ * load fabric modules when the following is called:
+ *
+ * mkdir -p /sys/kernel/config/target/$MODULE_NAME
+ *
+ * Note that this does not limit which TCM fabric module can be
+ * registered, but simply provides auto-loading logic for mkdir(2)
+ * system calls with known TCM fabric module names.
+ */
+ if (!(strncmp(name, "iscsi", 5))) {
+ /*
+ * Automatically load the LIO Target fabric module when the
+ * following is called:
+ *
+ * mkdir -p $CONFIGFS/target/iscsi
+ */
+ ret = request_module("iscsi_target_mod");
+ if (ret < 0) {
+ printk(KERN_ERR "request_module() failed for"
+ " iscsi_target_mod.ko: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ } else if (!(strncmp(name, "loopback", 8))) {
+ /*
+ * Automatically load the tcm_loop fabric module when the
+ * following is called:
+ *
+ * mkdir -p $CONFIGFS/target/loopback
+ */
+ ret = request_module("tcm_loop");
+ if (ret < 0) {
+ printk(KERN_ERR "request_module() failed for"
+ " tcm_loop.ko: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ tf = target_core_get_fabric(name);
+ if (!(tf)) {
+ printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
+ name);
+ return ERR_PTR(-EINVAL);
+ }
+ printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
+ " %s\n", tf->tf_name);
+ /*
+ * On a successful target_core_get_fabric() lookup, the returned
+ * struct target_fabric_configfs *tf will contain a usage reference.
+ */
+ printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+ &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+
+ tf->tf_group.default_groups = tf->tf_default_groups;
+ tf->tf_group.default_groups[0] = &tf->tf_disc_group;
+ tf->tf_group.default_groups[1] = NULL;
+
+ config_group_init_type_name(&tf->tf_group, name,
+ &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+ config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
+ &TF_CIT_TMPL(tf)->tfc_discovery_cit);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+ " %s\n", tf->tf_group.cg_item.ci_name);
+ /*
+ * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
+ */
+ tf->tf_ops.tf_subsys = tf->tf_subsys;
+ tf->tf_fabric = &tf->tf_group.cg_item;
+ printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
+ " for %s\n", name);
+
+ return &tf->tf_group;
+}
+
+/*
+ * Called from struct target_core_group_ops->drop_item()
+ */
+static void target_core_deregister_fabric(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct target_fabric_configfs *tf = container_of(
+ to_config_group(item), struct target_fabric_configfs, tf_group);
+ struct config_group *tf_group;
+ struct config_item *df_item;
+ int i;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+ " tf list\n", config_item_name(item));
+
+ printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+ " %s\n", tf->tf_name);
+ atomic_dec(&tf->tf_access_cnt);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
+ " tf->tf_fabric for %s\n", tf->tf_name);
+ tf->tf_fabric = NULL;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+ " %s\n", config_item_name(item));
+
+ tf_group = &tf->tf_group;
+ for (i = 0; tf_group->default_groups[i]; i++) {
+ df_item = &tf_group->default_groups[i]->cg_item;
+ tf_group->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_fabric_group_ops = {
+ .make_group = &target_core_register_fabric,
+ .drop_item = &target_core_deregister_fabric,
+};
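+
+/*
+ * Note: configfs invokes ->make_group() when a new directory is created
+ * with mkdir(2) under /sys/kernel/config/target/ and ->drop_item() when
+ * that directory is removed with rmdir(2), which is what drives the
+ * register/deregister paths above.
+ */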
+
+/*
+ * All item attributes appearing in /sys/kernel/config/target/ appear here.
+ */
+static struct configfs_attribute *target_core_fabric_item_attrs[] = {
+ &target_core_item_attr_version,
+ NULL,
+};
+
+/*
+ * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
+ */
+static struct config_item_type target_core_fabrics_item = {
+ .ct_item_ops = &target_core_fabric_item_ops,
+ .ct_group_ops = &target_core_fabric_group_ops,
+ .ct_attrs = target_core_fabric_item_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem target_core_fabrics = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "target",
+ .ci_type = &target_core_fabrics_item,
+ },
+ },
+};
+
+static struct configfs_subsystem *target_core_subsystem[] = {
+ &target_core_fabrics,
+ NULL,
+};
+
+/*##############################################################################
+// Start functions called by external Target Fabric Modules
+//############################################################################*/
+
+/*
+ * First function called by fabric modules to:
+ *
+ * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
+ * 2) Add struct target_fabric_configfs to g_tf_list
+ * 3) Return struct target_fabric_configfs to fabric module to be passed
+ * into target_fabric_configfs_register().
+ */
+struct target_fabric_configfs *target_fabric_configfs_init(
+ struct module *fabric_mod,
+ const char *name)
+{
+ struct target_fabric_configfs *tf;
+
+ if (!(fabric_mod)) {
+ printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
+ return NULL;
+ }
+ if (!(name)) {
+ printk(KERN_ERR "Unable to locate passed fabric name\n");
+ return NULL;
+ }
+ if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
+ printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
+ "_NAME_SIZE\n", name);
+ return NULL;
+ }
+
+ tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
+ if (!(tf))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&tf->tf_list);
+ atomic_set(&tf->tf_access_cnt, 0);
+ /*
+ * Setup the default generic struct config_item_type's (cits) in
+ * struct target_fabric_configfs->tf_cit_tmpl
+ */
+ tf->tf_module = fabric_mod;
+ target_fabric_setup_cits(tf);
+
+ tf->tf_subsys = target_core_subsystem[0];
+ snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
+
+ mutex_lock(&g_tf_lock);
+ list_add_tail(&tf->tf_list, &g_tf_list);
+ mutex_unlock(&g_tf_lock);
+
+ printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
+ ">>>>>>>>>>>>>>\n");
+ printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
+ " %s\n", tf, tf->tf_name);
+ return tf;
+}
+EXPORT_SYMBOL(target_fabric_configfs_init);
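+
+/*
+ * Illustrative call sequence from a fabric module (a sketch only; the
+ * fabric name "myfabric" and the error handling are placeholders):
+ *
+ *	tf = target_fabric_configfs_init(THIS_MODULE, "myfabric");
+ *	if (!tf || IS_ERR(tf))
+ *		goto fail;
+ *	(fill in the tf->tf_ops callbacks checked by target_fabric_tf_ops_check())
+ *	ret = target_fabric_configfs_register(tf);
+ *	if (ret < 0)
+ *		target_fabric_configfs_free(tf);
+ */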
+
+/*
+ * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
+ */
+void target_fabric_configfs_free(
+ struct target_fabric_configfs *tf)
+{
+ mutex_lock(&g_tf_lock);
+ list_del(&tf->tf_list);
+ mutex_unlock(&g_tf_lock);
+
+ kfree(tf);
+}
+EXPORT_SYMBOL(target_fabric_configfs_free);
+
+/*
+ * Perform a sanity check of the passed tf->tf_ops before completing
+ * TCM fabric module registration.
+ */
+static int target_fabric_tf_ops_check(
+ struct target_fabric_configfs *tf)
+{
+ struct target_core_fabric_ops *tfo = &tf->tf_ops;
+
+ if (!(tfo->get_fabric_name)) {
+ printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->get_fabric_proto_ident)) {
+ printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_wwn)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_tag)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_default_depth)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_pr_transport_id)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_pr_transport_id_len)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_check_demo_mode)) {
+ printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_check_demo_mode_cache)) {
+ printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_check_demo_mode_write_protect)) {
+ printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_check_prod_mode_write_protect)) {
+ printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_alloc_fabric_acl)) {
+ printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_release_fabric_acl)) {
+ printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_inst_index)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->release_cmd_to_pool)) {
+ printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->release_cmd_direct)) {
+ printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->shutdown_session)) {
+ printk(KERN_ERR "Missing tfo->shutdown_session()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->close_session)) {
+ printk(KERN_ERR "Missing tfo->close_session()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->stop_session)) {
+ printk(KERN_ERR "Missing tfo->stop_session()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->fall_back_to_erl0)) {
+ printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->sess_logged_in)) {
+ printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->sess_get_index)) {
+ printk(KERN_ERR "Missing tfo->sess_get_index()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->write_pending)) {
+ printk(KERN_ERR "Missing tfo->write_pending()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->write_pending_status)) {
+ printk(KERN_ERR "Missing tfo->write_pending_status()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->set_default_node_attributes)) {
+ printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->get_task_tag)) {
+ printk(KERN_ERR "Missing tfo->get_task_tag()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->get_cmd_state)) {
+ printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->new_cmd_failure)) {
+ printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->queue_data_in)) {
+ printk(KERN_ERR "Missing tfo->queue_data_in()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->queue_status)) {
+ printk(KERN_ERR "Missing tfo->queue_status()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->queue_tm_rsp)) {
+ printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->set_fabric_sense_len)) {
+ printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->get_fabric_sense_len)) {
+ printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->is_state_remove)) {
+ printk(KERN_ERR "Missing tfo->is_state_remove()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->pack_lun)) {
+ printk(KERN_ERR "Missing tfo->pack_lun()\n");
+ return -EINVAL;
+ }
+ /*
+	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
+ * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
+ * target_core_fabric_configfs.c WWN+TPG group context code.
+ */
+ if (!(tfo->fabric_make_wwn)) {
+ printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->fabric_drop_wwn)) {
+ printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->fabric_make_tpg)) {
+ printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->fabric_drop_tpg)) {
+ printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Called 2nd from fabric module with returned parameter of
+ * struct target_fabric_configfs * from target_fabric_configfs_init().
+ *
+ * Upon a successful registration, the new fabric's struct config_item is
+ * returned. Also, a pointer to this struct is set in the passed
+ * struct target_fabric_configfs.
+ */
+int target_fabric_configfs_register(
+ struct target_fabric_configfs *tf)
+{
+ struct config_group *su_group;
+ int ret;
+
+ if (!(tf)) {
+ printk(KERN_ERR "Unable to locate target_fabric_configfs"
+ " pointer\n");
+ return -EINVAL;
+ }
+ if (!(tf->tf_subsys)) {
+		printk(KERN_ERR "Unable to locate target struct configfs_subsystem"
+			" pointer\n");
+ return -EINVAL;
+ }
+ su_group = &tf->tf_subsys->su_group;
+ if (!(su_group)) {
+ printk(KERN_ERR "Unable to locate target struct config_group"
+ " pointer\n");
+ return -EINVAL;
+ }
+ ret = target_fabric_tf_ops_check(tf);
+ if (ret < 0)
+ return ret;
+
+ printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+ ">>>>>>>>>>\n");
+ return 0;
+}
+EXPORT_SYMBOL(target_fabric_configfs_register);
+
+void target_fabric_configfs_deregister(
+ struct target_fabric_configfs *tf)
+{
+ struct config_group *su_group;
+ struct configfs_subsystem *su;
+
+ if (!(tf)) {
+ printk(KERN_ERR "Unable to locate passed target_fabric_"
+ "configfs\n");
+ return;
+ }
+ su = tf->tf_subsys;
+ if (!(su)) {
+ printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
+ " pointer\n");
+ return;
+ }
+ su_group = &tf->tf_subsys->su_group;
+ if (!(su_group)) {
+ printk(KERN_ERR "Unable to locate target struct config_group"
+ " pointer\n");
+ return;
+ }
+
+ printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
+ ">>>>>>>>>>>>\n");
+ mutex_lock(&g_tf_lock);
+ if (atomic_read(&tf->tf_access_cnt)) {
+ mutex_unlock(&g_tf_lock);
+ printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
+ tf->tf_name);
+ BUG();
+ }
+ list_del(&tf->tf_list);
+ mutex_unlock(&g_tf_lock);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
+ " %s\n", tf->tf_name);
+ tf->tf_module = NULL;
+ tf->tf_subsys = NULL;
+ kfree(tf);
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+		">>>>>>>>>>\n");
+ return;
+}
+EXPORT_SYMBOL(target_fabric_configfs_deregister);
+
+/*##############################################################################
+// End functions called by external Target Fabric Modules
+//############################################################################*/
+
+/* Start functions for struct config_item_type target_core_dev_attrib_cit */
+
+#define DEF_DEV_ATTRIB_SHOW(_name) \
+static ssize_t target_core_dev_show_attr_##_name( \
+ struct se_dev_attrib *da, \
+ char *page) \
+{ \
+ struct se_device *dev; \
+ struct se_subsystem_dev *se_dev = da->da_sub_dev; \
+ ssize_t rb; \
+ \
+ spin_lock(&se_dev->se_dev_lock); \
+ dev = se_dev->se_dev_ptr; \
+ if (!(dev)) { \
+ spin_unlock(&se_dev->se_dev_lock); \
+ return -ENODEV; \
+ } \
+ rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
+ spin_unlock(&se_dev->se_dev_lock); \
+ \
+ return rb; \
+}
+
+#define DEF_DEV_ATTRIB_STORE(_name) \
+static ssize_t target_core_dev_store_attr_##_name( \
+ struct se_dev_attrib *da, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct se_device *dev; \
+ struct se_subsystem_dev *se_dev = da->da_sub_dev; \
+ unsigned long val; \
+ int ret; \
+ \
+ spin_lock(&se_dev->se_dev_lock); \
+ dev = se_dev->se_dev_ptr; \
+ if (!(dev)) { \
+ spin_unlock(&se_dev->se_dev_lock); \
+ return -ENODEV; \
+ } \
+ ret = strict_strtoul(page, 0, &val); \
+ if (ret < 0) { \
+ spin_unlock(&se_dev->se_dev_lock); \
+ printk(KERN_ERR "strict_strtoul() failed with" \
+ " ret: %d\n", ret); \
+ return -EINVAL; \
+ } \
+ ret = se_dev_set_##_name(dev, (u32)val); \
+ spin_unlock(&se_dev->se_dev_lock); \
+ \
+ return (!ret) ? count : -EINVAL; \
+}
+
+#define DEF_DEV_ATTRIB(_name) \
+DEF_DEV_ATTRIB_SHOW(_name); \
+DEF_DEV_ATTRIB_STORE(_name);
+
+#define DEF_DEV_ATTRIB_RO(_name) \
+DEF_DEV_ATTRIB_SHOW(_name);
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
+#define SE_DEV_ATTR(_name, _mode) \
+static struct target_core_dev_attrib_attribute \
+ target_core_dev_attrib_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_dev_show_attr_##_name, \
+ target_core_dev_store_attr_##_name);
+
+#define SE_DEV_ATTR_RO(_name) \
+static struct target_core_dev_attrib_attribute \
+ target_core_dev_attrib_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_dev_show_attr_##_name);
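+
+/*
+ * For example, the DEF_DEV_ATTRIB(block_size) + SE_DEV_ATTR(block_size, ...)
+ * pair below expands into target_core_dev_show_attr_block_size() and
+ * target_core_dev_store_attr_block_size(), where the store handler forwards
+ * the parsed value to se_dev_set_block_size().
+ */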
+
+DEF_DEV_ATTRIB(emulate_dpo);
+SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_write);
+SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_read);
+SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_write_cache);
+SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
+SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tas);
+SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpu);
+SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpws);
+SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(enforce_pr_isids);
+SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_block_size);
+SE_DEV_ATTR_RO(hw_block_size);
+
+DEF_DEV_ATTRIB(block_size);
+SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_max_sectors);
+SE_DEV_ATTR_RO(hw_max_sectors);
+
+DEF_DEV_ATTRIB(max_sectors);
+SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(optimal_sectors);
+SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_queue_depth);
+SE_DEV_ATTR_RO(hw_queue_depth);
+
+DEF_DEV_ATTRIB(queue_depth);
+SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(task_timeout);
+SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_lba_count);
+SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_block_desc_count);
+SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity);
+SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity_alignment);
+SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
+
+static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+ &target_core_dev_attrib_emulate_dpo.attr,
+ &target_core_dev_attrib_emulate_fua_write.attr,
+ &target_core_dev_attrib_emulate_fua_read.attr,
+ &target_core_dev_attrib_emulate_write_cache.attr,
+ &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &target_core_dev_attrib_emulate_tas.attr,
+ &target_core_dev_attrib_emulate_tpu.attr,
+ &target_core_dev_attrib_emulate_tpws.attr,
+ &target_core_dev_attrib_enforce_pr_isids.attr,
+ &target_core_dev_attrib_hw_block_size.attr,
+ &target_core_dev_attrib_block_size.attr,
+ &target_core_dev_attrib_hw_max_sectors.attr,
+ &target_core_dev_attrib_max_sectors.attr,
+ &target_core_dev_attrib_optimal_sectors.attr,
+ &target_core_dev_attrib_hw_queue_depth.attr,
+ &target_core_dev_attrib_queue_depth.attr,
+ &target_core_dev_attrib_task_timeout.attr,
+ &target_core_dev_attrib_max_unmap_lba_count.attr,
+ &target_core_dev_attrib_max_unmap_block_desc_count.attr,
+ &target_core_dev_attrib_unmap_granularity.attr,
+ &target_core_dev_attrib_unmap_granularity_alignment.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_core_dev_attrib_ops = {
+ .show_attribute = target_core_dev_attrib_attr_show,
+ .store_attribute = target_core_dev_attrib_attr_store,
+};
+
+static struct config_item_type target_core_dev_attrib_cit = {
+ .ct_item_ops = &target_core_dev_attrib_ops,
+ .ct_attrs = target_core_dev_attrib_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_attrib_cit */
+
+/* Start functions for struct config_item_type target_core_dev_wwn_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
+#define SE_DEV_WWN_ATTR(_name, _mode) \
+static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_dev_wwn_show_attr_##_name, \
+ target_core_dev_wwn_store_attr_##_name);
+
+#define SE_DEV_WWN_ATTR_RO(_name) \
+static struct target_core_dev_wwn_attribute \
+	target_core_dev_wwn_##_name = \
+	__CONFIGFS_EATTR_RO(_name, \
+	target_core_dev_wwn_show_attr_##_name);
+
+/*
+ * VPD page 0x80 Unit serial
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
+ struct t10_wwn *t10_wwn,
+ char *page)
+{
+ struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+ struct se_device *dev;
+
+ dev = se_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
+ &t10_wwn->unit_serial[0]);
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
+ struct t10_wwn *t10_wwn,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
+ struct se_device *dev;
+ unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+
+ /*
+ * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
+ * from the struct scsi_device level firmware, do not allow
+ * VPD Unit Serial to be emulated.
+ *
+ * Note this struct scsi_device could also be emulating VPD
+ * information from its drivers/scsi LLD. But for now we assume
+ * it is doing 'the right thing' wrt a world wide unique
+ * VPD Unit Serial Number that OS dependent multipath can depend on.
+ */
+ if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+ printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
+ " Unit Serial, ignoring request\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
+ printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
+ " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
+ return -EOVERFLOW;
+ }
+ /*
+ * Check to see if any active $FABRIC_MOD exports exist. If they
+ * do exist, fail here as changing this information on the fly
+ * (underneath the initiator side OS dependent multipath code)
+ * could cause negative effects.
+ */
+ dev = su_dev->se_dev_ptr;
+ if ((dev)) {
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "Unable to set VPD Unit Serial while"
+ " active %d $FABRIC_MOD exports exist\n",
+ atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -EINVAL;
+ }
+ }
+ /*
+ * This currently assumes ASCII encoding for emulated VPD Unit Serial.
+ *
+ * Also, strip any newline added from the userspace
+ * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
+ */
+ memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
+ snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
+ snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+ "%s", strstrip(buf));
+ su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+ " %s\n", su_dev->t10_wwn.unit_serial);
+
+ return count;
+}
+
+SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Protocol Identifier
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
+ struct t10_wwn *t10_wwn,
+ char *page)
+{
+ struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+ struct se_device *dev;
+ struct t10_vpd *vpd;
+ unsigned char buf[VPD_TMP_BUF_SIZE];
+ ssize_t len = 0;
+
+ dev = se_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+ spin_lock(&t10_wwn->t10_vpd_lock);
+ list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
+ if (!(vpd->protocol_identifier_set))
+ continue;
+
+ transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
+
+ if ((len + strlen(buf) > PAGE_SIZE))
+ break;
+
+ len += sprintf(page+len, "%s", buf);
+ }
+ spin_unlock(&t10_wwn->t10_vpd_lock);
+
+ return len;
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
+ struct t10_wwn *t10_wwn,
+ const char *page,
+ size_t count)
+{
+ return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
+
+/*
+ * Generic wrapper for dumping VPD identifiers by association.
+ */
+#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
+static ssize_t target_core_dev_wwn_show_attr_##_name( \
+ struct t10_wwn *t10_wwn, \
+ char *page) \
+{ \
+ struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \
+ struct se_device *dev; \
+ struct t10_vpd *vpd; \
+ unsigned char buf[VPD_TMP_BUF_SIZE]; \
+ ssize_t len = 0; \
+ \
+ dev = se_dev->se_dev_ptr; \
+ if (!(dev)) \
+ return -ENODEV; \
+ \
+ spin_lock(&t10_wwn->t10_vpd_lock); \
+ list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
+ if (vpd->association != _assoc) \
+ continue; \
+ \
+ memset(buf, 0, VPD_TMP_BUF_SIZE); \
+ transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
+ if ((len + strlen(buf) > PAGE_SIZE)) \
+ break; \
+ len += sprintf(page+len, "%s", buf); \
+ \
+ memset(buf, 0, VPD_TMP_BUF_SIZE); \
+ transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
+ if ((len + strlen(buf) > PAGE_SIZE)) \
+ break; \
+ len += sprintf(page+len, "%s", buf); \
+ \
+ memset(buf, 0, VPD_TMP_BUF_SIZE); \
+ transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
+ if ((len + strlen(buf) > PAGE_SIZE)) \
+ break; \
+ len += sprintf(page+len, "%s", buf); \
+ } \
+ spin_unlock(&t10_wwn->t10_vpd_lock); \
+ \
+ return len; \
+}
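+
+/*
+ * The three instantiations below pass the VPD page 0x83 association field
+ * values 0x00 (Logical Unit), 0x10 (Target Port) and 0x20 (SCSI Target
+ * Device), matching the per-association comments that follow.
+ */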
+
+/*
+ * VPD page 0x83 Association: Logical Unit
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
+ struct t10_wwn *t10_wwn,
+ const char *page,
+ size_t count)
+{
+ return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: Target Port
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
+ struct t10_wwn *t10_wwn,
+ const char *page,
+ size_t count)
+{
+ return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: SCSI Target Device
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
+ struct t10_wwn *t10_wwn,
+ const char *page,
+ size_t count)
+{
+ return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
+
+static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
+ &target_core_dev_wwn_vpd_unit_serial.attr,
+ &target_core_dev_wwn_vpd_protocol_identifier.attr,
+ &target_core_dev_wwn_vpd_assoc_logical_unit.attr,
+ &target_core_dev_wwn_vpd_assoc_target_port.attr,
+ &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_core_dev_wwn_ops = {
+ .show_attribute = target_core_dev_wwn_attr_show,
+ .store_attribute = target_core_dev_wwn_attr_store,
+};
+
+static struct config_item_type target_core_dev_wwn_cit = {
+ .ct_item_ops = &target_core_dev_wwn_ops,
+ .ct_attrs = target_core_dev_wwn_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_wwn_cit */
+
+/* Start functions for struct config_item_type target_core_dev_pr_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+#define SE_DEV_PR_ATTR(_name, _mode) \
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_dev_pr_show_attr_##_name, \
+ target_core_dev_pr_store_attr_##_name);
+
+#define SE_DEV_PR_ATTR_RO(_name) \
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_dev_pr_show_attr_##_name);
+
+/*
+ * res_holder
+ */
+static ssize_t target_core_dev_pr_show_spc3_res(
+ struct se_device *dev,
+ char *page,
+ ssize_t *len)
+{
+ struct se_node_acl *se_nacl;
+ struct t10_pr_registration *pr_reg;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
+ if (!(pr_reg)) {
+ *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ return *len;
+ }
+ se_nacl = pr_reg->pr_reg_nacl;
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
+ TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+ se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return *len;
+}
+
+static ssize_t target_core_dev_pr_show_spc2_res(
+ struct se_device *dev,
+ char *page,
+ ssize_t *len)
+{
+ struct se_node_acl *se_nacl;
+
+ spin_lock(&dev->dev_reservation_lock);
+ se_nacl = dev->dev_reserved_node_acl;
+ if (!(se_nacl)) {
+ *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ return *len;
+ }
+ *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
+ TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+ se_nacl->initiatorname);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return *len;
+}
+
+static ssize_t target_core_dev_pr_show_attr_res_holder(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ ssize_t len = 0;
+
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ switch (T10_RES(su_dev)->res_type) {
+ case SPC3_PERSISTENT_RESERVATIONS:
+ target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
+ page, &len);
+ break;
+ case SPC2_RESERVATIONS:
+ target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
+ page, &len);
+ break;
+ case SPC_PASSTHROUGH:
+ len += sprintf(page+len, "Passthrough\n");
+ break;
+ default:
+ len += sprintf(page+len, "Unknown\n");
+ break;
+ }
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_holder);
+
+/*
+ * res_pr_all_tgt_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ struct se_device *dev;
+ struct t10_pr_registration *pr_reg;
+ ssize_t len = 0;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return len;
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
+ if (!(pr_reg)) {
+ len = sprintf(page, "No SPC-3 Reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ return len;
+ }
+ /*
+	 * See All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3
+	 * Basic PERSISTENT RESERVE OUT parameter list, page 290
+ */
+ if (pr_reg->pr_reg_all_tg_pt)
+ len = sprintf(page, "SPC-3 Reservation: All Target"
+ " Ports registration\n");
+ else
+ len = sprintf(page, "SPC-3 Reservation: Single"
+ " Target Port registration\n");
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
+
+/*
+ * res_pr_generation
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return 0;
+
+ return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_generation);
+
+/*
+ * res_pr_holder_tg_port
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ struct se_device *dev;
+ struct se_node_acl *se_nacl;
+ struct se_lun *lun;
+ struct se_portal_group *se_tpg;
+ struct t10_pr_registration *pr_reg;
+ struct target_core_fabric_ops *tfo;
+ ssize_t len = 0;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return len;
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
+ if (!(pr_reg)) {
+ len = sprintf(page, "No SPC-3 Reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ return len;
+ }
+ se_nacl = pr_reg->pr_reg_nacl;
+ se_tpg = se_nacl->se_tpg;
+ lun = pr_reg->pr_reg_tg_pt_lun;
+ tfo = TPG_TFO(se_tpg);
+
+ len += sprintf(page+len, "SPC-3 Reservation: %s"
+ " Target Node Endpoint: %s\n", tfo->get_fabric_name(),
+ tfo->tpg_get_wwn(se_tpg));
+ len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
+		" Identifier Tag: %hu %s Portal Group Tag: %hu"
+ " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
+ tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
+ tfo->get_fabric_name(), lun->unpacked_lun);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
+
+/*
+ * res_pr_registered_i_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ struct target_core_fabric_ops *tfo;
+ struct t10_pr_registration *pr_reg;
+ unsigned char buf[384];
+ char i_buf[PR_REG_ISID_ID_LEN];
+ ssize_t len = 0;
+ int reg_count = 0, prf_isid;
+
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return len;
+
+ len += sprintf(page+len, "SPC-3 PR Registrations:\n");
+
+ spin_lock(&T10_RES(su_dev)->registration_lock);
+ list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ pr_reg_list) {
+
+ memset(buf, 0, 384);
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+ sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
+ tfo->get_fabric_name(),
+ pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
+ &i_buf[0] : "", pr_reg->pr_res_key,
+ pr_reg->pr_res_generation);
+
+ if ((len + strlen(buf) > PAGE_SIZE))
+ break;
+
+ len += sprintf(page+len, "%s", buf);
+ reg_count++;
+ }
+ spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+ if (!(reg_count))
+ len += sprintf(page+len, "None\n");
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
+
+/*
+ * res_pr_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_type(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ struct se_device *dev;
+ struct t10_pr_registration *pr_reg;
+ ssize_t len = 0;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return len;
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
+ if (!(pr_reg)) {
+ len = sprintf(page, "No SPC-3 Reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ return len;
+ }
+ len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+ core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_type);
+
+/*
+ * res_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_type(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ ssize_t len = 0;
+
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ switch (T10_RES(su_dev)->res_type) {
+ case SPC3_PERSISTENT_RESERVATIONS:
+ len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
+ break;
+ case SPC2_RESERVATIONS:
+ len = sprintf(page, "SPC2_RESERVATIONS\n");
+ break;
+ case SPC_PASSTHROUGH:
+ len = sprintf(page, "SPC_PASSTHROUGH\n");
+ break;
+ default:
+ len = sprintf(page, "UNKNOWN\n");
+ break;
+ }
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_type);
+
+/*
+ * res_aptpl_active
+ */
+
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return 0;
+
+ return sprintf(page, "APTPL Bit Status: %s\n",
+ (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
+}
+
+SE_DEV_PR_ATTR_RO(res_aptpl_active);
+
+/*
+ * res_aptpl_metadata
+ */
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return 0;
+
+ return sprintf(page, "Ready to process PR APTPL metadata..\n");
+}
+
+enum {
+ Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
+ Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
+ Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
+ Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
+};
+
+static match_table_t tokens = {
+ {Opt_initiator_fabric, "initiator_fabric=%s"},
+ {Opt_initiator_node, "initiator_node=%s"},
+ {Opt_initiator_sid, "initiator_sid=%s"},
+ {Opt_sa_res_key, "sa_res_key=%s"},
+ {Opt_res_holder, "res_holder=%d"},
+ {Opt_res_type, "res_type=%d"},
+ {Opt_res_scope, "res_scope=%d"},
+ {Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
+ {Opt_mapped_lun, "mapped_lun=%d"},
+ {Opt_target_fabric, "target_fabric=%s"},
+ {Opt_target_node, "target_node=%s"},
+ {Opt_tpgt, "tpgt=%d"},
+ {Opt_port_rtpi, "port_rtpi=%d"},
+ {Opt_target_lun, "target_lun=%d"},
+ {Opt_err, NULL}
+};
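+
+/*
+ * Illustrative (not authoritative) example of the key=value string parsed
+ * below; the initiator/target names and numeric values are placeholders,
+ * and the string is written to the attribute as a single line (wrapped
+ * here for readability):
+ *
+ *	initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:client,
+ *	sa_res_key=0x1234,mapped_lun=0,target_fabric=iSCSI,
+ *	target_node=iqn.2003-01.org.example:target,tpgt=1,target_lun=0
+ */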
+
+static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
+ struct se_subsystem_dev *su_dev,
+ const char *page,
+ size_t count)
+{
+ struct se_device *dev;
+ unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL;
+ unsigned char *isid = NULL;
+ char *orig, *ptr, *arg_p, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ unsigned long long tmp_ll;
+ u64 sa_res_key = 0;
+ u32 mapped_lun = 0, target_lun = 0;
+ int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
+ u16 port_rpti = 0, tpgt = 0;
+ u8 type = 0, scope;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return 0;
+
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "Unable to process APTPL metadata while"
+ " active fabric exports exist\n");
+ return -EINVAL;
+ }
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+ while ((ptr = strsep(&opts, ",")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_initiator_fabric:
+ i_fabric = match_strdup(&args[0]);
+ break;
+ case Opt_initiator_node:
+ i_port = match_strdup(&args[0]);
+ if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
+ printk(KERN_ERR "APTPL metadata initiator_node="
+ " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
+ PR_APTPL_MAX_IPORT_LEN);
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case Opt_initiator_sid:
+ isid = match_strdup(&args[0]);
+ if (strlen(isid) > PR_REG_ISID_LEN) {
+ printk(KERN_ERR "APTPL metadata initiator_isid"
+ "= exceeds PR_REG_ISID_LEN: %d\n",
+ PR_REG_ISID_LEN);
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case Opt_sa_res_key:
+ arg_p = match_strdup(&args[0]);
+ ret = strict_strtoull(arg_p, 0, &tmp_ll);
+ if (ret < 0) {
+ printk(KERN_ERR "strict_strtoull() failed for"
+ " sa_res_key=\n");
+ goto out;
+ }
+ sa_res_key = (u64)tmp_ll;
+ break;
+ /*
+ * PR APTPL Metadata for Reservation
+ */
+ case Opt_res_holder:
+ match_int(args, &arg);
+ res_holder = arg;
+ break;
+ case Opt_res_type:
+ match_int(args, &arg);
+ type = (u8)arg;
+ break;
+ case Opt_res_scope:
+ match_int(args, &arg);
+ scope = (u8)arg;
+ break;
+ case Opt_res_all_tg_pt:
+ match_int(args, &arg);
+ all_tg_pt = (int)arg;
+ break;
+ case Opt_mapped_lun:
+ match_int(args, &arg);
+ mapped_lun = (u32)arg;
+ break;
+ /*
+ * PR APTPL Metadata for Target Port
+ */
+ case Opt_target_fabric:
+ t_fabric = match_strdup(&args[0]);
+ break;
+ case Opt_target_node:
+ t_port = match_strdup(&args[0]);
+ if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
+ printk(KERN_ERR "APTPL metadata target_node="
+ " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
+ PR_APTPL_MAX_TPORT_LEN);
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case Opt_tpgt:
+ match_int(args, &arg);
+ tpgt = (u16)arg;
+ break;
+ case Opt_port_rtpi:
+ match_int(args, &arg);
+ port_rpti = (u16)arg;
+ break;
+ case Opt_target_lun:
+ match_int(args, &arg);
+ target_lun = (u32)arg;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!(i_port) || !(t_port) || !(sa_res_key)) {
+ printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (res_holder && !(type)) {
+ printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
+ " holder\n", type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
+ i_port, isid, mapped_lun, t_port, tpgt, target_lun,
+ res_holder, all_tg_pt, type);
+out:
+ kfree(orig);
+ return (ret == 0) ? count : ret;
+}
+
+SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+
+static struct configfs_attribute *target_core_dev_pr_attrs[] = {
+ &target_core_dev_pr_res_holder.attr,
+ &target_core_dev_pr_res_pr_all_tgt_pts.attr,
+ &target_core_dev_pr_res_pr_generation.attr,
+ &target_core_dev_pr_res_pr_holder_tg_port.attr,
+ &target_core_dev_pr_res_pr_registered_i_pts.attr,
+ &target_core_dev_pr_res_pr_type.attr,
+ &target_core_dev_pr_res_type.attr,
+ &target_core_dev_pr_res_aptpl_active.attr,
+ &target_core_dev_pr_res_aptpl_metadata.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_core_dev_pr_ops = {
+ .show_attribute = target_core_dev_pr_attr_show,
+ .store_attribute = target_core_dev_pr_attr_store,
+};
+
+static struct config_item_type target_core_dev_pr_cit = {
+ .ct_item_ops = &target_core_dev_pr_ops,
+ .ct_attrs = target_core_dev_pr_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_pr_cit */
+
+/* Start functions for struct config_item_type target_core_dev_cit */
+
+static ssize_t target_core_show_dev_info(void *p, char *page)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_subsystem_api *t = hba->transport;
+ int bl = 0;
+ ssize_t read_bytes = 0;
+
+ if (!(se_dev->se_dev_ptr))
+ return -ENODEV;
+
+ transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+ read_bytes += bl;
+ read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+ return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_info = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "info",
+ .ca_mode = S_IRUGO },
+ .show = target_core_show_dev_info,
+ .store = NULL,
+};
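+
+/*
+ * Reading this "info" attribute (e.g. cat $TARGET/$HBA/$STORAGE_OBJECT/info,
+ * following the path convention used elsewhere in this file) dumps the
+ * generic device state plus the backend driver's configfs parameters.
+ */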
+
+static ssize_t target_core_store_dev_control(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_subsystem_api *t = hba->transport;
+
+ if (!(se_dev->se_dev_su_ptr)) {
+		printk(KERN_ERR "Unable to locate struct se_subsystem_dev->"
+			"se_dev_su_ptr\n");
+ return -EINVAL;
+ }
+
+ return t->set_configfs_dev_params(hba, se_dev, page, count);
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_control = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "control",
+ .ca_mode = S_IWUSR },
+ .show = NULL,
+ .store = target_core_store_dev_control,
+};
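+
+/*
+ * Writes to "control" are passed through verbatim to the backend's
+ * set_configfs_dev_params(), so the accepted parameter keys are defined
+ * by the subsystem plugin (struct se_subsystem_api), not by this file.
+ */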
+
+static ssize_t target_core_show_dev_alias(void *p, char *page)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+
+ if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+ return 0;
+
+ return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+}
+
+static ssize_t target_core_store_dev_alias(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_hba *hba = se_dev->se_dev_hba;
+ ssize_t read_bytes;
+
+ if (count > (SE_DEV_ALIAS_LEN-1)) {
+ printk(KERN_ERR "alias count: %d exceeds"
+ " SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
+ SE_DEV_ALIAS_LEN-1);
+ return -EINVAL;
+ }
+
+ se_dev->su_dev_flags |= SDF_USING_ALIAS;
+ read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
+ "%s", page);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&se_dev->se_dev_group.cg_item),
+ se_dev->se_dev_alias);
+
+ return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alias = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "alias",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_dev_alias,
+ .store = target_core_store_dev_alias,
+};
+
+static ssize_t target_core_show_dev_udev_path(void *p, char *page)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+
+ if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+ return 0;
+
+ return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+}
+
+static ssize_t target_core_store_dev_udev_path(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_hba *hba = se_dev->se_dev_hba;
+ ssize_t read_bytes;
+
+ if (count > (SE_UDEV_PATH_LEN-1)) {
+ printk(KERN_ERR "udev_path count: %d exceeds"
+ " SE_UDEV_PATH_LEN-1: %u\n", (int)count,
+ SE_UDEV_PATH_LEN-1);
+ return -EINVAL;
+ }
+
+ se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+ read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+ "%s", page);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&se_dev->se_dev_group.cg_item),
+ se_dev->se_dev_udev_path);
+
+ return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "udev_path",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_dev_udev_path,
+ .store = target_core_store_dev_udev_path,
+};
+
+static ssize_t target_core_store_dev_enable(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_device *dev;
+ struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_subsystem_api *t = hba->transport;
+ char *ptr;
+
+ ptr = strstr(page, "1");
+ if (!(ptr)) {
+ printk(KERN_ERR "For dev_enable ops, only valid value"
+ " is \"1\"\n");
+ return -EINVAL;
+ }
+ if ((se_dev->se_dev_ptr)) {
+ printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
+ " object\n");
+ return -EEXIST;
+ }
+
+ if (t->check_configfs_dev_params(hba, se_dev) < 0)
+ return -EINVAL;
+
+ dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+ if (!(dev) || IS_ERR(dev))
+ return -EINVAL;
+
+ se_dev->se_dev_ptr = dev;
+ printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
+ " %p\n", se_dev->se_dev_ptr);
+
+ return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_enable = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "enable",
+ .ca_mode = S_IWUSR },
+ .show = NULL,
+ .store = target_core_store_dev_enable,
+};
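+
+/*
+ * Usage sketch: once the backend parameters have been written through
+ * "control", echo 1 > $TARGET/$HBA/$STORAGE_OBJECT/enable calls
+ * t->create_virtdevice() and publishes the resulting se_dev_ptr.
+ */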
+
+static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
+{
+ struct se_device *dev;
+ struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+ struct config_item *lu_ci;
+ struct t10_alua_lu_gp *lu_gp;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ ssize_t len = 0;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
+ return len;
+
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!(lu_gp_mem)) {
+ printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+ " pointer\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = lu_gp_mem->lu_gp;
+ if ((lu_gp)) {
+ lu_ci = &lu_gp->lu_gp_group.cg_item;
+ len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
+ config_item_name(lu_ci), lu_gp->lu_gp_id);
+ }
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ return len;
+}
+
+static ssize_t target_core_store_alua_lu_gp(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_device *dev;
+ struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+ struct se_hba *hba = su_dev->se_dev_hba;
+ struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ unsigned char buf[LU_GROUP_NAME_BUF];
+ int move = 0;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+ printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&su_dev->se_dev_group.cg_item));
+ return -EINVAL;
+ }
+ if (count > LU_GROUP_NAME_BUF) {
+ printk(KERN_ERR "ALUA LU Group Alias too large!\n");
+ return -EINVAL;
+ }
+ memset(buf, 0, LU_GROUP_NAME_BUF);
+ memcpy(buf, page, count);
+ /*
+ * Any ALUA logical unit alias besides "NULL" means we will be
+ * making a new group association.
+ */
+ if (strcmp(strstrip(buf), "NULL")) {
+ /*
+ * core_alua_get_lu_gp_by_name() will increment reference to
+ * struct t10_alua_lu_gp. This reference is released with
+		 * core_alua_put_lu_gp_from_name() below.
+ */
+ lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
+ if (!(lu_gp_new))
+ return -ENODEV;
+ }
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!(lu_gp_mem)) {
+ if (lu_gp_new)
+ core_alua_put_lu_gp_from_name(lu_gp_new);
+ printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+ " pointer\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = lu_gp_mem->lu_gp;
+ if ((lu_gp)) {
+ /*
+ * Clearing an existing lu_gp association, and replacing
+ * with NULL
+ */
+ if (!(lu_gp_new)) {
+ printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
+ " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
+ " %hu\n",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&su_dev->se_dev_group.cg_item),
+ config_item_name(&lu_gp->lu_gp_group.cg_item),
+ lu_gp->lu_gp_id);
+
+ __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ return count;
+ }
+ /*
+ * Removing existing association of lu_gp_mem with lu_gp
+ */
+ __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+ move = 1;
+ }
+ /*
+ * Associate lu_gp_mem with lu_gp_new.
+ */
+ __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+ " core/alua/lu_gps/%s, ID: %hu\n",
+ (move) ? "Moving" : "Adding",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&su_dev->se_dev_group.cg_item),
+ config_item_name(&lu_gp_new->lu_gp_group.cg_item),
+ lu_gp_new->lu_gp_id);
+
+ core_alua_put_lu_gp_from_name(lu_gp_new);
+ return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "alua_lu_gp",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_alua_lu_gp,
+ .store = target_core_store_alua_lu_gp,
+};
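+
+/*
+ * Writing an existing ALUA LU group alias to "alua_lu_gp" associates the
+ * device with that group (moving it if it already had one); writing the
+ * literal string "NULL" clears the current association.
+ */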
+
+static struct configfs_attribute *lio_core_dev_attrs[] = {
+ &target_core_attr_dev_info.attr,
+ &target_core_attr_dev_control.attr,
+ &target_core_attr_dev_alias.attr,
+ &target_core_attr_dev_udev_path.attr,
+ &target_core_attr_dev_enable.attr,
+ &target_core_attr_dev_alua_lu_gp.attr,
+ NULL,
+};
+
+static void target_core_dev_release(struct config_item *item)
+{
+ struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+ struct se_subsystem_dev, se_dev_group);
+ struct config_group *dev_cg;
+
+ if (!(se_dev))
+ return;
+
+ dev_cg = &se_dev->se_dev_group;
+ kfree(dev_cg->default_groups);
+}
+
+static ssize_t target_core_dev_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct se_subsystem_dev *se_dev = container_of(
+ to_config_group(item), struct se_subsystem_dev,
+ se_dev_group);
+ struct target_core_configfs_attribute *tc_attr = container_of(
+ attr, struct target_core_configfs_attribute, attr);
+
+ if (!(tc_attr->show))
+ return -EINVAL;
+
+ return tc_attr->show((void *)se_dev, page);
+}
+
+static ssize_t target_core_dev_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct se_subsystem_dev *se_dev = container_of(
+ to_config_group(item), struct se_subsystem_dev,
+ se_dev_group);
+ struct target_core_configfs_attribute *tc_attr = container_of(
+ attr, struct target_core_configfs_attribute, attr);
+
+ if (!(tc_attr->store))
+ return -EINVAL;
+
+ return tc_attr->store((void *)se_dev, page, count);
+}
+
+static struct configfs_item_operations target_core_dev_item_ops = {
+ .release = target_core_dev_release,
+ .show_attribute = target_core_dev_show,
+ .store_attribute = target_core_dev_store,
+};
+
+static struct config_item_type target_core_dev_cit = {
+ .ct_item_ops = &target_core_dev_item_ops,
+ .ct_attrs = lio_core_dev_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
+#define SE_DEV_ALUA_LU_ATTR(_name, _mode) \
+static struct target_core_alua_lu_gp_attribute \
+ target_core_alua_lu_gp_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_alua_lu_gp_show_attr_##_name, \
+ target_core_alua_lu_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_LU_ATTR_RO(_name) \
+static struct target_core_alua_lu_gp_attribute \
+ target_core_alua_lu_gp_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_alua_lu_gp_show_attr_##_name);
+
+/*
+ * lu_gp_id
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
+ struct t10_alua_lu_gp *lu_gp,
+ char *page)
+{
+ if (!(lu_gp->lu_gp_valid_id))
+ return 0;
+
+ return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
+}
+
+static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
+ struct t10_alua_lu_gp *lu_gp,
+ const char *page,
+ size_t count)
+{
+ struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
+ unsigned long lu_gp_id;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &lu_gp_id);
+ if (ret < 0) {
+ printk(KERN_ERR "strict_strtoul() returned %d for"
+ " lu_gp_id\n", ret);
+ return -EINVAL;
+ }
+ if (lu_gp_id > 0x0000ffff) {
+ printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
+ " 0x0000ffff\n", lu_gp_id);
+ return -EINVAL;
+ }
+
+ ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
+ if (ret < 0)
+ return -EINVAL;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
+ " Group: core/alua/lu_gps/%s to ID: %hu\n",
+ config_item_name(&alua_lu_gp_cg->cg_item),
+ lu_gp->lu_gp_id);
+
+ return count;
+}
+
+SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_members(
+ struct t10_alua_lu_gp *lu_gp,
+ char *page)
+{
+ struct se_device *dev;
+ struct se_hba *hba;
+ struct se_subsystem_dev *su_dev;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ ssize_t len = 0, cur_len;
+ unsigned char buf[LU_GROUP_NAME_BUF];
+
+ memset(buf, 0, LU_GROUP_NAME_BUF);
+
+ spin_lock(&lu_gp->lu_gp_lock);
+ list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+ dev = lu_gp_mem->lu_gp_mem_dev;
+ su_dev = dev->se_sub_dev;
+ hba = su_dev->se_dev_hba;
+
+ cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&su_dev->se_dev_group.cg_item));
+ cur_len++; /* Extra byte for NULL terminator */
+
+ if ((cur_len + len) > PAGE_SIZE) {
+ printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+ "_members buffer\n");
+ break;
+ }
+ memcpy(page+len, buf, cur_len);
+ len += cur_len;
+ }
+ spin_unlock(&lu_gp->lu_gp_lock);
+
+ return len;
+}
+
+SE_DEV_ALUA_LU_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
+
+static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
+ &target_core_alua_lu_gp_lu_gp_id.attr,
+ &target_core_alua_lu_gp_members.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+ .show_attribute = target_core_alua_lu_gp_attr_show,
+ .store_attribute = target_core_alua_lu_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_lu_gp_cit = {
+ .ct_item_ops = &target_core_alua_lu_gp_ops,
+ .ct_attrs = target_core_alua_lu_gp_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+static struct config_group *target_core_alua_create_lu_gp(
+ struct config_group *group,
+ const char *name)
+{
+ struct t10_alua_lu_gp *lu_gp;
+ struct config_group *alua_lu_gp_cg = NULL;
+ struct config_item *alua_lu_gp_ci = NULL;
+
+ lu_gp = core_alua_allocate_lu_gp(name, 0);
+ if (IS_ERR(lu_gp))
+ return NULL;
+
+ alua_lu_gp_cg = &lu_gp->lu_gp_group;
+ alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
+
+ config_group_init_type_name(alua_lu_gp_cg, name,
+ &target_core_alua_lu_gp_cit);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+ " Group: core/alua/lu_gps/%s\n",
+ config_item_name(alua_lu_gp_ci));
+
+ return alua_lu_gp_cg;
+}
+
+static void target_core_alua_drop_lu_gp(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+ struct t10_alua_lu_gp, lu_gp_group);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+ " Group: core/alua/lu_gps/%s, ID: %hu\n",
+ config_item_name(item), lu_gp->lu_gp_id);
+
+ config_item_put(item);
+ core_alua_free_lu_gp(lu_gp);
+}
+
+static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
+ .make_group = &target_core_alua_create_lu_gp,
+ .drop_item = &target_core_alua_drop_lu_gp,
+};
+
+static struct config_item_type target_core_alua_lu_gps_cit = {
+ .ct_item_ops = NULL,
+ .ct_group_ops = &target_core_alua_lu_gps_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
+#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \
+static struct target_core_alua_tg_pt_gp_attribute \
+ target_core_alua_tg_pt_gp_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_alua_tg_pt_gp_show_attr_##_name, \
+ target_core_alua_tg_pt_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \
+static struct target_core_alua_tg_pt_gp_attribute \
+ target_core_alua_tg_pt_gp_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_alua_tg_pt_gp_show_attr_##_name);
+
+/*
+ * alua_access_state
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n",
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ unsigned long tmp;
+ int new_state, ret;
+
+ if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
+		printk(KERN_ERR "Unable to do implicit ALUA on non valid"
+ " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+ return -EINVAL;
+ }
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+		printk(KERN_ERR "Unable to extract new ALUA access state from"
+ " %s\n", page);
+ return -EINVAL;
+ }
+ new_state = (int)tmp;
+
+ if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
+		printk(KERN_ERR "Unable to process implicit configfs ALUA"
+			" transition while TPGS_IMPLICT_ALUA is disabled\n");
+ return -EINVAL;
+ }
+
+ ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+ NULL, NULL, new_state, 0);
+ return (!ret) ? count : -EINVAL;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
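+
+/*
+ * Writing a numeric ALUA access state to "alua_access_state" drives an
+ * implicit (TPGS_IMPLICT_ALUA) transition for the whole target port group
+ * via core_alua_do_port_transition().
+ */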
+
+/*
+ * alua_access_status
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%s\n",
+ core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int new_status, ret;
+
+ if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
+		printk(KERN_ERR "Unable to set ALUA access status on non"
+ " valid tg_pt_gp ID: %hu\n",
+ tg_pt_gp->tg_pt_gp_valid_id);
+ return -EINVAL;
+ }
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract new ALUA access status"
+ " from %s\n", page);
+ return -EINVAL;
+ }
+ new_status = (int)tmp;
+
+ if ((new_status != ALUA_STATUS_NONE) &&
+ (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+ (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
+ new_status);
+ return -EINVAL;
+ }
+
+ tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
+ return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_access_type
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return core_alua_show_access_type(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ return core_alua_store_access_type(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_write_metadata
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract alua_write_metadata\n");
+ return -EINVAL;
+ }
+
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for alua_write_metadata:"
+ " %lu\n", tmp);
+ return -EINVAL;
+ }
+ tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
+
+ return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
+
+/*
+ * nonop_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * trans_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * preferred
+ */
+
+static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return core_alua_show_preferred_bit(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ return core_alua_store_preferred_bit(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
+
+/*
+ * tg_pt_gp_id
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ return 0;
+
+ return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+ unsigned long tg_pt_gp_id;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tg_pt_gp_id);
+ if (ret < 0) {
+ printk(KERN_ERR "strict_strtoul() returned %d for"
+ " tg_pt_gp_id\n", ret);
+ return -EINVAL;
+ }
+ if (tg_pt_gp_id > 0x0000ffff) {
+ printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
+ " 0x0000ffff\n", tg_pt_gp_id);
+ return -EINVAL;
+ }
+
+ ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
+ if (ret < 0)
+ return -EINVAL;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
+ "core/alua/tg_pt_gps/%s to ID: %hu\n",
+ config_item_name(&alua_tg_pt_gp_cg->cg_item),
+ tg_pt_gp->tg_pt_gp_id);
+
+ return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ struct se_port *port;
+ struct se_portal_group *tpg;
+ struct se_lun *lun;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ ssize_t len = 0, cur_len;
+ unsigned char buf[TG_PT_GROUP_NAME_BUF];
+
+ memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+
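+	/*
+	 * Each member is reported as "<fabric>/<tpg wwn>/tpgt_<tag>/<lun>",
+	 * e.g. (hypothetical names) "iscsi/iqn.2003-01.org.foo:sn.1/tpgt_1/lun_0".
+	 */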
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+ tg_pt_gp_mem_list) {
+ port = tg_pt_gp_mem->tg_pt;
+ tpg = port->sep_tpg;
+ lun = port->sep_lun;
+
+ cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
+ "/%s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ config_item_name(&lun->lun_group.cg_item));
+ cur_len++; /* Extra byte for NULL terminator */
+
+ if ((cur_len + len) > PAGE_SIZE) {
+			printk(KERN_WARNING "Ran out of tg_pt_gp_show_attr"
+				"_members buffer\n");
+ break;
+ }
+ memcpy(page+len, buf, cur_len);
+ len += cur_len;
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+ return len;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
+ tg_pt_gp_group);
+
+static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
+ &target_core_alua_tg_pt_gp_alua_access_state.attr,
+ &target_core_alua_tg_pt_gp_alua_access_status.attr,
+ &target_core_alua_tg_pt_gp_alua_access_type.attr,
+ &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
+ &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
+ &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
+ &target_core_alua_tg_pt_gp_preferred.attr,
+ &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
+ &target_core_alua_tg_pt_gp_members.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+ .show_attribute = target_core_alua_tg_pt_gp_attr_show,
+ .store_attribute = target_core_alua_tg_pt_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gp_cit = {
+ .ct_item_ops = &target_core_alua_tg_pt_gp_ops,
+ .ct_attrs = target_core_alua_tg_pt_gp_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+static struct config_group *target_core_alua_create_tg_pt_gp(
+ struct config_group *group,
+ const char *name)
+{
+ struct t10_alua *alua = container_of(group, struct t10_alua,
+ alua_tg_pt_gps_group);
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
+ struct config_group *alua_tg_pt_gp_cg = NULL;
+ struct config_item *alua_tg_pt_gp_ci = NULL;
+
+ tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+ if (!(tg_pt_gp))
+ return NULL;
+
+ alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+ alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
+
+ config_group_init_type_name(alua_tg_pt_gp_cg, name,
+ &target_core_alua_tg_pt_gp_cit);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
+ " Group: alua/tg_pt_gps/%s\n",
+ config_item_name(alua_tg_pt_gp_ci));
+
+ return alua_tg_pt_gp_cg;
+}
+
+static void target_core_alua_drop_tg_pt_gp(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+ struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
+ " Group: alua/tg_pt_gps/%s, ID: %hu\n",
+ config_item_name(item), tg_pt_gp->tg_pt_gp_id);
+
+ config_item_put(item);
+ core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
+static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
+ .make_group = &target_core_alua_create_tg_pt_gp,
+ .drop_item = &target_core_alua_drop_tg_pt_gp,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gps_cit = {
+ .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_cit */
+
+/*
+ * target_core_alua_cit is a ConfigFS group that lives under
+ * /sys/kernel/config/target/core/alua.  The default group
+ * core/alua/lu_gps is attached to target_core_alua_cit in
+ * target_core_init_configfs() below, while per-device tg_pt_gps
+ * groups are created under core/$HBA/$DEV/alua/.
+ */
+static struct config_item_type target_core_alua_cit = {
+ .ct_item_ops = NULL,
+ .ct_attrs = NULL,
+ .ct_owner = THIS_MODULE,
+};
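+/*
+ * Illustrative sketch of the resulting configfs layout.  The lu_gps and
+ * default_lu_gp entries are created in target_core_init_configfs() below;
+ * $HBA and $DEV stand for hypothetical user-created HBA and device groups:
+ *
+ *   /sys/kernel/config/target/core/
+ *   |-- alua/
+ *   |   `-- lu_gps/
+ *   |       `-- default_lu_gp/
+ *   `-- $HBA/
+ *       `-- $DEV/
+ *           `-- alua/
+ *               `-- default_tg_pt_gp/
+ */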
+
+/* End functions for struct config_item_type target_core_alua_cit */
+
+/* Start functions for struct config_item_type target_core_hba_cit */
+
+static struct config_group *target_core_make_subdev(
+ struct config_group *group,
+ const char *name)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct se_subsystem_dev *se_dev;
+ struct se_subsystem_api *t;
+ struct config_item *hba_ci = &group->cg_item;
+ struct se_hba *hba = item_to_hba(hba_ci);
+ struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
+
+ if (mutex_lock_interruptible(&hba->hba_access_mutex))
+ return NULL;
+
+ /*
+ * Locate the struct se_subsystem_api from parent's struct se_hba.
+ */
+ t = hba->transport;
+
+ se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+ if (!se_dev) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct se_subsystem_dev\n");
+ goto unlock;
+ }
+ INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+ INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+ spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+ INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
+ INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
+ spin_lock_init(&se_dev->t10_reservation.registration_lock);
+ spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+ INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+ spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock_init(&se_dev->se_dev_lock);
+ se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ se_dev->t10_wwn.t10_sub_dev = se_dev;
+ se_dev->t10_alua.t10_sub_dev = se_dev;
+ se_dev->se_dev_attrib.da_sub_dev = se_dev;
+
+ se_dev->se_dev_hba = hba;
+ dev_cg = &se_dev->se_dev_group;
+
+ dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+ GFP_KERNEL);
+ if (!(dev_cg->default_groups))
+ goto out;
+ /*
+ * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
+ * for ->allocate_virtdevice()
+ *
+ * se_dev->se_dev_ptr will be set after ->create_virtdev()
+ * has been called successfully in the next level up in the
+ * configfs tree for device object's struct config_group.
+ */
+ se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
+ if (!(se_dev->se_dev_su_ptr)) {
+ printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+ " from allocate_virtdevice()\n");
+ goto out;
+ }
+ spin_lock(&se_global->g_device_lock);
+ list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
+ spin_unlock(&se_global->g_device_lock);
+
+ config_group_init_type_name(&se_dev->se_dev_group, name,
+ &target_core_dev_cit);
+ config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+ &target_core_dev_attrib_cit);
+ config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+ &target_core_dev_pr_cit);
+ config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+ &target_core_dev_wwn_cit);
+ config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+ "alua", &target_core_alua_tg_pt_gps_cit);
+ dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
+ dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
+ dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
+ dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
+ dev_cg->default_groups[4] = NULL;
+ /*
+	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
+ */
+ tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+ if (!(tg_pt_gp))
+ goto out;
+
+ tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+ tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!(tg_pt_gp_cg->default_groups)) {
+ printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
+ "default_groups\n");
+ goto out;
+ }
+
+ config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
+ "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
+ tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
+ tg_pt_gp_cg->default_groups[1] = NULL;
+ T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
+ " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+
+ mutex_unlock(&hba->hba_access_mutex);
+ return &se_dev->se_dev_group;
+out:
+ if (T10_ALUA(se_dev)->default_tg_pt_gp) {
+ core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+ T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+ }
+ if (tg_pt_gp_cg)
+ kfree(tg_pt_gp_cg->default_groups);
+ if (dev_cg)
+ kfree(dev_cg->default_groups);
+ if (se_dev->se_dev_su_ptr)
+ t->free_device(se_dev->se_dev_su_ptr);
+ kfree(se_dev);
+unlock:
+ mutex_unlock(&hba->hba_access_mutex);
+ return NULL;
+}
+
+static void target_core_drop_subdev(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+ struct se_subsystem_dev, se_dev_group);
+ struct se_hba *hba;
+ struct se_subsystem_api *t;
+ struct config_item *df_item;
+ struct config_group *dev_cg, *tg_pt_gp_cg;
+ int i, ret;
+
+ hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+
+ if (mutex_lock_interruptible(&hba->hba_access_mutex))
+ goto out;
+
+ t = hba->transport;
+
+ spin_lock(&se_global->g_device_lock);
+ list_del(&se_dev->g_se_dev_list);
+ spin_unlock(&se_global->g_device_lock);
+
+ tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+ for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
+ df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
+ tg_pt_gp_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(tg_pt_gp_cg->default_groups);
+ core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+ T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+
+ dev_cg = &se_dev->se_dev_group;
+ for (i = 0; dev_cg->default_groups[i]; i++) {
+ df_item = &dev_cg->default_groups[i]->cg_item;
+ dev_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+
+ config_item_put(item);
+ /*
+	 * This pointer will be set when the storage is enabled with:
+ * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+ */
+ if (se_dev->se_dev_ptr) {
+ printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+ "virtual_device() for se_dev_ptr: %p\n",
+ se_dev->se_dev_ptr);
+
+ ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
+ if (ret < 0)
+ goto hba_out;
+ } else {
+ /*
+ * Release struct se_subsystem_dev->se_dev_su_ptr..
+ */
+ printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+ "device() for se_dev_su_ptr: %p\n",
+ se_dev->se_dev_su_ptr);
+
+ t->free_device(se_dev->se_dev_su_ptr);
+ }
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+ "_dev_t: %p\n", se_dev);
+
+hba_out:
+ mutex_unlock(&hba->hba_access_mutex);
+out:
+ kfree(se_dev);
+}
+
+static struct configfs_group_operations target_core_hba_group_ops = {
+ .make_group = target_core_make_subdev,
+ .drop_item = target_core_drop_subdev,
+};
+
+CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
+#define SE_HBA_ATTR(_name, _mode) \
+static struct target_core_hba_attribute \
+ target_core_hba_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_hba_show_attr_##_name, \
+ target_core_hba_store_attr_##_name);
+
+#define SE_HBA_ATTR_RO(_name) \
+static struct target_core_hba_attribute \
+ target_core_hba_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_hba_show_attr_##_name);
+
+static ssize_t target_core_hba_show_attr_hba_info(
+ struct se_hba *hba,
+ char *page)
+{
+ return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
+ hba->hba_id, hba->transport->name,
+ TARGET_CORE_CONFIGFS_VERSION);
+}
+
+SE_HBA_ATTR_RO(hba_info);
+
+static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
+ char *page)
+{
+ int hba_mode = 0;
+
+ if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
+ hba_mode = 1;
+
+ return sprintf(page, "%d\n", hba_mode);
+}
+
+static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
+ const char *page, size_t count)
+{
+ struct se_subsystem_api *transport = hba->transport;
+ unsigned long mode_flag;
+ int ret;
+
+ if (transport->pmode_enable_hba == NULL)
+ return -EINVAL;
+
+ ret = strict_strtoul(page, 0, &mode_flag);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
+ return -EINVAL;
+ }
+
+ spin_lock(&hba->device_lock);
+ if (!(list_empty(&hba->hba_dev_list))) {
+ printk(KERN_ERR "Unable to set hba_mode with active devices\n");
+ spin_unlock(&hba->device_lock);
+ return -EINVAL;
+ }
+ spin_unlock(&hba->device_lock);
+
+ ret = transport->pmode_enable_hba(hba, mode_flag);
+ if (ret < 0)
+ return -EINVAL;
+ if (ret > 0)
+ hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+ else if (ret == 0)
+ hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+
+ return count;
+}
+
+SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
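+/*
+ * Illustrative usage only (the HBA name is a hypothetical example built
+ * from the $SUBSYSTEM_PLUGIN_$HOST_ID convention below): with no devices
+ * configured under the HBA,
+ *
+ *   echo 1 > /sys/kernel/config/target/core/pscsi_0/hba_mode
+ *
+ * asks the subsystem plugin's pmode_enable_hba() to switch the HBA into
+ * passthrough mode; plugins without pmode_enable_hba() return -EINVAL.
+ */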
+
+CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
+
+static struct configfs_attribute *target_core_hba_attrs[] = {
+ &target_core_hba_hba_info.attr,
+ &target_core_hba_hba_mode.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_core_hba_item_ops = {
+ .show_attribute = target_core_hba_attr_show,
+ .store_attribute = target_core_hba_attr_store,
+};
+
+static struct config_item_type target_core_hba_cit = {
+ .ct_item_ops = &target_core_hba_item_ops,
+ .ct_group_ops = &target_core_hba_group_ops,
+ .ct_attrs = target_core_hba_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *target_core_call_addhbatotarget(
+ struct config_group *group,
+ const char *name)
+{
+ char *se_plugin_str, *str, *str2;
+ struct se_hba *hba;
+ char buf[TARGET_CORE_NAME_MAX_LEN];
+ unsigned long plugin_dep_id = 0;
+ int ret;
+
+ memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
+ if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
+ printk(KERN_ERR "Passed *name strlen(): %d exceeds"
+ " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
+ TARGET_CORE_NAME_MAX_LEN);
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+ snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
+
+ str = strstr(buf, "_");
+ if (!(str)) {
+ printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+ return ERR_PTR(-EINVAL);
+ }
+ se_plugin_str = buf;
+ /*
+ * Special case for subsystem plugins that have "_" in their names.
+ * Namely rd_direct and rd_mcp..
+ */
+ str2 = strstr(str+1, "_");
+ if ((str2)) {
+ *str2 = '\0'; /* Terminate for *se_plugin_str */
+ str2++; /* Skip to start of plugin dependent ID */
+ str = str2;
+ } else {
+ *str = '\0'; /* Terminate for *se_plugin_str */
+ str++; /* Skip to start of plugin dependent ID */
+ }
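+	/*
+	 * Illustrative examples only: "mkdir core/iblock_0" leaves
+	 * se_plugin_str pointing at "iblock" with str at "0", while
+	 * "mkdir core/rd_mcp_1" takes the second '_' and yields "rd_mcp"
+	 * with str at "1".
+	 */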
+
+ ret = strict_strtoul(str, 0, &plugin_dep_id);
+ if (ret < 0) {
+ printk(KERN_ERR "strict_strtoul() returned %d for"
+ " plugin_dep_id\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ /*
+ * Load up TCM subsystem plugins if they have not already been loaded.
+ */
+ if (transport_subsystem_check_init() < 0)
+ return ERR_PTR(-EINVAL);
+
+ hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
+ if (IS_ERR(hba))
+ return ERR_CAST(hba);
+
+ config_group_init_type_name(&hba->hba_group, name,
+ &target_core_hba_cit);
+
+ return &hba->hba_group;
+}
+
+static void target_core_call_delhbafromtarget(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct se_hba *hba = item_to_hba(item);
+
+ config_item_put(item);
+ core_delete_hba(hba);
+}
+
+static struct configfs_group_operations target_core_group_ops = {
+ .make_group = target_core_call_addhbatotarget,
+ .drop_item = target_core_call_delhbafromtarget,
+};
+
+static struct config_item_type target_core_cit = {
+ .ct_item_ops = NULL,
+ .ct_group_ops = &target_core_group_ops,
+ .ct_attrs = NULL,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_hba_cit */
+
+static int target_core_init_configfs(void)
+{
+ struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
+ struct config_group *lu_gp_cg = NULL;
+ struct configfs_subsystem *subsys;
+ struct proc_dir_entry *scsi_target_proc = NULL;
+ struct t10_alua_lu_gp *lu_gp;
+ int ret;
+
+ printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
+ " Engine: %s on %s/%s on "UTS_RELEASE"\n",
+ TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
+
+ subsys = target_core_subsystem[0];
+ config_group_init(&subsys->su_group);
+ mutex_init(&subsys->su_mutex);
+
+ INIT_LIST_HEAD(&g_tf_list);
+ mutex_init(&g_tf_lock);
+ init_scsi_index_table();
+ ret = init_se_global();
+ if (ret < 0)
+ return -1;
+ /*
+	 * Create the $CONFIGFS/target/core default group for the HBA <->
+	 * Storage Object and ALUA Logical Unit Group / Target Port Group
+	 * infrastructure.
+ */
+ target_cg = &subsys->su_group;
+ target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!(target_cg->default_groups)) {
+ printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
+ goto out_global;
+ }
+
+ config_group_init_type_name(&se_global->target_core_hbagroup,
+ "core", &target_core_cit);
+ target_cg->default_groups[0] = &se_global->target_core_hbagroup;
+ target_cg->default_groups[1] = NULL;
+ /*
+ * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
+ */
+ hba_cg = &se_global->target_core_hbagroup;
+ hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!(hba_cg->default_groups)) {
+ printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
+ goto out_global;
+ }
+ config_group_init_type_name(&se_global->alua_group,
+ "alua", &target_core_alua_cit);
+ hba_cg->default_groups[0] = &se_global->alua_group;
+ hba_cg->default_groups[1] = NULL;
+ /*
+ * Add ALUA Logical Unit Group and Target Port Group ConfigFS
+ * groups under /sys/kernel/config/target/core/alua/
+ */
+ alua_cg = &se_global->alua_group;
+ alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!(alua_cg->default_groups)) {
+ printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
+ goto out_global;
+ }
+
+ config_group_init_type_name(&se_global->alua_lu_gps_group,
+ "lu_gps", &target_core_alua_lu_gps_cit);
+ alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
+ alua_cg->default_groups[1] = NULL;
+ /*
+ * Add core/alua/lu_gps/default_lu_gp
+ */
+ lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+ if (IS_ERR(lu_gp))
+ goto out_global;
+
+ lu_gp_cg = &se_global->alua_lu_gps_group;
+ lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!(lu_gp_cg->default_groups)) {
+ printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
+ goto out_global;
+ }
+
+ config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
+ &target_core_alua_lu_gp_cit);
+ lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
+ lu_gp_cg->default_groups[1] = NULL;
+ se_global->default_lu_gp = lu_gp;
+ /*
+ * Register the target_core_mod subsystem with configfs.
+ */
+ ret = configfs_register_subsystem(subsys);
+ if (ret < 0) {
+ printk(KERN_ERR "Error %d while registering subsystem %s\n",
+ ret, subsys->su_group.cg_item.ci_namebuf);
+ goto out_global;
+ }
+ printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
+ " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
+ " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
+ /*
+ * Register built-in RAMDISK subsystem logic for virtual LUN 0
+ */
+ ret = rd_module_init();
+ if (ret < 0)
+ goto out;
+
+ if (core_dev_setup_virtual_lun0() < 0)
+ goto out;
+
+ scsi_target_proc = proc_mkdir("scsi_target", 0);
+ if (!(scsi_target_proc)) {
+ printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
+ goto out;
+ }
+ ret = init_scsi_target_mib();
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+ configfs_unregister_subsystem(subsys);
+ if (scsi_target_proc)
+ remove_proc_entry("scsi_target", 0);
+ core_dev_release_virtual_lun0();
+ rd_module_exit();
+out_global:
+ if (se_global->default_lu_gp) {
+ core_alua_free_lu_gp(se_global->default_lu_gp);
+ se_global->default_lu_gp = NULL;
+ }
+ if (lu_gp_cg)
+ kfree(lu_gp_cg->default_groups);
+ if (alua_cg)
+ kfree(alua_cg->default_groups);
+ if (hba_cg)
+ kfree(hba_cg->default_groups);
+ kfree(target_cg->default_groups);
+ release_se_global();
+ return -1;
+}
+
+static void target_core_exit_configfs(void)
+{
+ struct configfs_subsystem *subsys;
+ struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
+ struct config_item *item;
+ int i;
+
+ se_global->in_shutdown = 1;
+ subsys = target_core_subsystem[0];
+
+ lu_gp_cg = &se_global->alua_lu_gps_group;
+ for (i = 0; lu_gp_cg->default_groups[i]; i++) {
+ item = &lu_gp_cg->default_groups[i]->cg_item;
+ lu_gp_cg->default_groups[i] = NULL;
+ config_item_put(item);
+ }
+ kfree(lu_gp_cg->default_groups);
+ core_alua_free_lu_gp(se_global->default_lu_gp);
+ se_global->default_lu_gp = NULL;
+
+ alua_cg = &se_global->alua_group;
+ for (i = 0; alua_cg->default_groups[i]; i++) {
+ item = &alua_cg->default_groups[i]->cg_item;
+ alua_cg->default_groups[i] = NULL;
+ config_item_put(item);
+ }
+ kfree(alua_cg->default_groups);
+
+ hba_cg = &se_global->target_core_hbagroup;
+ for (i = 0; hba_cg->default_groups[i]; i++) {
+ item = &hba_cg->default_groups[i]->cg_item;
+ hba_cg->default_groups[i] = NULL;
+ config_item_put(item);
+ }
+ kfree(hba_cg->default_groups);
+
+ for (i = 0; subsys->su_group.default_groups[i]; i++) {
+ item = &subsys->su_group.default_groups[i]->cg_item;
+ subsys->su_group.default_groups[i] = NULL;
+ config_item_put(item);
+ }
+ kfree(subsys->su_group.default_groups);
+
+ configfs_unregister_subsystem(subsys);
+ printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
+ " Infrastructure\n");
+
+ remove_scsi_target_mib();
+ remove_proc_entry("scsi_target", 0);
+ core_dev_release_virtual_lun0();
+ rd_module_exit();
+ release_se_global();
+
+ return;
+}
+
+MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(target_core_init_configfs);
+module_exit(target_core_exit_configfs);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
new file mode 100644
index 000000000000..317ce58d426d
--- /dev/null
+++ b/drivers/target/target_core_device.c
@@ -0,0 +1,1694 @@
+/*******************************************************************************
+ * Filename: target_core_device.c (based on iscsi_target_device.c)
+ *
+ * This file contains the iSCSI Virtual Device and Disk functions that
+ * are independent of the underlying transport.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+static void se_dev_start(struct se_device *dev);
+static void se_dev_stop(struct se_device *dev);
+
+int transport_get_lun_for_cmd(
+ struct se_cmd *se_cmd,
+ unsigned char *cdb,
+ u32 unpacked_lun)
+{
+ struct se_dev_entry *deve;
+ struct se_lun *se_lun = NULL;
+ struct se_session *se_sess = SE_SESS(se_cmd);
+ unsigned long flags;
+ int read_only = 0;
+
+ spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ deve = se_cmd->se_deve =
+ &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ if (se_cmd) {
+ deve->total_cmds++;
+ deve->total_bytes += se_cmd->data_length;
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE) {
+ if (deve->lun_flags &
+ TRANSPORT_LUNFLAGS_READ_ONLY) {
+ read_only = 1;
+ goto out;
+ }
+ deve->write_bytes += se_cmd->data_length;
+ } else if (se_cmd->data_direction ==
+ DMA_FROM_DEVICE) {
+ deve->read_bytes += se_cmd->data_length;
+ }
+ }
+ deve->deve_cmds++;
+
+ se_lun = se_cmd->se_lun = deve->se_lun;
+ se_cmd->pr_res_key = deve->pr_res_key;
+ se_cmd->orig_fe_lun = unpacked_lun;
+ se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ }
+out:
+ spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+ if (!se_lun) {
+ if (read_only) {
+ se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+ " Access for 0x%08x\n",
+ CMD_TFO(se_cmd)->get_fabric_name(),
+ unpacked_lun);
+ return -1;
+ } else {
+ /*
+ * Use the se_portal_group->tpg_virt_lun0 to allow for
+ * REPORT_LUNS, et al to be returned when no active
+ * MappedLUN=0 exists for this Initiator Port.
+ */
+ if (unpacked_lun != 0) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+ " Access for 0x%08x\n",
+ CMD_TFO(se_cmd)->get_fabric_name(),
+ unpacked_lun);
+ return -1;
+ }
+ /*
+ * Force WRITE PROTECT for virtual LUN 0
+ */
+ if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+ (se_cmd->data_direction != DMA_NONE)) {
+ se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -1;
+ }
+#if 0
+ printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
+ CMD_TFO(se_cmd)->get_fabric_name());
+#endif
+ se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+ se_cmd->orig_fe_lun = 0;
+ se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ }
+ }
+ /*
+ * Determine if the struct se_lun is online.
+ */
+/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
+ if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -1;
+ }
+
+ {
+ struct se_device *dev = se_lun->lun_se_dev;
+ spin_lock(&dev->stats_lock);
+ dev->num_cmds++;
+ if (se_cmd->data_direction == DMA_TO_DEVICE)
+ dev->write_bytes += se_cmd->data_length;
+ else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+ dev->read_bytes += se_cmd->data_length;
+ spin_unlock(&dev->stats_lock);
+ }
+
+ /*
+	 * Add the struct se_cmd to the struct se_lun's cmd list.  This list is used
+ * for tracking state of struct se_cmds during LUN shutdown events.
+ */
+ spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
+ list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
+ atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
+#if 0
+ printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
+ CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
+#endif
+ spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(transport_get_lun_for_cmd);
+
+int transport_get_lun_for_tmr(
+ struct se_cmd *se_cmd,
+ u32 unpacked_lun)
+{
+ struct se_device *dev = NULL;
+ struct se_dev_entry *deve;
+ struct se_lun *se_lun = NULL;
+ struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+
+ spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ deve = se_cmd->se_deve =
+ &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
+ dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+ se_cmd->pr_res_key = deve->pr_res_key;
+ se_cmd->orig_fe_lun = unpacked_lun;
+ se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
+ }
+ spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+ if (!se_lun) {
+ printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+ " Access for 0x%08x\n",
+ CMD_TFO(se_cmd)->get_fabric_name(),
+ unpacked_lun);
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -1;
+ }
+ /*
+ * Determine if the struct se_lun is online.
+ */
+/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
+ if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -1;
+ }
+
+ spin_lock(&dev->se_tmr_lock);
+ list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
+ spin_unlock(&dev->se_tmr_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(transport_get_lun_for_tmr);
+
+/*
+ * This function is called from core_scsi3_emulate_pro_register_and_move()
+ * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
+ * when a matching rtpi is found.
+ */
+struct se_dev_entry *core_get_se_deve_from_rtpi(
+ struct se_node_acl *nacl,
+ u16 rtpi)
+{
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+ struct se_port *port;
+ struct se_portal_group *tpg = nacl->se_tpg;
+ u32 i;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &nacl->device_list[i];
+
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+
+ lun = deve->se_lun;
+ if (!(lun)) {
+			printk(KERN_ERR "%s device entry's LUN pointer is"
+				" NULL, but Initiator has access.\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ continue;
+ }
+ port = lun->lun_sep;
+ if (!(port)) {
+			printk(KERN_ERR "%s device entry's port pointer is"
+				" NULL, but Initiator has access.\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ continue;
+ }
+ if (port->sep_rtpi != rtpi)
+ continue;
+
+ atomic_inc(&deve->pr_ref_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ return deve;
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ return NULL;
+}
+
+int core_free_device_list_for_node(
+ struct se_node_acl *nacl,
+ struct se_portal_group *tpg)
+{
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+ u32 i;
+
+ if (!nacl->device_list)
+ return 0;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &nacl->device_list[i];
+
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+
+ if (!deve->se_lun) {
+			printk(KERN_ERR "%s device entry's LUN pointer is"
+				" NULL, but Initiator has access.\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ continue;
+ }
+ lun = deve->se_lun;
+
+ spin_unlock_irq(&nacl->device_list_lock);
+ core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+ spin_lock_irq(&nacl->device_list_lock);
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ kfree(nacl->device_list);
+ nacl->device_list = NULL;
+
+ return 0;
+}
+
+void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
+{
+ struct se_dev_entry *deve;
+
+ spin_lock_irq(&se_nacl->device_list_lock);
+ deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
+ deve->deve_cmds--;
+ spin_unlock_irq(&se_nacl->device_list_lock);
+
+ return;
+}
+
+void core_update_device_list_access(
+ u32 mapped_lun,
+ u32 lun_access,
+ struct se_node_acl *nacl)
+{
+ struct se_dev_entry *deve;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[mapped_lun];
+ if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+ } else {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ return;
+}
+
+/* core_update_device_list_for_node():
+ *
+ *
+ */
+int core_update_device_list_for_node(
+ struct se_lun *lun,
+ struct se_lun_acl *lun_acl,
+ u32 mapped_lun,
+ u32 lun_access,
+ struct se_node_acl *nacl,
+ struct se_portal_group *tpg,
+ int enable)
+{
+ struct se_port *port = lun->lun_sep;
+ struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
+ int trans = 0;
+ /*
+ * If the MappedLUN entry is being disabled, the entry in
+ * port->sep_alua_list must be removed now before clearing the
+ * struct se_dev_entry pointers below as logic in
+ * core_alua_do_transition_tg_pt() depends on these being present.
+ */
+ if (!(enable)) {
+ /*
+ * deve->se_lun_acl will be NULL for demo-mode created LUNs
+ * that have not been explictly concerted to MappedLUNs ->
+		 * that have not been explicitly converted to MappedLUNs ->
+ */
+ if (!(deve->se_lun_acl))
+ return 0;
+
+ spin_lock_bh(&port->sep_alua_lock);
+ list_del(&deve->alua_port_list);
+ spin_unlock_bh(&port->sep_alua_lock);
+ }
+
+ spin_lock_irq(&nacl->device_list_lock);
+ if (enable) {
+ /*
+		 * Check if the call is handling a demo mode -> explicit LUN ACL
+		 * transition.  This transition must be for the same struct se_lun
+		 * + mapped_lun that was set up in demo mode.
+ */
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ if (deve->se_lun_acl != NULL) {
+ printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
+					" already set for demo mode -> explicit"
+ " LUN ACL transition\n");
+ return -1;
+ }
+ if (deve->se_lun != lun) {
+ printk(KERN_ERR "struct se_dev_entry->se_lun does"
+					" not match passed struct se_lun for demo"
+					" mode -> explicit LUN ACL transition\n");
+ return -1;
+ }
+ deve->se_lun_acl = lun_acl;
+ trans = 1;
+ } else {
+ deve->se_lun = lun;
+ deve->se_lun_acl = lun_acl;
+ deve->mapped_lun = mapped_lun;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
+ }
+
+ if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+ } else {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+ }
+
+ if (trans) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return 0;
+ }
+ deve->creation_time = get_jiffies_64();
+ deve->attach_count++;
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ spin_lock_bh(&port->sep_alua_lock);
+ list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
+ spin_unlock_bh(&port->sep_alua_lock);
+
+ return 0;
+ }
+ /*
+	 * Wait for any in-progress SPEC_I_PT=1 or REGISTER_AND_MOVE
+ * PR operation to complete.
+ */
+ spin_unlock_irq(&nacl->device_list_lock);
+ while (atomic_read(&deve->pr_ref_count) != 0)
+ cpu_relax();
+ spin_lock_irq(&nacl->device_list_lock);
+ /*
+ * Disable struct se_dev_entry LUN ACL mapping
+ */
+ core_scsi3_ua_release_all(deve);
+ deve->se_lun = NULL;
+ deve->se_lun_acl = NULL;
+ deve->lun_flags = 0;
+ deve->creation_time = 0;
+ deve->attach_count--;
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
+ return 0;
+}
+
+/* core_clear_lun_from_tpg():
+ *
+ *
+ */
+void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
+{
+ struct se_node_acl *nacl;
+ struct se_dev_entry *deve;
+ u32 i;
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ spin_lock_irq(&nacl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &nacl->device_list[i];
+ if (lun != deve->se_lun)
+ continue;
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ core_update_device_list_for_node(lun, NULL,
+ deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
+ nacl, tpg, 0);
+
+ spin_lock_irq(&nacl->device_list_lock);
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ }
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ return;
+}
+
+static struct se_port *core_alloc_port(struct se_device *dev)
+{
+ struct se_port *port, *port_tmp;
+
+ port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
+ if (!(port)) {
+ printk(KERN_ERR "Unable to allocate struct se_port\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&port->sep_alua_list);
+ INIT_LIST_HEAD(&port->sep_list);
+ atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+ spin_lock_init(&port->sep_alua_lock);
+ mutex_init(&port->sep_tg_pt_md_mutex);
+
+ spin_lock(&dev->se_port_lock);
+ if (dev->dev_port_count == 0x0000ffff) {
+ printk(KERN_WARNING "Reached dev->dev_port_count =="
+ " 0x0000ffff\n");
+ spin_unlock(&dev->se_port_lock);
+ return NULL;
+ }
+again:
+ /*
+	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
+ * Here is the table from spc4r17 section 7.7.3.8.
+ *
+ * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
+ *
+ * Code Description
+ * 0h Reserved
+ * 1h Relative port 1, historically known as port A
+ * 2h Relative port 2, historically known as port B
+ * 3h to FFFFh Relative port 3 through 65 535
+ */
+ port->sep_rtpi = dev->dev_rpti_counter++;
+ if (!(port->sep_rtpi))
+ goto again;
+
+ list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
+ /*
+		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
+ * for 16-bit wrap..
+ */
+ if (port->sep_rtpi == port_tmp->sep_rtpi)
+ goto again;
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ return port;
+}
+
+static void core_export_port(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_port *port,
+ struct se_lun *lun)
+{
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
+
+ spin_lock(&dev->se_port_lock);
+ spin_lock(&lun->lun_sep_lock);
+ port->sep_tpg = tpg;
+ port->sep_lun = lun;
+ lun->lun_sep = port;
+ spin_unlock(&lun->lun_sep_lock);
+
+ list_add_tail(&port->sep_list, &dev->dev_sep_list);
+ spin_unlock(&dev->se_port_lock);
+
+ if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
+ tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
+ if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
+ printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
+ "_gp_member_t\n");
+ return;
+ }
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+ T10_ALUA(su_dev)->default_tg_pt_gp);
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
+ " Group: alua/default_tg_pt_gp\n",
+ TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
+ }
+
+ dev->dev_port_count++;
+	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
+}
+
+/*
+ * Called with struct se_device->se_port_lock spinlock held.
+ */
+static void core_release_port(struct se_device *dev, struct se_port *port)
+{
+ /*
+ * Wait for any port reference for PR ALL_TG_PT=1 operation
+ * to complete in __core_scsi3_alloc_registration()
+ */
+ spin_unlock(&dev->se_port_lock);
+	while (atomic_read(&port->sep_tg_pt_ref_cnt))
+		cpu_relax();
+ spin_lock(&dev->se_port_lock);
+
+ core_alua_free_tg_pt_gp_mem(port);
+
+ list_del(&port->sep_list);
+ dev->dev_port_count--;
+ kfree(port);
+
+ return;
+}
+
+int core_dev_export(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_lun *lun)
+{
+ struct se_port *port;
+
+ port = core_alloc_port(dev);
+ if (!(port))
+ return -1;
+
+ lun->lun_se_dev = dev;
+ se_dev_start(dev);
+
+ atomic_inc(&dev->dev_export_obj.obj_access_count);
+ core_export_port(dev, tpg, port, lun);
+ return 0;
+}
+
+void core_dev_unexport(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_lun *lun)
+{
+ struct se_port *port = lun->lun_sep;
+
+ spin_lock(&lun->lun_sep_lock);
+ if (lun->lun_se_dev == NULL) {
+ spin_unlock(&lun->lun_sep_lock);
+ return;
+ }
+ spin_unlock(&lun->lun_sep_lock);
+
+ spin_lock(&dev->se_port_lock);
+ atomic_dec(&dev->dev_export_obj.obj_access_count);
+ core_release_port(dev, port);
+ spin_unlock(&dev->se_port_lock);
+
+ se_dev_stop(dev);
+ lun->lun_se_dev = NULL;
+}
+
+int transport_core_report_lun_response(struct se_cmd *se_cmd)
+{
+ struct se_dev_entry *deve;
+ struct se_lun *se_lun;
+ struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_task *se_task;
+ unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
+ u32 cdb_offset = 0, lun_count = 0, offset = 8;
+ u64 i, lun;
+
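+	/*
+	 * REPORT LUNS parameter data layout (SPC):
+	 *
+	 *   bytes 0-3 : LUN LIST LENGTH (lun_count * 8, filled in at "done")
+	 *   bytes 4-7 : reserved
+	 *   bytes 8.. : one 8 byte LUN entry per reported LUN
+	 *
+	 * hence offset starts at 8 below.
+	 */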
+ list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
+ break;
+
+ if (!(se_task)) {
+ printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+
+ /*
+ * If no struct se_session pointer is present, this struct se_cmd is
+ * coming via a target_core_mod PASSTHROUGH op, and not through
+ * a $FABRIC_MOD. In that case, report LUN=0 only.
+ */
+ if (!(se_sess)) {
+ lun = 0;
+ buf[offset++] = ((lun >> 56) & 0xff);
+ buf[offset++] = ((lun >> 48) & 0xff);
+ buf[offset++] = ((lun >> 40) & 0xff);
+ buf[offset++] = ((lun >> 32) & 0xff);
+ buf[offset++] = ((lun >> 24) & 0xff);
+ buf[offset++] = ((lun >> 16) & 0xff);
+ buf[offset++] = ((lun >> 8) & 0xff);
+ buf[offset++] = (lun & 0xff);
+ lun_count = 1;
+ goto done;
+ }
+
+ spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &SE_NODE_ACL(se_sess)->device_list[i];
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+ se_lun = deve->se_lun;
+ /*
+		 * Keep counting so the returned LUN LIST LENGTH covers every
+		 * mapped LUN, even once the initial allocation length has
+		 * been reached.  See SPC2-R20 7.19.
+ */
+ lun_count++;
+ if ((cdb_offset + 8) >= se_cmd->data_length)
+ continue;
+
+ lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
+ buf[offset++] = ((lun >> 56) & 0xff);
+ buf[offset++] = ((lun >> 48) & 0xff);
+ buf[offset++] = ((lun >> 40) & 0xff);
+ buf[offset++] = ((lun >> 32) & 0xff);
+ buf[offset++] = ((lun >> 24) & 0xff);
+ buf[offset++] = ((lun >> 16) & 0xff);
+ buf[offset++] = ((lun >> 8) & 0xff);
+ buf[offset++] = (lun & 0xff);
+ cdb_offset += 8;
+ }
+ spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+ /*
+ * See SPC3 r07, page 159.
+ */
+done:
+ lun_count *= 8;
+ buf[0] = ((lun_count >> 24) & 0xff);
+ buf[1] = ((lun_count >> 16) & 0xff);
+ buf[2] = ((lun_count >> 8) & 0xff);
+ buf[3] = (lun_count & 0xff);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/* se_release_device_for_hba():
+ *
+ *
+ */
+void se_release_device_for_hba(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+
+ if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+ (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
+ (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
+ (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
+ (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
+ se_dev_stop(dev);
+
+ if (dev->dev_ptr) {
+ kthread_stop(dev->process_thread);
+ if (dev->transport->free_device)
+ dev->transport->free_device(dev->dev_ptr);
+ }
+
+ spin_lock(&hba->device_lock);
+ list_del(&dev->dev_list);
+ hba->dev_count--;
+ spin_unlock(&hba->device_lock);
+
+ core_scsi3_free_all_registrations(dev);
+ se_release_vpd_for_dev(dev);
+
+ kfree(dev->dev_status_queue_obj);
+ kfree(dev->dev_queue_obj);
+ kfree(dev);
+
+ return;
+}
+
+void se_release_vpd_for_dev(struct se_device *dev)
+{
+ struct t10_vpd *vpd, *vpd_tmp;
+
+ spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+ list_for_each_entry_safe(vpd, vpd_tmp,
+ &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
+ list_del(&vpd->vpd_list);
+ kfree(vpd);
+ }
+ spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+
+ return;
+}
+
+/*
+ * Called with struct se_hba->device_lock held.
+ */
+void se_clear_dev_ports(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+ struct se_lun *lun;
+ struct se_portal_group *tpg;
+ struct se_port *sep, *sep_tmp;
+
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+ spin_unlock(&dev->se_port_lock);
+ spin_unlock(&hba->device_lock);
+
+ lun = sep->sep_lun;
+ tpg = sep->sep_tpg;
+ spin_lock(&lun->lun_sep_lock);
+ if (lun->lun_se_dev == NULL) {
+ spin_unlock(&lun->lun_sep_lock);
+ continue;
+ }
+ spin_unlock(&lun->lun_sep_lock);
+
+ core_dev_del_lun(tpg, lun->unpacked_lun);
+
+ spin_lock(&hba->device_lock);
+ spin_lock(&dev->se_port_lock);
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ return;
+}
+
+/* se_free_virtual_device():
+ *
+ * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
+ */
+int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
+{
+ spin_lock(&hba->device_lock);
+ se_clear_dev_ports(dev);
+ spin_unlock(&hba->device_lock);
+
+ core_alua_free_lu_gp_mem(dev);
+ se_release_device_for_hba(dev);
+
+ return 0;
+}
+
+static void se_dev_start(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+
+ spin_lock(&hba->device_lock);
+ atomic_inc(&dev->dev_obj.obj_access_count);
+ if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
+ if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
+ dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
+ dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
+ } else if (dev->dev_status &
+ TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
+ dev->dev_status &=
+ ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+ dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+ }
+ }
+ spin_unlock(&hba->device_lock);
+}
+
+static void se_dev_stop(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+
+ spin_lock(&hba->device_lock);
+ atomic_dec(&dev->dev_obj.obj_access_count);
+ if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
+ if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
+ dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
+ dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
+ } else if (dev->dev_status &
+ TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
+ dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+ dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+ }
+ }
+ spin_unlock(&hba->device_lock);
+
+ while (atomic_read(&hba->dev_mib_access_count))
+ cpu_relax();
+}
+
+int se_dev_check_online(struct se_device *dev)
+{
+ int ret;
+
+ spin_lock_irq(&dev->dev_status_lock);
+ ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+ (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
+ spin_unlock_irq(&dev->dev_status_lock);
+
+ return ret;
+}
+
+int se_dev_check_shutdown(struct se_device *dev)
+{
+ int ret;
+
+ spin_lock_irq(&dev->dev_status_lock);
+ ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
+ spin_unlock_irq(&dev->dev_status_lock);
+
+ return ret;
+}
+
+void se_dev_set_default_attribs(
+ struct se_device *dev,
+ struct se_dev_limits *dev_limits)
+{
+ struct queue_limits *limits = &dev_limits->limits;
+
+ DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
+ DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
+ DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
+ DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+ DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
+ DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
+ DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
+ DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
+ DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
+ DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ /*
+ * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
+ * iblock_create_virtdevice() from struct queue_limits values
+ * if blk_queue_discard()==1
+ */
+ DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+ DEV_ATTRIB(dev)->max_unmap_block_desc_count =
+ DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+ DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+ DEV_ATTRIB(dev)->unmap_granularity_alignment =
+ DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+ /*
+ * block_size is based on subsystem plugin dependent requirements.
+ */
+ DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
+ DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
+ /*
+ * max_sectors is based on subsystem plugin dependent requirements.
+ */
+ DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
+ DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
+ /*
+ * Set optimal_sectors from max_sectors, which can be lowered via
+ * configfs.
+ */
+ DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
+ /*
+ * queue_depth is based on subsystem plugin dependent requirements.
+ */
+ DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
+ DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
+}
+
+int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
+{
+ if (task_timeout > DA_TASK_TIMEOUT_MAX) {
+		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
+ " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
+ return -1;
+ } else {
+ DEV_ATTRIB(dev)->task_timeout = task_timeout;
+ printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
+ dev, task_timeout);
+ }
+
+ return 0;
+}
+
+int se_dev_set_max_unmap_lba_count(
+ struct se_device *dev,
+ u32 max_unmap_lba_count)
+{
+ DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
+ printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
+ dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
+ return 0;
+}
+
+int se_dev_set_max_unmap_block_desc_count(
+ struct se_device *dev,
+ u32 max_unmap_block_desc_count)
+{
+ DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
+ printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
+ dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
+ return 0;
+}
+
+int se_dev_set_unmap_granularity(
+ struct se_device *dev,
+ u32 unmap_granularity)
+{
+ DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
+ printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
+ dev, DEV_ATTRIB(dev)->unmap_granularity);
+ return 0;
+}
+
+int se_dev_set_unmap_granularity_alignment(
+ struct se_device *dev,
+ u32 unmap_granularity_alignment)
+{
+ DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
+ printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
+ dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
+ return 0;
+}
+
+int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ if (TRANSPORT(dev)->dpo_emulated == NULL) {
+ printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
+ return -1;
+ }
+ if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
+ printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_dpo = flag;
+ printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
+ " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
+ return 0;
+}
+
+int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ if (TRANSPORT(dev)->fua_write_emulated == NULL) {
+ printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
+ return -1;
+ }
+ if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
+ printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_fua_write = flag;
+ printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
+ dev, DEV_ATTRIB(dev)->emulate_fua_write);
+ return 0;
+}
+
+int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ if (TRANSPORT(dev)->fua_read_emulated == NULL) {
+ printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
+ return -1;
+ }
+ if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
+ printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_fua_read = flag;
+ printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
+ dev, DEV_ATTRIB(dev)->emulate_fua_read);
+ return 0;
+}
+
+int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ if (TRANSPORT(dev)->write_cache_emulated == NULL) {
+ printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
+ return -1;
+ }
+ if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
+ printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_write_cache = flag;
+ printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+ dev, DEV_ATTRIB(dev)->emulate_write_cache);
+ return 0;
+}
+
+int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1) && (flag != 2)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ " UA_INTRLCK_CTRL while dev_export_obj: %d count"
+ " exists\n", dev,
+ atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
+ printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+ dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
+
+ return 0;
+}
+
+int se_dev_set_emulate_tas(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
+ " dev_export_obj: %d count exists\n", dev,
+ atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_tas = flag;
+ printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+ dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
+
+ return 0;
+}
+
+int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ /*
+ * We expect this value to be non-zero when generic Block Layer
+ * Discard support is detected in iblock_create_virtdevice().
+ */
+ if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+ printk(KERN_ERR "Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ DEV_ATTRIB(dev)->emulate_tpu = flag;
+ printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+ dev, flag);
+ return 0;
+}
+
+int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ /*
+ * We expect this value to be non-zero when generic Block Layer
+ * Discard support is detected in iblock_create_virtdevice().
+ */
+ if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+ printk(KERN_ERR "Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ DEV_ATTRIB(dev)->emulate_tpws = flag;
+ printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+ dev, flag);
+ return 0;
+}
+
+int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ DEV_ATTRIB(dev)->enforce_pr_isids = flag;
+ printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
+ (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
+ return 0;
+}
+
+/*
+ * Note, this can only be called on unexported SE Device Object.
+ */
+int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
+{
+ u32 orig_queue_depth = dev->queue_depth;
+
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
+ " dev_export_obj: %d count exists\n", dev,
+ atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -1;
+ }
+ if (!(queue_depth)) {
+ printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
+ "_depth\n", dev);
+ return -1;
+ }
+
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+ printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
+ " exceeds TCM/SE_Device TCQ: %u\n",
+ dev, queue_depth,
+ DEV_ATTRIB(dev)->hw_queue_depth);
+ return -1;
+ }
+ } else {
+ if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
+ if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+ printk(KERN_ERR "dev[%p]: Passed queue_depth:"
+ " %u exceeds TCM/SE_Device MAX"
+ " TCQ: %u\n", dev, queue_depth,
+ DEV_ATTRIB(dev)->hw_queue_depth);
+ return -1;
+ }
+ }
+ }
+
+ DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
+ if (queue_depth > orig_queue_depth)
+ atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
+ else if (queue_depth < orig_queue_depth)
+ atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
+
+ printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
+ dev, queue_depth);
+ return 0;
+}
+
+int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
+{
+ int force = 0; /* Force setting for VDEVS */
+
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ " max_sectors while dev_export_obj: %d count exists\n",
+ dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -1;
+ }
+ if (!(max_sectors)) {
+ printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
+ " max_sectors\n", dev);
+ return -1;
+ }
+ if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+ printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
+ " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
+ DA_STATUS_MAX_SECTORS_MIN);
+ return -1;
+ }
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
+ printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ " greater than TCM/SE_Device max_sectors:"
+ " %u\n", dev, max_sectors,
+ DEV_ATTRIB(dev)->hw_max_sectors);
+ return -1;
+ }
+ } else {
+ if (!(force) && (max_sectors >
+ DEV_ATTRIB(dev)->hw_max_sectors)) {
+ printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ " greater than TCM/SE_Device max_sectors"
+ ": %u, use force=1 to override.\n", dev,
+ max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
+ return -1;
+ }
+ if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+ printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ " greater than DA_STATUS_MAX_SECTORS_MAX:"
+ " %u\n", dev, max_sectors,
+ DA_STATUS_MAX_SECTORS_MAX);
+ return -1;
+ }
+ }
+
+ DEV_ATTRIB(dev)->max_sectors = max_sectors;
+ printk("dev[%p]: SE Device max_sectors changed to %u\n",
+ dev, max_sectors);
+ return 0;
+}
+
+int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
+{
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ " optimal_sectors while dev_export_obj: %d count exists\n",
+ dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -EINVAL;
+ }
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
+ " changed for TCM/pSCSI\n", dev);
+ return -EINVAL;
+ }
+ if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
+ printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
+ " greater than max_sectors: %u\n", dev,
+ optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
+ return -EINVAL;
+ }
+
+ DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
+ printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
+ dev, optimal_sectors);
+ return 0;
+}
+
+int se_dev_set_block_size(struct se_device *dev, u32 block_size)
+{
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
+ " while dev_export_obj: %d count exists\n", dev,
+ atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -1;
+ }
+
+ if ((block_size != 512) &&
+ (block_size != 1024) &&
+ (block_size != 2048) &&
+ (block_size != 4096)) {
+ printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u"
+ " for SE device, must be 512, 1024, 2048 or 4096\n",
+ dev, block_size);
+ return -1;
+ }
+
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
+ " Physical Device, use for Linux/SCSI to change"
+ " block_size for underlying hardware\n", dev);
+ return -1;
+ }
+
+ DEV_ATTRIB(dev)->block_size = block_size;
+ printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
+ dev, block_size);
+ return 0;
+}
+
+struct se_lun *core_dev_add_lun(
+ struct se_portal_group *tpg,
+ struct se_hba *hba,
+ struct se_device *dev,
+ u32 lun)
+{
+ struct se_lun *lun_p;
+ u32 lun_access = 0;
+
+ if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
+ printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
+ atomic_read(&dev->dev_access_obj.obj_access_count));
+ return NULL;
+ }
+
+ lun_p = core_tpg_pre_addlun(tpg, lun);
+ if ((IS_ERR(lun_p)) || !(lun_p))
+ return NULL;
+
+ if (dev->dev_flags & DF_READ_ONLY)
+ lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ else
+ lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+
+ if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
+ return NULL;
+
+ printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+ " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
+ TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
+ /*
+ * Update LUN maps for dynamically added initiators when
+ * generate_node_acl is enabled.
+ */
+ if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
+ struct se_node_acl *acl;
+ spin_lock_bh(&tpg->acl_node_lock);
+ list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+ if (acl->dynamic_node_acl) {
+ spin_unlock_bh(&tpg->acl_node_lock);
+ core_tpg_add_node_to_devs(acl, tpg);
+ spin_lock_bh(&tpg->acl_node_lock);
+ }
+ }
+ spin_unlock_bh(&tpg->acl_node_lock);
+ }
+
+ return lun_p;
+}
+
+/* core_dev_del_lun():
+ *
+ *
+ */
+int core_dev_del_lun(
+ struct se_portal_group *tpg,
+ u32 unpacked_lun)
+{
+ struct se_lun *lun;
+ int ret = 0;
+
+ lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
+ if (!(lun))
+ return ret;
+
+ core_tpg_post_dellun(tpg, lun);
+
+ printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+ " device object\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
+ TPG_TFO(tpg)->get_fabric_name());
+
+ return 0;
+}
+
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+ struct se_lun *lun;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
+ "_PER_TPG-1: %u for Target Portal Group: %hu\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TRANSPORT_MAX_LUNS_PER_TPG-1,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return NULL;
+ }
+ lun = &tpg->tpg_lun_list[unpacked_lun];
+
+ if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
+ printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
+ " Target Portal Group: %hu, ignoring request.\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return NULL;
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return lun;
+}
+
+/* core_dev_get_lun():
+ *
+ *
+ */
+static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+ struct se_lun *lun;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
+ "_TPG-1: %u for Target Portal Group: %hu\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TRANSPORT_MAX_LUNS_PER_TPG-1,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return NULL;
+ }
+ lun = &tpg->tpg_lun_list[unpacked_lun];
+
+ if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+ printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ " Target Portal Group: %hu, ignoring request.\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return NULL;
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return lun;
+}
+
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+ struct se_portal_group *tpg,
+ u32 mapped_lun,
+ char *initiatorname,
+ int *ret)
+{
+ struct se_lun_acl *lacl;
+ struct se_node_acl *nacl;
+
+ if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
+ printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ *ret = -EOVERFLOW;
+ return NULL;
+ }
+ nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ if (!(nacl)) {
+ *ret = -EINVAL;
+ return NULL;
+ }
+ lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
+ if (!(lacl)) {
+ printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
+ *ret = -ENOMEM;
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&lacl->lacl_list);
+ lacl->mapped_lun = mapped_lun;
+ lacl->se_lun_nacl = nacl;
+ snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+
+ return lacl;
+}
+
+int core_dev_add_initiator_node_lun_acl(
+ struct se_portal_group *tpg,
+ struct se_lun_acl *lacl,
+ u32 unpacked_lun,
+ u32 lun_access)
+{
+ struct se_lun *lun;
+ struct se_node_acl *nacl;
+
+ lun = core_dev_get_lun(tpg, unpacked_lun);
+ if (!(lun)) {
+ printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ " Target Portal Group: %hu, ignoring request.\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ return -EINVAL;
+ }
+
+ nacl = lacl->se_lun_nacl;
+ if (!(nacl))
+ return -EINVAL;
+
+ if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
+ (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
+ lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+
+ lacl->se_lun = lun;
+
+ if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
+ lun_access, nacl, tpg, 1) < 0)
+ return -EINVAL;
+
+ spin_lock(&lun->lun_acl_lock);
+ list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
+ atomic_inc(&lun->lun_acl_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&lun->lun_acl_lock);
+
+ printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
+ " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+ (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+ lacl->initiatorname);
+ /*
+ * Check to see if there are any existing persistent reservation APTPL
+ * pre-registrations that need to be enabled for this LUN ACL..
+ */
+ core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
+ return 0;
+}
+
+/* core_dev_del_initiator_node_lun_acl():
+ *
+ *
+ */
+int core_dev_del_initiator_node_lun_acl(
+ struct se_portal_group *tpg,
+ struct se_lun *lun,
+ struct se_lun_acl *lacl)
+{
+ struct se_node_acl *nacl;
+
+ nacl = lacl->se_lun_nacl;
+ if (!(nacl))
+ return -EINVAL;
+
+ spin_lock(&lun->lun_acl_lock);
+ list_del(&lacl->lacl_list);
+ atomic_dec(&lun->lun_acl_count);
+ smp_mb__after_atomic_dec();
+ spin_unlock(&lun->lun_acl_lock);
+
+ core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+ lacl->se_lun = NULL;
+
+ printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
+ " InitiatorNode: %s Mapped LUN: %u\n",
+ TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+ lacl->initiatorname, lacl->mapped_lun);
+
+ return 0;
+}
+
+void core_dev_free_initiator_node_lun_acl(
+ struct se_portal_group *tpg,
+ struct se_lun_acl *lacl)
+{
+ printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+ " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ TPG_TFO(tpg)->get_fabric_name(),
+ lacl->initiatorname, lacl->mapped_lun);
+
+ kfree(lacl);
+}
+
+int core_dev_setup_virtual_lun0(void)
+{
+ struct se_hba *hba;
+ struct se_device *dev;
+ struct se_subsystem_dev *se_dev = NULL;
+ struct se_subsystem_api *t;
+ char buf[16];
+ int ret;
+
+ hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
+ if (IS_ERR(hba))
+ return PTR_ERR(hba);
+
+ se_global->g_lun0_hba = hba;
+ t = hba->transport;
+
+ se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+ if (!(se_dev)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct se_subsystem_dev\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+ INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+ spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+ INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
+ INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
+ spin_lock_init(&se_dev->t10_reservation.registration_lock);
+ spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+ INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+ spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock_init(&se_dev->se_dev_lock);
+ se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ se_dev->t10_wwn.t10_sub_dev = se_dev;
+ se_dev->t10_alua.t10_sub_dev = se_dev;
+ se_dev->se_dev_attrib.da_sub_dev = se_dev;
+ se_dev->se_dev_hba = hba;
+
+ se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
+ if (!(se_dev->se_dev_su_ptr)) {
+ printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+ " from allocate_virtdevice()\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ se_global->g_lun0_su_dev = se_dev;
+
+ memset(buf, 0, 16);
+ sprintf(buf, "rd_pages=8");
+ t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+
+ dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+ if (!(dev) || IS_ERR(dev)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ se_dev->se_dev_ptr = dev;
+ se_global->g_lun0_dev = dev;
+
+ return 0;
+out:
+ se_global->g_lun0_su_dev = NULL;
+ kfree(se_dev);
+ if (se_global->g_lun0_hba) {
+ core_delete_hba(se_global->g_lun0_hba);
+ se_global->g_lun0_hba = NULL;
+ }
+ return ret;
+}
+
+
+void core_dev_release_virtual_lun0(void)
+{
+ struct se_hba *hba = se_global->g_lun0_hba;
+ struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
+
+ if (!(hba))
+ return;
+
+ if (se_global->g_lun0_dev)
+ se_free_virtual_device(se_global->g_lun0_dev, hba);
+
+ kfree(su_dev);
+ core_delete_hba(hba);
+}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
new file mode 100644
index 000000000000..32b148d7e261
--- /dev/null
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -0,0 +1,996 @@
+/*******************************************************************************
+ * Filename: target_core_fabric_configfs.c
+ *
+ * This file contains generic fabric module configfs infrastructure for
+ * TCM v4.x code
+ *
+ * Copyright (c) 2010 Rising Tide Systems
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+
+#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{ \
+ struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
+ struct config_item_type *cit = &tfc->tfc_##_name##_cit; \
+ \
+ cit->ct_item_ops = _item_ops; \
+ cit->ct_group_ops = _group_ops; \
+ cit->ct_attrs = _attrs; \
+ cit->ct_owner = tf->tf_module; \
+ printk("Setup generic %s\n", __stringify(_name)); \
+}
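+
+/*
+ * For illustration: TF_CIT_SETUP(wwn, ...) expands into a
+ * target_fabric_setup_wwn_cit() helper that fills in the
+ * tf_cit_tmpl.tfc_wwn_cit config_item_type with the given item ops,
+ * group ops and attributes; target_fabric_setup_cits() at the end of
+ * this file invokes one such helper per cit.
+ */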
+
+/* Start of tfc_tpg_mappedlun_cit */
+
+static int target_fabric_mappedlun_link(
+ struct config_item *lun_acl_ci,
+ struct config_item *lun_ci)
+{
+ struct se_dev_entry *deve;
+ struct se_lun *lun = container_of(to_config_group(lun_ci),
+ struct se_lun, lun_group);
+ struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+ struct se_lun_acl, se_lun_group);
+ struct se_portal_group *se_tpg;
+ struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
+ int ret = 0, lun_access;
+ /*
+ * Ensure that the source port exists
+ */
+ if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
+ printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
+ "_tpg does not exist\n");
+ return -EINVAL;
+ }
+ se_tpg = lun->lun_sep->sep_tpg;
+
+ nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
+ tpg_ci = &nacl_ci->ci_group->cg_item;
+ wwn_ci = &tpg_ci->ci_group->cg_item;
+ tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
+ wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
+ /*
+ * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
+ */
+ if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
+ printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
+ config_item_name(wwn_ci));
+ return -EINVAL;
+ }
+ if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
+ printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
+ " TPGT: %s\n", config_item_name(wwn_ci),
+ config_item_name(tpg_ci));
+ return -EINVAL;
+ }
+ /*
+ * If this struct se_node_acl was dynamically generated with
+ * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
+ * which will be write protected (READ-ONLY) when
+ * tpg_1/attrib/demo_mode_write_protect=1
+ */
+ spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
+ deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
+ lun_access = deve->lun_flags;
+ else
+ lun_access =
+ (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
+ se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
+ TRANSPORT_LUNFLAGS_READ_WRITE;
+ spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
+ /*
+ * Determine the actual mapped LUN value the user wants.
+ *
+ * This is the LUN value that the SCSI Initiator actually sees for
+ * iscsi/$IQN/$TPGT/lun/lun_* on its SCSI Initiator Ports.
+ */
+ ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
+ lun->unpacked_lun, lun_access);
+
+ return (ret < 0) ? -EINVAL : 0;
+}
+
+static int target_fabric_mappedlun_unlink(
+ struct config_item *lun_acl_ci,
+ struct config_item *lun_ci)
+{
+ struct se_lun *lun;
+ struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+ struct se_lun_acl, se_lun_group);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
+ struct se_portal_group *se_tpg;
+ /*
+ * Determine if the underlying MappedLUN has already been released..
+ */
+ if (!(deve->se_lun))
+ return 0;
+
+ lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
+ se_tpg = lun->lun_sep->sep_tpg;
+
+ core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
+ return 0;
+}
+
+CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
+#define TCM_MAPPEDLUN_ATTR(_name, _mode) \
+static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_fabric_mappedlun_show_##_name, \
+ target_fabric_mappedlun_store_##_name);
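+
+/*
+ * For illustration: TCM_MAPPEDLUN_ATTR(write_protect, ...) below expands
+ * into a target_fabric_mappedlun_write_protect attribute wired to the
+ * _show_write_protect()/_store_write_protect() handlers defined next.
+ */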
+
+static ssize_t target_fabric_mappedlun_show_write_protect(
+ struct se_lun_acl *lacl,
+ char *page)
+{
+ struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t len;
+
+ spin_lock_irq(&se_nacl->device_list_lock);
+ deve = &se_nacl->device_list[lacl->mapped_lun];
+ len = sprintf(page, "%d\n",
+ (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
+ 1 : 0);
+ spin_unlock_irq(&se_nacl->device_list_lock);
+
+ return len;
+}
+
+static ssize_t target_fabric_mappedlun_store_write_protect(
+ struct se_lun_acl *lacl,
+ const char *page,
+ size_t count)
+{
+ struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ unsigned long op;
+
+ if (strict_strtoul(page, 0, &op))
+ return -EINVAL;
+
+ if ((op != 1) && (op != 0))
+ return -EINVAL;
+
+ core_update_device_list_access(lacl->mapped_lun, (op) ?
+ TRANSPORT_LUNFLAGS_READ_ONLY :
+ TRANSPORT_LUNFLAGS_READ_WRITE,
+ lacl->se_lun_nacl);
+
+ printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
+ " Mapped LUN: %u Write Protect bit to %s\n",
+ TPG_TFO(se_tpg)->get_fabric_name(),
+ lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+
+ return count;
+
+}
+
+TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
+
+static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
+ &target_fabric_mappedlun_write_protect.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+ .show_attribute = target_fabric_mappedlun_attr_show,
+ .store_attribute = target_fabric_mappedlun_attr_store,
+ .allow_link = target_fabric_mappedlun_link,
+ .drop_link = target_fabric_mappedlun_unlink,
+};
+
+TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
+ target_fabric_mappedlun_attrs);
+
+/* End of tfc_tpg_mappedlun_cit */
+
+/* Start of tfc_tpg_nacl_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
+
+static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
+ .show_attribute = target_fabric_nacl_attrib_attr_show,
+ .store_attribute = target_fabric_nacl_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_attrib_cit */
+
+/* Start of tfc_tpg_nacl_auth_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
+
+static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
+ .show_attribute = target_fabric_nacl_auth_attr_show,
+ .store_attribute = target_fabric_nacl_auth_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_auth_cit */
+
+/* Start of tfc_tpg_nacl_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
+
+static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
+ .show_attribute = target_fabric_nacl_param_attr_show,
+ .store_attribute = target_fabric_nacl_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_param_cit */
+
+/* Start of tfc_tpg_nacl_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
+
+static struct config_group *target_fabric_make_mappedlun(
+ struct config_group *group,
+ const char *name)
+{
+ struct se_node_acl *se_nacl = container_of(group,
+ struct se_node_acl, acl_group);
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ struct se_lun_acl *lacl;
+ struct config_item *acl_ci;
+ char *buf;
+ unsigned long mapped_lun;
+ int ret = 0;
+
+ acl_ci = &group->cg_item;
+ if (!(acl_ci)) {
+ printk(KERN_ERR "Unable to locatel acl_ci\n");
+ return NULL;
+ }
+
+ buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
+ if (!(buf)) {
+ printk(KERN_ERR "Unable to allocate memory for name buf\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ snprintf(buf, strlen(name) + 1, "%s", name);
+ /*
+ * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
+ */
+ if (strstr(buf, "lun_") != buf) {
+ printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
+ " name: %s\n", buf, name);
+ ret = -EINVAL;
+ goto out;
+ }
+ /*
+ * Determine the Mapped LUN value. This is what the SCSI Initiator
+ * Port will actually see.
+ */
+ if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
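+ /*
+ * Example: a mkdir of "lun_3" under this NodeACL parses to
+ * mapped_lun == 3 here.
+ */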
+
+ lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
+ config_item_name(acl_ci), &ret);
+ if (!(lacl))
+ goto out;
+
+ config_group_init_type_name(&lacl->se_lun_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+
+ kfree(buf);
+ return &lacl->se_lun_group;
+out:
+ kfree(buf);
+ return ERR_PTR(ret);
+}
+
+static void target_fabric_drop_mappedlun(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct se_lun_acl *lacl = container_of(to_config_group(item),
+ struct se_lun_acl, se_lun_group);
+ struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+ config_item_put(item);
+ core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+ .show_attribute = target_fabric_nacl_base_attr_show,
+ .store_attribute = target_fabric_nacl_base_attr_store,
+};
+
+static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
+ .make_group = target_fabric_make_mappedlun,
+ .drop_item = target_fabric_drop_mappedlun,
+};
+
+TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
+ &target_fabric_nacl_base_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_base_cit */
+
+/* Start of tfc_tpg_nacl_cit */
+
+static struct config_group *target_fabric_make_nodeacl(
+ struct config_group *group,
+ const char *name)
+{
+ struct se_portal_group *se_tpg = container_of(group,
+ struct se_portal_group, tpg_acl_group);
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ struct se_node_acl *se_nacl;
+ struct config_group *nacl_cg;
+
+ if (!(tf->tf_ops.fabric_make_nodeacl)) {
+ printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
+ if (IS_ERR(se_nacl))
+ return ERR_PTR(PTR_ERR(se_nacl));
+
+ nacl_cg = &se_nacl->acl_group;
+ nacl_cg->default_groups = se_nacl->acl_default_groups;
+ nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
+ nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
+ nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
+ nacl_cg->default_groups[3] = NULL;
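+ /*
+ * For illustration, the resulting configfs layout under a NodeACL is
+ * (paths shown as examples only):
+ *
+ *   .../$FABRIC/$WWN/tpgt_$TPGT/acls/$INITIATOR/{attrib,auth,param}/
+ *
+ * plus per-LUN lun_$MAPPED_LUN/ groups created via mkdir, handled by
+ * target_fabric_make_mappedlun() above.
+ */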
+
+ config_group_init_type_name(&se_nacl->acl_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
+ config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
+ config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
+ config_group_init_type_name(&se_nacl->acl_param_group, "param",
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+
+ return &se_nacl->acl_group;
+}
+
+static void target_fabric_drop_nodeacl(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct se_portal_group *se_tpg = container_of(group,
+ struct se_portal_group, tpg_acl_group);
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ struct se_node_acl *se_nacl = container_of(to_config_group(item),
+ struct se_node_acl, acl_group);
+ struct config_item *df_item;
+ struct config_group *nacl_cg;
+ int i;
+
+ nacl_cg = &se_nacl->acl_group;
+ for (i = 0; nacl_cg->default_groups[i]; i++) {
+ df_item = &nacl_cg->default_groups[i]->cg_item;
+ nacl_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+
+ config_item_put(item);
+ tf->tf_ops.fabric_drop_nodeacl(se_nacl);
+}
+
+static struct configfs_group_operations target_fabric_nacl_group_ops = {
+ .make_group = target_fabric_make_nodeacl,
+ .drop_item = target_fabric_drop_nodeacl,
+};
+
+TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_cit */
+
+/* Start of tfc_tpg_np_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
+
+static struct configfs_item_operations target_fabric_np_base_item_ops = {
+ .show_attribute = target_fabric_np_base_attr_show,
+ .store_attribute = target_fabric_np_base_attr_store,
+};
+
+TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_np_base_cit */
+
+/* Start of tfc_tpg_np_cit */
+
+static struct config_group *target_fabric_make_np(
+ struct config_group *group,
+ const char *name)
+{
+ struct se_portal_group *se_tpg = container_of(group,
+ struct se_portal_group, tpg_np_group);
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ struct se_tpg_np *se_tpg_np;
+
+ if (!(tf->tf_ops.fabric_make_np)) {
+ printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
+ if (!(se_tpg_np) || IS_ERR(se_tpg_np))
+ return ERR_PTR(-EINVAL);
+
+ config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
+
+ return &se_tpg_np->tpg_np_group;
+}
+
+static void target_fabric_drop_np(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct se_portal_group *se_tpg = container_of(group,
+ struct se_portal_group, tpg_np_group);
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+ struct se_tpg_np, tpg_np_group);
+
+ config_item_put(item);
+ tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
+static struct configfs_group_operations target_fabric_np_group_ops = {
+ .make_group = &target_fabric_make_np,
+ .drop_item = &target_fabric_drop_np,
+};
+
+TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
+
+/* End of tfc_tpg_np_cit */
+
+/* Start of tfc_tpg_port_cit */
+
+CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
+#define TCM_PORT_ATTR(_name, _mode) \
+static struct target_fabric_port_attribute target_fabric_port_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_fabric_port_show_attr_##_name, \
+ target_fabric_port_store_attr_##_name);
+
+#define TCM_PORT_ATTR_RO(_name) \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_fabric_port_show_attr_##_name);
+
+/*
+ * alua_tg_pt_gp
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
+ struct se_lun *lun,
+ char *page)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_offline
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
+ struct se_lun *lun,
+ char *page)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_show_offline_bit(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_store_offline_bit(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_status
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
+ struct se_lun *lun,
+ char *page)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_show_secondary_status(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_store_secondary_status(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_write_md
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
+ struct se_lun *lun,
+ char *page)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_show_secondary_write_metadata(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_store_secondary_write_metadata(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
+
+
+static struct configfs_attribute *target_fabric_port_attrs[] = {
+ &target_fabric_port_alua_tg_pt_gp.attr,
+ &target_fabric_port_alua_tg_pt_offline.attr,
+ &target_fabric_port_alua_tg_pt_status.attr,
+ &target_fabric_port_alua_tg_pt_write_md.attr,
+ NULL,
+};
+
+CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
+
+static int target_fabric_port_link(
+ struct config_item *lun_ci,
+ struct config_item *se_dev_ci)
+{
+ struct config_item *tpg_ci;
+ struct se_device *dev;
+ struct se_lun *lun = container_of(to_config_group(lun_ci),
+ struct se_lun, lun_group);
+ struct se_lun *lun_p;
+ struct se_portal_group *se_tpg;
+ struct se_subsystem_dev *se_dev = container_of(
+ to_config_group(se_dev_ci), struct se_subsystem_dev,
+ se_dev_group);
+ struct target_fabric_configfs *tf;
+ int ret;
+
+ tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
+ se_tpg = container_of(to_config_group(tpg_ci),
+ struct se_portal_group, tpg_group);
+ tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+ if (lun->lun_se_dev != NULL) {
+ printk(KERN_ERR "Port Symlink already exists\n");
+ return -EEXIST;
+ }
+
+ dev = se_dev->se_dev_ptr;
+ if (!(dev)) {
+ printk(KERN_ERR "Unable to locate struct se_device pointer from"
+ " %s\n", config_item_name(se_dev_ci));
+ ret = -ENODEV;
+ goto out;
+ }
+
+ lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
+ lun->unpacked_lun);
+ if ((IS_ERR(lun_p)) || !(lun_p)) {
+ printk(KERN_ERR "core_dev_add_lun() failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (tf->tf_ops.fabric_post_link) {
+ /*
+ * Call the optional fabric_post_link() to allow a
+ * fabric module to setup any additional state once
+ * core_dev_add_lun() has been called..
+ */
+ tf->tf_ops.fabric_post_link(se_tpg, lun);
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+static int target_fabric_port_unlink(
+ struct config_item *lun_ci,
+ struct config_item *se_dev_ci)
+{
+ struct se_lun *lun = container_of(to_config_group(lun_ci),
+ struct se_lun, lun_group);
+ struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+ if (tf->tf_ops.fabric_pre_unlink) {
+ /*
+ * Call the optional fabric_pre_unlink() to allow a
+ * fabric module to release any additional state before
+ * core_dev_del_lun() is called.
+ */
+ tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
+ }
+
+ core_dev_del_lun(se_tpg, lun->unpacked_lun);
+ return 0;
+}
+
+static struct configfs_item_operations target_fabric_port_item_ops = {
+ .show_attribute = target_fabric_port_attr_show,
+ .store_attribute = target_fabric_port_attr_store,
+ .allow_link = target_fabric_port_link,
+ .drop_link = target_fabric_port_unlink,
+};
+
+TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
+
+/* End of tfc_tpg_port_cit */
+
+/* Start of tfc_tpg_lun_cit */
+
+static struct config_group *target_fabric_make_lun(
+ struct config_group *group,
+ const char *name)
+{
+ struct se_lun *lun;
+ struct se_portal_group *se_tpg = container_of(group,
+ struct se_portal_group, tpg_lun_group);
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ unsigned long unpacked_lun;
+
+ if (strstr(name, "lun_") != name) {
+ printk(KERN_ERR "Unable to locate \'_\" in"
+ " \"lun_$LUN_NUMBER\"\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
+ if (!(lun))
+ return ERR_PTR(-EINVAL);
+
+ config_group_init_type_name(&lun->lun_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+
+ return &lun->lun_group;
+}
+
+static void target_fabric_drop_lun(
+ struct config_group *group,
+ struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_lun_group_ops = {
+ .make_group = &target_fabric_make_lun,
+ .drop_item = &target_fabric_drop_lun,
+};
+
+TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
+
+/* End of tfc_tpg_lun_cit */
+
+/* Start of tfc_tpg_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);
+
+static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
+ .show_attribute = target_fabric_tpg_attrib_attr_show,
+ .store_attribute = target_fabric_tpg_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_attrib_cit */
+
+/* Start of tfc_tpg_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
+
+static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
+ .show_attribute = target_fabric_tpg_param_attr_show,
+ .store_attribute = target_fabric_tpg_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_param_cit */
+
+/* Start of tfc_tpg_base_cit */
+/*
+ * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
+
+static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+ .show_attribute = target_fabric_tpg_attr_show,
+ .store_attribute = target_fabric_tpg_attr_store,
+};
+
+TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_base_cit */
+
+/* Start of tfc_tpg_cit */
+
+static struct config_group *target_fabric_make_tpg(
+ struct config_group *group,
+ const char *name)
+{
+ struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
+ struct target_fabric_configfs *tf = wwn->wwn_tf;
+ struct se_portal_group *se_tpg;
+
+ if (!(tf->tf_ops.fabric_make_tpg)) {
+ printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
+ if (!(se_tpg) || IS_ERR(se_tpg))
+ return ERR_PTR(-EINVAL);
+ /*
+ * Setup default groups from pre-allocated se_tpg->tpg_default_groups
+ */
+ se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
+ se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
+ se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
+ se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
+ se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
+ se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
+ se_tpg->tpg_group.default_groups[5] = NULL;
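+ /*
+ * For illustration, each TPG then appears in configfs as (example
+ * path only):
+ *
+ *   .../$FABRIC/$WWN/tpgt_$TPGT/{lun,np,acls,attrib,param}/
+ */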
+
+ config_group_init_type_name(&se_tpg->tpg_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
+ config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
+ &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
+ config_group_init_type_name(&se_tpg->tpg_np_group, "np",
+ &TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
+ config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
+ config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
+ &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
+ config_group_init_type_name(&se_tpg->tpg_param_group, "param",
+ &TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
+
+ return &se_tpg->tpg_group;
+}
+
+static void target_fabric_drop_tpg(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
+ struct target_fabric_configfs *tf = wwn->wwn_tf;
+ struct se_portal_group *se_tpg = container_of(to_config_group(item),
+ struct se_portal_group, tpg_group);
+ struct config_group *tpg_cg = &se_tpg->tpg_group;
+ struct config_item *df_item;
+ int i;
+ /*
+ * Release default groups, but do not release tpg_cg->default_groups
+ * memory as it is statically allocated at se_tpg->tpg_default_groups.
+ */
+ for (i = 0; tpg_cg->default_groups[i]; i++) {
+ df_item = &tpg_cg->default_groups[i]->cg_item;
+ tpg_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+
+ config_item_put(item);
+ tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
+static struct configfs_group_operations target_fabric_tpg_group_ops = {
+ .make_group = target_fabric_make_tpg,
+ .drop_item = target_fabric_drop_tpg,
+};
+
+TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
+
+/* End of tfc_tpg_cit */
+
+/* Start of tfc_wwn_cit */
+
+static struct config_group *target_fabric_make_wwn(
+ struct config_group *group,
+ const char *name)
+{
+ struct target_fabric_configfs *tf = container_of(group,
+ struct target_fabric_configfs, tf_group);
+ struct se_wwn *wwn;
+
+ if (!(tf->tf_ops.fabric_make_wwn)) {
+ printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
+ if (!(wwn) || IS_ERR(wwn))
+ return ERR_PTR(-EINVAL);
+
+ wwn->wwn_tf = tf;
+ config_group_init_type_name(&wwn->wwn_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_cit);
+
+ return &wwn->wwn_group;
+}
+
+static void target_fabric_drop_wwn(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct target_fabric_configfs *tf = container_of(group,
+ struct target_fabric_configfs, tf_group);
+ struct se_wwn *wwn = container_of(to_config_group(item),
+ struct se_wwn, wwn_group);
+
+ config_item_put(item);
+ tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_group_operations target_fabric_wwn_group_ops = {
+ .make_group = target_fabric_make_wwn,
+ .drop_item = target_fabric_drop_wwn,
+};
+/*
+ * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);
+
+static struct configfs_item_operations target_fabric_wwn_item_ops = {
+ .show_attribute = target_fabric_wwn_attr_show,
+ .store_attribute = target_fabric_wwn_attr_store,
+};
+
+TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
+
+/* End of tfc_wwn_cit */
+
+/* Start of tfc_discovery_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
+ tf_disc_group);
+
+static struct configfs_item_operations target_fabric_discovery_item_ops = {
+ .show_attribute = target_fabric_discovery_attr_show,
+ .store_attribute = target_fabric_discovery_attr_store,
+};
+
+TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
+
+/* End of tfc_discovery_cit */
+
+int target_fabric_setup_cits(struct target_fabric_configfs *tf)
+{
+ target_fabric_setup_discovery_cit(tf);
+ target_fabric_setup_wwn_cit(tf);
+ target_fabric_setup_tpg_cit(tf);
+ target_fabric_setup_tpg_base_cit(tf);
+ target_fabric_setup_tpg_port_cit(tf);
+ target_fabric_setup_tpg_lun_cit(tf);
+ target_fabric_setup_tpg_np_cit(tf);
+ target_fabric_setup_tpg_np_base_cit(tf);
+ target_fabric_setup_tpg_attrib_cit(tf);
+ target_fabric_setup_tpg_param_cit(tf);
+ target_fabric_setup_tpg_nacl_cit(tf);
+ target_fabric_setup_tpg_nacl_base_cit(tf);
+ target_fabric_setup_tpg_nacl_attrib_cit(tf);
+ target_fabric_setup_tpg_nacl_auth_cit(tf);
+ target_fabric_setup_tpg_nacl_param_cit(tf);
+ target_fabric_setup_tpg_mappedlun_cit(tf);
+
+ return 0;
+}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
new file mode 100644
index 000000000000..26285644e4de
--- /dev/null
+++ b/drivers/target/target_core_fabric_lib.c
@@ -0,0 +1,451 @@
+/*******************************************************************************
+ * Filename: target_core_fabric_lib.c
+ *
+ * This file contains generic high level protocol identifier and PR
+ * handlers for TCM fabric modules
+ *
+ * Copyright (c) 2010 Rising Tide Systems, Inc.
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+
+/*
+ * Handlers for Serial Attached SCSI (SAS)
+ */
+u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ /*
+ * Return a SAS Serial SCSI Protocol identifier for loopback operations
+ * This is defined in section 7.5.1 Table 362 in spc4r17
+ */
+ return 0x6;
+}
+EXPORT_SYMBOL(sas_get_fabric_proto_ident);
+
+u32 sas_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ unsigned char binary, *ptr;
+ int i;
+ u32 off = 4;
+ /*
+ * Set PROTOCOL IDENTIFIER to 6h for SAS
+ */
+ buf[0] = 0x06;
+ /*
+ * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+ * over SAS Serial SCSI Protocol
+ */
+ ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
+
+ for (i = 0; i < 16; i += 2) {
+ binary = transport_asciihex_to_binaryhex(&ptr[i]);
+ buf[off++] = binary;
+ }
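+ /*
+ * Illustrative example (hypothetical WWN): an initiatorname of
+ * "naa.5001438001234567" is converted two ASCII hex digits at a
+ * time into the eight binary bytes 0x50 0x01 0x43 0x80 0x01 0x23
+ * 0x45 0x67, stored at buf[4]..buf[11].
+ */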
+ /*
+ * The SAS Transport ID is a hardcoded 24-byte length
+ */
+ return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id);
+
+u32 sas_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ *format_code = 0;
+ /*
+ * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+ * over SAS Serial SCSI Protocol
+ *
+ * The SAS Transport ID is a hardcoded 24-byte length
+ */
+ return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id_len);
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+char *sas_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ /*
+ * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+ * for initiator ports using SCSI over SAS Serial SCSI Protocol
+ *
+ * The TransportID for a SAS Initiator Port has a fixed size of
+ * 24 bytes, and SAS does not contain an I_T nexus identifier,
+ * so we return with *port_nexus_ptr set to NULL.
+ */
+ *port_nexus_ptr = NULL;
+ *out_tid_len = 24;
+
+ return (char *)&buf[4];
+}
+EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Fibre Channel Protocol (FCP)
+ */
+u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */
+}
+EXPORT_SYMBOL(fc_get_fabric_proto_ident);
+
+u32 fc_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ *format_code = 0;
+ /*
+ * The FC Transport ID is a hardcoded 24-byte length
+ */
+ return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id_len);
+
+u32 fc_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ unsigned char binary, *ptr;
+ int i;
+ u32 off = 8;
+ /*
+ * PROTOCOL IDENTIFIER is 0h for FCP-2
+ *
+ * From spc4r17, 7.5.4.2 TransportID for initiator ports using
+ * SCSI over Fibre Channel
+ *
+ * We convert the ASCII formatted N Port name into a binary
+ * encoded TransportID.
+ */
+ ptr = &se_nacl->initiatorname[0];
+
+ for (i = 0; i < 24; ) {
+ if (!(strncmp(&ptr[i], ":", 1))) {
+ i++;
+ continue;
+ }
+ binary = transport_asciihex_to_binaryhex(&ptr[i]);
+ buf[off++] = binary;
+ i += 2;
+ }
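+ /*
+ * Illustrative example (hypothetical N Port name): an initiatorname
+ * such as "21:00:00:24:ff:31:4c:48" is intended to be converted,
+ * colons skipped, into the eight binary bytes 0x21 0x00 0x00 0x24
+ * 0xff 0x31 0x4c 0x48 stored at buf[8]..buf[15].
+ */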
+ /*
+ * The FC Transport ID is a hardcoded 24-byte length
+ */
+ return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id);
+
+char *fc_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ /*
+ * The TransportID for an FC N Port has a fixed size of
+ * 24 bytes, and FC does not contain an I_T nexus identifier,
+ * so we return with *port_nexus_ptr set to NULL.
+ */
+ *port_nexus_ptr = NULL;
+ *out_tid_len = 24;
+
+ return (char *)&buf[8];
+}
+EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Internet Small Computer Systems Interface (iSCSI)
+ */
+
+u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ /*
+ * This value is defined for "Internet SCSI (iSCSI)"
+ * in spc4r17 section 7.5.1 Table 362
+ */
+ return 0x5;
+}
+EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
+
+u32 iscsi_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ u32 off = 4, padding = 0;
+ u16 len = 0;
+
+ spin_lock_irq(&se_nacl->nacl_sess_lock);
+ /*
+ * Set PROTOCOL IDENTIFIER to 5h for iSCSI
+ */
+ buf[0] = 0x05;
+ /*
+ * From spc4r17 Section 7.5.4.6: TransportID for initiator
+ * ports using SCSI over iSCSI.
+ *
+ * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
+ * shall contain the iSCSI name of an iSCSI initiator node (see
+ * RFC 3720). The first ISCSI NAME field byte containing an ASCII
+ * null character terminates the ISCSI NAME field without regard for
+ * the specified length of the iSCSI TransportID or the contents of
+ * the ADDITIONAL LENGTH field.
+ */
+ len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
+ /*
+ * Add Extra byte for NULL terminator
+ */
+ len++;
+ /*
+ * If there is an ISID present with the registration and *format_code
+ * == 1, use the iSCSI Initiator port TransportID format.
+ *
+ * Otherwise use the iSCSI Initiator device TransportID format that
+ * does not contain the ASCII encoded iSCSI Initiator ISID value
+ * provided by the iSCSI Initiator during the iSCSI login process.
+ */
+ if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
+ /*
+ * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
+ * format.
+ */
+ buf[0] |= 0x40;
+ /*
+ * From spc4r17 Section 7.5.4.6: TransportID for initiator
+ * ports using SCSI over iSCSI. Table 390
+ *
+ * The SEPARATOR field shall contain the five ASCII
+ * characters ",i,0x".
+ *
+ * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
+ * field shall contain the iSCSI initiator session identifier
+ * (see RFC 3720) in the form of ASCII characters that are the
+ * hexadecimal digits converted from the binary iSCSI initiator
+ * session identifier value. The first ISCSI INITIATOR SESSION
+ * ID field byte containing an ASCII null character terminates the
+ * ISCSI INITIATOR SESSION ID field.
+ */
+ buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+ buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
+ buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+ buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
+ buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
+ len += 5;
+ buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
+ buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
+ buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
+ buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
+ buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
+ buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
+ buf[off+len] = '\0'; off++;
+ len += 7;
+ }
+ spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ /*
+ * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+ * in the TransportID. The additional length shall be at least 20 and
+ * shall be a multiple of four.
+ */
+ padding = ((-len) & 3);
+ if (padding != 0)
+ len += padding;
+
+ buf[2] = ((len >> 8) & 0xff);
+ buf[3] = (len & 0xff);
+ /*
+ * Increment value for total payload + header length for
+ * full status descriptor
+ */
+ len += 4;
+
+ return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id);
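+
+/*
+ * Worked example of the padding rule above (values are illustrative):
+ * a 27 character iSCSI name gives len = 27 + 1 (NULL terminator) = 28,
+ * which is already a multiple of four, so padding = ((-28) & 3) = 0.
+ * A 30 character name gives len = 31 and padding = ((-31) & 3) = 1,
+ * so the payload is padded to 32 bytes before the 4 byte header is
+ * counted, for a returned length of 36.
+ */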
+
+u32 iscsi_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ u32 len = 0, padding = 0;
+
+ spin_lock_irq(&se_nacl->nacl_sess_lock);
+ len = strlen(se_nacl->initiatorname);
+ /*
+ * Add extra byte for NULL terminator
+ */
+ len++;
+ /*
+ * If there is ISID present with the registration, use format code:
+ * 01b: iSCSI Initiator port TransportID format
+ *
+ * If there is not an active iSCSI session, use format code:
+ * 00b: iSCSI Initiator device TransportID format
+ */
+ if (pr_reg->isid_present_at_reg) {
+ len += 5; /* For ",i,0x" ASCII separator */
+ len += 7; /* For iSCSI Initiator Session ID + Null terminator */
+ *format_code = 1;
+ } else
+ *format_code = 0;
+ spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ /*
+ * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+ * in the TransportID. The additional length shall be at least 20 and
+ * shall be a multiple of four.
+ */
+ padding = ((-len) & 3);
+ if (padding != 0)
+ len += padding;
+ /*
+ * Increment value for total payload + header length for
+ * full status descriptor
+ */
+ len += 4;
+
+ return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
+
+char *iscsi_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ char *p;
+ u32 tid_len, padding;
+ int i;
+ u16 add_len;
+ u8 format_code = (buf[0] & 0xc0);
+ /*
+ * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
+ *
+ * TransportID for initiator ports using SCSI over iSCSI,
+ * from Table 388 -- iSCSI TransportID formats.
+ *
+ * 00b Initiator port is identified using the world wide unique
+ * SCSI device name of the iSCSI initiator
+ * device containing the initiator port (see table 389).
+ * 01b Initiator port is identified using the world wide unique
+ *     initiator port identifier (see table 390).
+ * 10b to 11b Reserved
+ */
+ if ((format_code != 0x00) && (format_code != 0x40)) {
+ printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
+ " Initiator Transport ID\n", format_code);
+ return NULL;
+ }
+ /*
+ * If the caller wants the TransportID Length, we set that value for the
+ * entire iSCSI Transport ID now.
+ */
+ if (out_tid_len != NULL) {
+ add_len = ((buf[2] & 0xff) << 8);
+ add_len |= (buf[3] & 0xff);
+
+ tid_len = strlen((char *)&buf[4]);
+ tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
+ tid_len += 1; /* Add one byte for NULL terminator */
+ padding = ((-tid_len) & 3);
+ if (padding != 0)
+ tid_len += padding;
+
+ if ((add_len + 4) != tid_len) {
+ printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
+ "does not match calculated tid_len: %u,"
+ " using tid_len instead\n", add_len+4, tid_len);
+ *out_tid_len = tid_len;
+ } else
+ *out_tid_len = (add_len + 4);
+ }
+ /*
+ * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
+ * Session ID as defined in Table 390 - iSCSI initiator port TransportID
+ * format.
+ */
+ if (format_code == 0x40) {
+ p = strstr((char *)&buf[4], ",i,0x");
+ if (!(p)) {
+ printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
+ " for Initiator port identifier: %s\n",
+ (char *)&buf[4]);
+ return NULL;
+ }
+ *p = '\0'; /* Terminate iSCSI Name */
+ p += 5; /* Skip over ",i,0x" separator */
+
+ *port_nexus_ptr = p;
+ /*
+ * Go ahead and do the lower case conversion of the received
+ * 12 ASCII characters representing the ISID in the TransportID
+ * for comparison against the running iSCSI session's ISID from
+ * iscsi_target.c:lio_sess_get_initiator_sid()
+ */
+ for (i = 0; i < 12; i++) {
+ if (isdigit(*p)) {
+ p++;
+ continue;
+ }
+ *p = tolower(*p);
+ p++;
+ }
+ }
+
+ return (char *)&buf[4];
+}
+EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
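+
+/*
+ * Illustrative layout of an iSCSI TransportID using FORMAT CODE 01b,
+ * as produced by iscsi_get_pr_transport_id() and consumed by
+ * iscsi_parse_pr_out_transport_id() above (the name and ISID shown
+ * are hypothetical):
+ *
+ *   byte 0       : 0x45 (PROTOCOL IDENTIFIER 0x5 | FORMAT CODE 0x40)
+ *   byte 1       : reserved
+ *   bytes 2-3    : ADDITIONAL LENGTH (big-endian)
+ *   bytes 4..n   : "iqn.2003-01.org.example:initiator01"
+ *   next 5 bytes : ",i,0x" separator
+ *   remainder    : ASCII hex ISID characters, NULL terminated and
+ *                  padded out to a multiple of four bytes
+ */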
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
new file mode 100644
index 000000000000..0aaca885668f
--- /dev/null
+++ b/drivers/target/target_core_file.c
@@ -0,0 +1,688 @@
+/*******************************************************************************
+ * Filename: target_core_file.c
+ *
+ * This file contains the Storage Engine <-> FILEIO transport specific functions
+ *
+ * Copyright (c) 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_file.h"
+
+#if 1
+#define DEBUG_FD_CACHE(x...) printk(x)
+#else
+#define DEBUG_FD_CACHE(x...)
+#endif
+
+#if 1
+#define DEBUG_FD_FUA(x...) printk(x)
+#else
+#define DEBUG_FD_FUA(x...)
+#endif
+
+static struct se_subsystem_api fileio_template;
+
+/* fd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int fd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+ struct fd_host *fd_host;
+
+ fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
+ if (!(fd_host)) {
+ printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
+ return -1;
+ }
+
+ fd_host->fd_host_id = host_id;
+
+ atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
+ atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
+ hba->hba_ptr = (void *) fd_host;
+
+ printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+ " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
+ TARGET_CORE_MOD_VERSION);
+ printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+ " Target Core with TCQ Depth: %d MaxSectors: %u\n",
+ hba->hba_id, fd_host->fd_host_id,
+ atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
+
+ return 0;
+}
+
+static void fd_detach_hba(struct se_hba *hba)
+{
+ struct fd_host *fd_host = hba->hba_ptr;
+
+ printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+ " Target Core\n", hba->hba_id, fd_host->fd_host_id);
+
+ kfree(fd_host);
+ hba->hba_ptr = NULL;
+}
+
+static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+ struct fd_dev *fd_dev;
+ struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+
+ fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
+ if (!(fd_dev)) {
+ printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
+ return NULL;
+ }
+
+ fd_dev->fd_host = fd_host;
+
+ printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
+
+ return fd_dev;
+}
+
+/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static struct se_device *fd_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p)
+{
+ char *dev_p = NULL;
+ struct se_device *dev;
+ struct se_dev_limits dev_limits;
+ struct queue_limits *limits;
+ struct fd_dev *fd_dev = (struct fd_dev *) p;
+ struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+ mm_segment_t old_fs;
+ struct file *file;
+ struct inode *inode = NULL;
+ int dev_flags = 0, flags;
+
+ memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ dev_p = getname(fd_dev->fd_dev_name);
+ set_fs(old_fs);
+
+ if (IS_ERR(dev_p)) {
+ printk(KERN_ERR "getname(%s) failed: %lu\n",
+ fd_dev->fd_dev_name, IS_ERR(dev_p));
+ goto fail;
+ }
+#if 0
+ if (di->no_create_file)
+ flags = O_RDWR | O_LARGEFILE;
+ else
+ flags = O_RDWR | O_CREAT | O_LARGEFILE;
+#else
+ flags = O_RDWR | O_CREAT | O_LARGEFILE;
+#endif
+/* flags |= O_DIRECT; */
+ /*
+ * If fd_buffered_io=1 has not been set explicitly (the default),
+ * use O_SYNC to force FILEIO writes to disk.
+ */
+ if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
+ flags |= O_SYNC;
+
+ file = filp_open(dev_p, flags, 0600);
+
+ if (IS_ERR(file) || !file || !file->f_dentry) {
+ printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+ goto fail;
+ }
+ fd_dev->fd_file = file;
+ /*
+ * If using a block backend with this struct file, we extract
+ * fd_dev->fd_[block,dev]_size from struct block_device.
+ *
+ * Otherwise, we use the passed fd_size= from configfs
+ */
+ inode = file->f_mapping->host;
+ if (S_ISBLK(inode->i_mode)) {
+ struct request_queue *q;
+ /*
+ * Setup the local scope queue_limits from struct request_queue->limits
+ * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+ */
+ q = bdev_get_queue(inode->i_bdev);
+ limits = &dev_limits.limits;
+ limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
+ limits->max_hw_sectors = queue_max_hw_sectors(q);
+ limits->max_sectors = queue_max_sectors(q);
+ /*
+ * Determine the device size as i_size_read() minus one
+ * logical sector of the underlying struct block_device.
+ */
+ fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+ fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
+ fd_dev->fd_block_size);
+
+ printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
+ " block_device blocks: %llu logical_block_size: %d\n",
+ fd_dev->fd_dev_size,
+ div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
+ fd_dev->fd_block_size);
+ } else {
+ if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
+ printk(KERN_ERR "FILEIO: Missing fd_dev_size="
+ " parameter, and no backing struct"
+ " block_device\n");
+ goto fail;
+ }
+
+ limits = &dev_limits.limits;
+ limits->logical_block_size = FD_BLOCKSIZE;
+ limits->max_hw_sectors = FD_MAX_SECTORS;
+ limits->max_sectors = FD_MAX_SECTORS;
+ fd_dev->fd_block_size = FD_BLOCKSIZE;
+ }
+
+ dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
+ dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+
+ dev = transport_add_device_to_core_hba(hba, &fileio_template,
+ se_dev, dev_flags, (void *)fd_dev,
+ &dev_limits, "FILEIO", FD_VERSION);
+ if (!(dev))
+ goto fail;
+
+ fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+ fd_dev->fd_queue_depth = dev->queue_depth;
+
+ printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+ " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
+ fd_dev->fd_dev_name, fd_dev->fd_dev_size);
+
+ putname(dev_p);
+ return dev;
+fail:
+ if (fd_dev->fd_file) {
+ filp_close(fd_dev->fd_file, NULL);
+ fd_dev->fd_file = NULL;
+ }
+ putname(dev_p);
+ return NULL;
+}
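+
+/*
+ * Example of the size derivation above for a struct block_device
+ * backend (numbers are illustrative): a 1 GiB device with 512-byte
+ * logical blocks gives fd_dev_size = 1073741824 - 512 = 1073741312
+ * bytes, so div_u64(fd_dev_size, fd_block_size) = 2097151, the value
+ * fd_get_blocks() later reports as the last LBA.
+ */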
+
+/* fd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_device(void *p)
+{
+ struct fd_dev *fd_dev = (struct fd_dev *) p;
+
+ if (fd_dev->fd_file) {
+ filp_close(fd_dev->fd_file, NULL);
+ fd_dev->fd_file = NULL;
+ }
+
+ kfree(fd_dev);
+}
+
+static inline struct fd_request *FILE_REQ(struct se_task *task)
+{
+ return container_of(task, struct fd_request, fd_task);
+}
+
+
+static struct se_task *
+fd_alloc_task(struct se_cmd *cmd)
+{
+ struct fd_request *fd_req;
+
+ fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
+ if (!(fd_req)) {
+ printk(KERN_ERR "Unable to allocate struct fd_request\n");
+ return NULL;
+ }
+
+ fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
+
+ return &fd_req->fd_task;
+}
+
+static int fd_do_readv(struct se_task *task)
+{
+ struct fd_request *req = FILE_REQ(task);
+ struct file *fd = req->fd_dev->fd_file;
+ struct scatterlist *sg = task->task_sg;
+ struct iovec *iov;
+ mm_segment_t old_fs;
+ loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+ int ret = 0, i;
+
+ iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
+ if (!(iov)) {
+ printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
+ return -1;
+ }
+
+ for (i = 0; i < task->task_sg_num; i++) {
+ iov[i].iov_len = sg[i].length;
+ iov[i].iov_base = sg_virt(&sg[i]);
+ }
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
+ set_fs(old_fs);
+
+ kfree(iov);
+ /*
+ * Return zeros and GOOD status even if the READ did not return
+ * the expected virt_size for struct file w/o a backing struct
+ * block_device.
+ */
+ if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+ if (ret < 0 || ret != task->task_size) {
+ printk(KERN_ERR "vfs_readv() returned %d,"
+ " expecting %d for S_ISBLK\n", ret,
+ (int)task->task_size);
+ return -1;
+ }
+ } else {
+ if (ret < 0) {
+ printk(KERN_ERR "vfs_readv() returned %d for non"
+ " S_ISBLK\n", ret);
+ return -1;
+ }
+ }
+
+ return 1;
+}
+
+static int fd_do_writev(struct se_task *task)
+{
+ struct fd_request *req = FILE_REQ(task);
+ struct file *fd = req->fd_dev->fd_file;
+ struct scatterlist *sg = task->task_sg;
+ struct iovec *iov;
+ mm_segment_t old_fs;
+ loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+ int ret, i = 0;
+
+ iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
+ if (!(iov)) {
+ printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
+ return -1;
+ }
+
+ for (i = 0; i < task->task_sg_num; i++) {
+ iov[i].iov_len = sg[i].length;
+ iov[i].iov_base = sg_virt(&sg[i]);
+ }
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
+ set_fs(old_fs);
+
+ kfree(iov);
+
+ if (ret < 0 || ret != task->task_size) {
+ printk(KERN_ERR "vfs_writev() returned %d\n", ret);
+ return -1;
+ }
+
+ return 1;
+}
+
+static void fd_emulate_sync_cache(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = dev->dev_ptr;
+ int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+ loff_t start, end;
+ int ret;
+
+ /*
+ * If the Immediate bit is set, queue up the GOOD response
+ * for this SYNCHRONIZE_CACHE op
+ */
+ if (immed)
+ transport_complete_sync_cache(cmd, 1);
+
+ /*
+ * Determine if we will be flushing the entire device.
+ */
+ if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
+ start = 0;
+ end = LLONG_MAX;
+ } else {
+ start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
+ if (cmd->data_length)
+ end = start + cmd->data_length;
+ else
+ end = LLONG_MAX;
+ }
+
+ ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+ if (ret != 0)
+ printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+
+ if (!immed)
+ transport_complete_sync_cache(cmd, ret == 0);
+}
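+
+/*
+ * Example of the range calculation above (values are illustrative):
+ * a SYNCHRONIZE_CACHE with t_task_lba = 2048, a 512-byte block_size
+ * and data_length = 4096 flushes the byte range start = 2048 * 512 =
+ * 1048576 to end = 1048576 + 4096 = 1052672.  A zero LBA with zero
+ * length flushes the whole file via [0, LLONG_MAX].
+ */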
+
+/*
+ * Tell TCM Core that we are capable of WriteCache emulation for
+ * an underlying struct se_device.
+ */
+static int fd_emulated_write_cache(struct se_device *dev)
+{
+ return 1;
+}
+
+static int fd_emulated_dpo(struct se_device *dev)
+{
+ return 0;
+}
+/*
+ * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
+ * for TYPE_DISK.
+ */
+static int fd_emulated_fua_write(struct se_device *dev)
+{
+ return 1;
+}
+
+static int fd_emulated_fua_read(struct se_device *dev)
+{
+ return 0;
+}
+
+/*
+ * WRITE Force Unit Access (FUA) emulation on a per struct se_task
+ * LBA range basis.
+ */
+static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = dev->dev_ptr;
+ loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
+ loff_t end = start + task->task_size;
+ int ret;
+
+ DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
+ task->task_lba, task->task_size);
+
+ ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+ if (ret != 0)
+ printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+}
+
+static int fd_do_task(struct se_task *task)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ int ret = 0;
+
+ /*
+ * Call vectorized fileio functions to map struct scatterlist
+ * physical memory addresses to struct iovec virtual memory.
+ */
+ if (task->task_data_direction == DMA_FROM_DEVICE) {
+ ret = fd_do_readv(task);
+ } else {
+ ret = fd_do_writev(task);
+
+ if (ret > 0 &&
+ DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
+ DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
+ T_TASK(cmd)->t_tasks_fua) {
+ /*
+ * We might need to be a bit smarter here
+ * and return some sense data to let the initiator
+ * know the FUA WRITE cache sync failed..?
+ */
+ fd_emulate_write_fua(cmd, task);
+ }
+
+ }
+
+ if (ret < 0)
+ return ret;
+ if (ret) {
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/* fd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_task(struct se_task *task)
+{
+ struct fd_request *req = FILE_REQ(task);
+
+ kfree(req);
+}
+
+enum {
+ Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
+};
+
+static match_table_t tokens = {
+ {Opt_fd_dev_name, "fd_dev_name=%s"},
+ {Opt_fd_dev_size, "fd_dev_size=%s"},
+ {Opt_fd_buffered_io, "fd_buffered_id=%d"},
+ {Opt_err, NULL}
+};
+
+static ssize_t fd_set_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ const char *page, ssize_t count)
+{
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *arg_p, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+
+ while ((ptr = strsep(&opts, ",")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_fd_dev_name:
+ snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
+ "%s", match_strdup(&args[0]));
+ printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
+ fd_dev->fd_dev_name);
+ fd_dev->fbd_flags |= FBDF_HAS_PATH;
+ break;
+ case Opt_fd_dev_size:
+ arg_p = match_strdup(&args[0]);
+ ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+ if (ret < 0) {
+ printk(KERN_ERR "strict_strtoull() failed for"
+ " fd_dev_size=\n");
+ goto out;
+ }
+ printk(KERN_INFO "FILEIO: Referencing Size: %llu"
+ " bytes\n", fd_dev->fd_dev_size);
+ fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+ break;
+ case Opt_fd_buffered_io:
+ match_int(args, &arg);
+ if (arg != 1) {
+ printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ printk(KERN_INFO "FILEIO: Using buffered I/O"
+ " operations for struct fd_dev\n");
+
+ fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
+ break;
+ default:
+ break;
+ }
+ }
+
+out:
+ kfree(orig);
+ return (!ret) ? count : ret;
+}
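+
+/*
+ * Example usage of the parameter parsing above from userspace (the
+ * configfs path and file name are illustrative and depend on how the
+ * device was created):
+ *
+ *   echo "fd_dev_name=/tmp/fileio0.img,fd_dev_size=4194304,fd_buffered_io=1" \
+ *     > /sys/kernel/config/target/core/fileio_0/fileio_dev/control
+ */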
+
+static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+ struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
+
+ if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+ printk(KERN_ERR "Missing fd_dev_name=\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t fd_show_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ char *b)
+{
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ ssize_t bl = 0;
+
+ bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+ bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
+ fd_dev->fd_dev_name, fd_dev->fd_dev_size,
+ (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
+ "Buffered" : "Synchronous");
+ return bl;
+}
+
+/* fd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static unsigned char *fd_get_cdb(struct se_task *task)
+{
+ struct fd_request *req = FILE_REQ(task);
+
+ return req->fd_scsi_cdb;
+}
+
+/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_rev(struct se_device *dev)
+{
+ return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+/* fd_get_device_type(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_type(struct se_device *dev)
+{
+ return TYPE_DISK;
+}
+
+static sector_t fd_get_blocks(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = dev->dev_ptr;
+ unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
+ DEV_ATTRIB(dev)->block_size);
+
+ return blocks_long;
+}
+
+static struct se_subsystem_api fileio_template = {
+ .name = "fileio",
+ .owner = THIS_MODULE,
+ .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
+ .attach_hba = fd_attach_hba,
+ .detach_hba = fd_detach_hba,
+ .allocate_virtdevice = fd_allocate_virtdevice,
+ .create_virtdevice = fd_create_virtdevice,
+ .free_device = fd_free_device,
+ .dpo_emulated = fd_emulated_dpo,
+ .fua_write_emulated = fd_emulated_fua_write,
+ .fua_read_emulated = fd_emulated_fua_read,
+ .write_cache_emulated = fd_emulated_write_cache,
+ .alloc_task = fd_alloc_task,
+ .do_task = fd_do_task,
+ .do_sync_cache = fd_emulate_sync_cache,
+ .free_task = fd_free_task,
+ .check_configfs_dev_params = fd_check_configfs_dev_params,
+ .set_configfs_dev_params = fd_set_configfs_dev_params,
+ .show_configfs_dev_params = fd_show_configfs_dev_params,
+ .get_cdb = fd_get_cdb,
+ .get_device_rev = fd_get_device_rev,
+ .get_device_type = fd_get_device_type,
+ .get_blocks = fd_get_blocks,
+};
+
+static int __init fileio_module_init(void)
+{
+ return transport_subsystem_register(&fileio_template);
+}
+
+static void fileio_module_exit(void)
+{
+ transport_subsystem_release(&fileio_template);
+}
+
+MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(fileio_module_init);
+module_exit(fileio_module_exit);
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
new file mode 100644
index 000000000000..ef4de2b4bd46
--- /dev/null
+++ b/drivers/target/target_core_file.h
@@ -0,0 +1,50 @@
+#ifndef TARGET_CORE_FILE_H
+#define TARGET_CORE_FILE_H
+
+#define FD_VERSION "4.0"
+
+#define FD_MAX_DEV_NAME 256
+/* Maximum queuedepth for the FILEIO HBA */
+#define FD_HBA_QUEUE_DEPTH 256
+#define FD_DEVICE_QUEUE_DEPTH 32
+#define FD_MAX_DEVICE_QUEUE_DEPTH 128
+#define FD_BLOCKSIZE 512
+#define FD_MAX_SECTORS 1024
+
+#define RRF_EMULATE_CDB 0x01
+#define RRF_GOT_LBA 0x02
+
+struct fd_request {
+ struct se_task fd_task;
+ /* SCSI CDB from iSCSI Command PDU */
+ unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+ /* FILEIO device */
+ struct fd_dev *fd_dev;
+} ____cacheline_aligned;
+
+#define FBDF_HAS_PATH 0x01
+#define FBDF_HAS_SIZE 0x02
+#define FDBD_USE_BUFFERED_IO 0x04
+
+struct fd_dev {
+ u32 fbd_flags;
+ unsigned char fd_dev_name[FD_MAX_DEV_NAME];
+ /* Unique FILEIO Device ID in FILEIO HBA */
+ u32 fd_dev_id;
+ /* Number of SG tables in sg_table_array */
+ u32 fd_table_count;
+ u32 fd_queue_depth;
+ u32 fd_block_size;
+ unsigned long long fd_dev_size;
+ struct file *fd_file;
+ /* FILEIO HBA device is connected to */
+ struct fd_host *fd_host;
+} ____cacheline_aligned;
+
+struct fd_host {
+ u32 fd_host_dev_id_count;
+ /* Unique FILEIO Host ID */
+ u32 fd_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_FILE_H */
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
new file mode 100644
index 000000000000..4bbe8208b241
--- /dev/null
+++ b/drivers/target/target_core_hba.c
@@ -0,0 +1,185 @@
+/*******************************************************************************
+ * Filename: target_core_hba.c
+ *
+ * This file contains the TCM HBA Transport related functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_hba.h"
+
+static LIST_HEAD(subsystem_list);
+static DEFINE_MUTEX(subsystem_mutex);
+
+int transport_subsystem_register(struct se_subsystem_api *sub_api)
+{
+ struct se_subsystem_api *s;
+
+ INIT_LIST_HEAD(&sub_api->sub_api_list);
+
+ mutex_lock(&subsystem_mutex);
+ list_for_each_entry(s, &subsystem_list, sub_api_list) {
+ if (!(strcmp(s->name, sub_api->name))) {
+ printk(KERN_ERR "%p is already registered with"
+ " duplicate name %s, unable to process"
+ " request\n", s, s->name);
+ mutex_unlock(&subsystem_mutex);
+ return -EEXIST;
+ }
+ }
+ list_add_tail(&sub_api->sub_api_list, &subsystem_list);
+ mutex_unlock(&subsystem_mutex);
+
+ printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
+ " %p\n", sub_api->name, sub_api->owner);
+ return 0;
+}
+EXPORT_SYMBOL(transport_subsystem_register);
+
+void transport_subsystem_release(struct se_subsystem_api *sub_api)
+{
+ mutex_lock(&subsystem_mutex);
+ list_del(&sub_api->sub_api_list);
+ mutex_unlock(&subsystem_mutex);
+}
+EXPORT_SYMBOL(transport_subsystem_release);
+
+static struct se_subsystem_api *core_get_backend(const char *sub_name)
+{
+ struct se_subsystem_api *s;
+
+ mutex_lock(&subsystem_mutex);
+ list_for_each_entry(s, &subsystem_list, sub_api_list) {
+ if (!strcmp(s->name, sub_name))
+ goto found;
+ }
+ mutex_unlock(&subsystem_mutex);
+ return NULL;
+found:
+ if (s->owner && !try_module_get(s->owner))
+ s = NULL;
+ mutex_unlock(&subsystem_mutex);
+ return s;
+}
+
+struct se_hba *
+core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
+{
+ struct se_hba *hba;
+ int ret = 0;
+
+ hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+ if (!hba) {
+ printk(KERN_ERR "Unable to allocate struct se_hba\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&hba->hba_dev_list);
+ spin_lock_init(&hba->device_lock);
+ spin_lock_init(&hba->hba_queue_lock);
+ mutex_init(&hba->hba_access_mutex);
+
+ hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
+ hba->hba_flags |= hba_flags;
+
+ atomic_set(&hba->max_queue_depth, 0);
+ atomic_set(&hba->left_queue_depth, 0);
+
+ hba->transport = core_get_backend(plugin_name);
+ if (!hba->transport) {
+ ret = -EINVAL;
+ goto out_free_hba;
+ }
+
+ ret = hba->transport->attach_hba(hba, plugin_dep_id);
+ if (ret < 0)
+ goto out_module_put;
+
+ spin_lock(&se_global->hba_lock);
+ hba->hba_id = se_global->g_hba_id_counter++;
+ list_add_tail(&hba->hba_list, &se_global->g_hba_list);
+ spin_unlock(&se_global->hba_lock);
+
+ printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
+ " Core\n", hba->hba_id);
+
+ return hba;
+
+out_module_put:
+ if (hba->transport->owner)
+ module_put(hba->transport->owner);
+ hba->transport = NULL;
+out_free_hba:
+ kfree(hba);
+ return ERR_PTR(ret);
+}
+
+int
+core_delete_hba(struct se_hba *hba)
+{
+ struct se_device *dev, *dev_tmp;
+
+ spin_lock(&hba->device_lock);
+ list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) {
+
+ se_clear_dev_ports(dev);
+ spin_unlock(&hba->device_lock);
+
+ se_release_device_for_hba(dev);
+
+ spin_lock(&hba->device_lock);
+ }
+ spin_unlock(&hba->device_lock);
+
+ hba->transport->detach_hba(hba);
+
+ spin_lock(&se_global->hba_lock);
+ list_del(&hba->hba_list);
+ spin_unlock(&se_global->hba_lock);
+
+ printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
+ " Core\n", hba->hba_id);
+
+ if (hba->transport->owner)
+ module_put(hba->transport->owner);
+
+ hba->transport = NULL;
+ kfree(hba);
+ return 0;
+}
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
new file mode 100644
index 000000000000..bb0fea5f730c
--- /dev/null
+++ b/drivers/target/target_core_hba.h
@@ -0,0 +1,7 @@
+#ifndef TARGET_CORE_HBA_H
+#define TARGET_CORE_HBA_H
+
+extern struct se_hba *core_alloc_hba(const char *, u32, u32);
+extern int core_delete_hba(struct se_hba *);
+
+#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
new file mode 100644
index 000000000000..c6e0d757e76e
--- /dev/null
+++ b/drivers/target/target_core_iblock.c
@@ -0,0 +1,808 @@
+/*******************************************************************************
+ * Filename: target_core_iblock.c
+ *
+ * This file contains the Storage Engine <-> Linux BlockIO transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/bio.h>
+#include <linux/genhd.h>
+#include <linux/file.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_iblock.h"
+
+#if 0
+#define DEBUG_IBLOCK(x...) printk(x)
+#else
+#define DEBUG_IBLOCK(x...)
+#endif
+
+static struct se_subsystem_api iblock_template;
+
+static void iblock_bio_done(struct bio *, int);
+
+/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
+{
+ struct iblock_hba *ib_host;
+
+ ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
+ if (!(ib_host)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct iblock_hba\n");
+ return -ENOMEM;
+ }
+
+ ib_host->iblock_host_id = host_id;
+
+ atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
+ atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
+ hba->hba_ptr = (void *) ib_host;
+
+ printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+ " Generic Target Core Stack %s\n", hba->hba_id,
+ IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
+
+ printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
+ " Target Core TCQ Depth: %d\n", hba->hba_id,
+ ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
+
+ return 0;
+}
+
+static void iblock_detach_hba(struct se_hba *hba)
+{
+ struct iblock_hba *ib_host = hba->hba_ptr;
+
+ printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
+ " Target Core\n", hba->hba_id, ib_host->iblock_host_id);
+
+ kfree(ib_host);
+ hba->hba_ptr = NULL;
+}
+
+static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+ struct iblock_dev *ib_dev = NULL;
+ struct iblock_hba *ib_host = hba->hba_ptr;
+
+ ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
+ if (!(ib_dev)) {
+ printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
+ return NULL;
+ }
+ ib_dev->ibd_host = ib_host;
+
+ printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
+
+ return ib_dev;
+}
+
+static struct se_device *iblock_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p)
+{
+ struct iblock_dev *ib_dev = p;
+ struct se_device *dev;
+ struct se_dev_limits dev_limits;
+ struct block_device *bd = NULL;
+ struct request_queue *q;
+ struct queue_limits *limits;
+ u32 dev_flags = 0;
+
+ if (!(ib_dev)) {
+ printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
+ return NULL;
+ }
+ memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ /*
+ * These settings need to be made tunable..
+ */
+ ib_dev->ibd_bio_set = bioset_create(32, 64);
+ if (!(ib_dev->ibd_bio_set)) {
+ printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
+ return NULL;
+ }
+ printk(KERN_INFO "IBLOCK: Created bio_set()\n");
+ /*
+ * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
+ * has already been set in order for echo 1 > $HBA/$DEV/enable to run.
+ */
+ printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
+ ib_dev->ibd_udev_path);
+
+ bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
+ FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
+ if (!(bd))
+ goto failed;
+ /*
+ * Setup the local scope queue_limits from struct request_queue->limits
+ * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+ */
+ q = bdev_get_queue(bd);
+ limits = &dev_limits.limits;
+ limits->logical_block_size = bdev_logical_block_size(bd);
+ limits->max_hw_sectors = queue_max_hw_sectors(q);
+ limits->max_sectors = queue_max_sectors(q);
+ dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
+ dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
+
+ ib_dev->ibd_major = MAJOR(bd->bd_dev);
+ ib_dev->ibd_minor = MINOR(bd->bd_dev);
+ ib_dev->ibd_bd = bd;
+
+ dev = transport_add_device_to_core_hba(hba,
+ &iblock_template, se_dev, dev_flags, (void *)ib_dev,
+ &dev_limits, "IBLOCK", IBLOCK_VERSION);
+ if (!(dev))
+ goto failed;
+
+ ib_dev->ibd_depth = dev->queue_depth;
+
+ /*
+ * Check if the underlying struct block_device request_queue supports
+ * the QUEUE_FLAG_DISCARD bit (UNMAP/WRITE_SAME in SCSI, TRIM in ATA),
+ * in which case TPE=1 needs to be reported to the initiator.
+ */
+ if (blk_queue_discard(bdev_get_queue(bd))) {
+ struct request_queue *q = bdev_get_queue(bd);
+
+ DEV_ATTRIB(dev)->max_unmap_lba_count =
+ q->limits.max_discard_sectors;
+ /*
+ * Currently hardcoded to 1 in Linux/SCSI code..
+ */
+ DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
+ DEV_ATTRIB(dev)->unmap_granularity =
+ q->limits.discard_granularity;
+ DEV_ATTRIB(dev)->unmap_granularity_alignment =
+ q->limits.discard_alignment;
+
+ printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
+ " disabled by default\n");
+ }
+
+ return dev;
+
+failed:
+ if (ib_dev->ibd_bio_set) {
+ bioset_free(ib_dev->ibd_bio_set);
+ ib_dev->ibd_bio_set = NULL;
+ }
+ ib_dev->ibd_bd = NULL;
+ ib_dev->ibd_major = 0;
+ ib_dev->ibd_minor = 0;
+ return NULL;
+}
+
+static void iblock_free_device(void *p)
+{
+ struct iblock_dev *ib_dev = p;
+
+ blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ bioset_free(ib_dev->ibd_bio_set);
+ kfree(ib_dev);
+}
+
+static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
+{
+ return container_of(task, struct iblock_req, ib_task);
+}
+
+static struct se_task *
+iblock_alloc_task(struct se_cmd *cmd)
+{
+ struct iblock_req *ib_req;
+
+ ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+ if (!(ib_req)) {
+ printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
+ return NULL;
+ }
+
+ ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
+ atomic_set(&ib_req->ib_bio_cnt, 0);
+ return &ib_req->ib_task;
+}
+
+static unsigned long long iblock_emulate_read_cap_with_block_size(
+ struct se_device *dev,
+ struct block_device *bd,
+ struct request_queue *q)
+{
+ unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
+ bdev_logical_block_size(bd)) - 1);
+ u32 block_size = bdev_logical_block_size(bd);
+
+ if (block_size == DEV_ATTRIB(dev)->block_size)
+ return blocks_long;
+
+ switch (block_size) {
+ case 4096:
+ switch (DEV_ATTRIB(dev)->block_size) {
+ case 2048:
+ blocks_long <<= 1;
+ break;
+ case 1024:
+ blocks_long <<= 2;
+ break;
+ case 512:
+ blocks_long <<= 3;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2048:
+ switch (DEV_ATTRIB(dev)->block_size) {
+ case 4096:
+ blocks_long >>= 1;
+ break;
+ case 1024:
+ blocks_long <<= 1;
+ break;
+ case 512:
+ blocks_long <<= 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1024:
+ switch (DEV_ATTRIB(dev)->block_size) {
+ case 4096:
+ blocks_long >>= 2;
+ break;
+ case 2048:
+ blocks_long >>= 1;
+ break;
+ case 512:
+ blocks_long <<= 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 512:
+ switch (DEV_ATTRIB(dev)->block_size) {
+ case 4096:
+ blocks_long >>= 3;
+ break;
+ case 2048:
+ blocks_long >>= 2;
+ break;
+ case 1024:
+ blocks_long >>= 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return blocks_long;
+}
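+
+/*
+ * Example of the rescaling above (numbers are illustrative): a 1 GiB
+ * backing device with 512-byte logical blocks yields blocks_long =
+ * 2097152 - 1 = 2097151.  With an exported block_size of 4096 the
+ * "case 512" branch shifts right by 3, giving 262143, the last LBA of
+ * the same capacity expressed in 4096-byte blocks.
+ */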
+
+/*
+ * Emulate SYNCHRONIZE_CACHE_*
+ */
+static void iblock_emulate_sync_cache(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+ int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
+ sector_t error_sector;
+ int ret;
+
+ /*
+ * If the Immediate bit is set, queue up the GOOD response
+ * for this SYNCHRONIZE_CACHE op
+ */
+ if (immed)
+ transport_complete_sync_cache(cmd, 1);
+
+ /*
+ * blkdev_issue_flush() does not support specifying a range, so
+ * we have to flush the entire cache.
+ */
+ ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
+ if (ret != 0) {
+ printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
+ " error_sector: %llu\n", ret,
+ (unsigned long long)error_sector);
+ }
+
+ if (!immed)
+ transport_complete_sync_cache(cmd, ret == 0);
+}
+
+/*
+ * Tell TCM Core that we are capable of WriteCache emulation for
+ * an underlying struct se_device.
+ */
+static int iblock_emulated_write_cache(struct se_device *dev)
+{
+ return 1;
+}
+
+static int iblock_emulated_dpo(struct se_device *dev)
+{
+ return 0;
+}
+
+/*
+ * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
+ * for TYPE_DISK.
+ */
+static int iblock_emulated_fua_write(struct se_device *dev)
+{
+ return 1;
+}
+
+static int iblock_emulated_fua_read(struct se_device *dev)
+{
+ return 0;
+}
+
+static int iblock_do_task(struct se_task *task)
+{
+ struct se_device *dev = task->task_se_cmd->se_dev;
+ struct iblock_req *req = IBLOCK_REQ(task);
+ struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
+ struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
+ struct bio *bio = req->ib_bio, *nbio = NULL;
+ int rw;
+
+ if (task->task_data_direction == DMA_TO_DEVICE) {
+ /*
+ * Force data to disk if we pretend to not have a volatile
+ * write cache, or the initiator set the Force Unit Access bit.
+ */
+ if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
+ (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
+ T_TASK(task->task_se_cmd)->t_tasks_fua))
+ rw = WRITE_FUA;
+ else
+ rw = WRITE;
+ } else {
+ rw = READ;
+ }
+
+ while (bio) {
+ nbio = bio->bi_next;
+ bio->bi_next = NULL;
+ DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
+ " bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
+
+ submit_bio(rw, bio);
+ bio = nbio;
+ }
+
+ if (q->unplug_fn)
+ q->unplug_fn(q);
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
+{
+ struct iblock_dev *ibd = dev->dev_ptr;
+ struct block_device *bd = ibd->ibd_bd;
+ int barrier = 0;
+
+ return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
+}
+
+static void iblock_free_task(struct se_task *task)
+{
+ struct iblock_req *req = IBLOCK_REQ(task);
+ struct bio *bio, *hbio = req->ib_bio;
+ /*
+ * We only release the bio(s) here if iblock_bio_done() has not called
+ * bio_put() -> iblock_bio_destructor().
+ */
+ while (hbio != NULL) {
+ bio = hbio;
+ hbio = hbio->bi_next;
+ bio->bi_next = NULL;
+ bio_put(bio);
+ }
+
+ kfree(req);
+}
+
+enum {
+ Opt_udev_path, Opt_force, Opt_err
+};
+
+static match_table_t tokens = {
+ {Opt_udev_path, "udev_path=%s"},
+ {Opt_force, "force=%d"},
+ {Opt_err, NULL}
+};
+
+static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ const char *page, ssize_t count)
+{
+ struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+
+ while ((ptr = strsep(&opts, ",")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_udev_path:
+ if (ib_dev->ibd_bd) {
+ printk(KERN_ERR "Unable to set udev_path= while"
+ " ib_dev->ibd_bd exists\n");
+ ret = -EEXIST;
+ goto out;
+ }
+
+ ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
+ "%s", match_strdup(&args[0]));
+ printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
+ ib_dev->ibd_udev_path);
+ ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+ break;
+ case Opt_force:
+ match_int(args, &arg);
+ ib_dev->ibd_force = arg;
+ printk(KERN_INFO "IBLOCK: Set force=%d\n",
+ ib_dev->ibd_force);
+ break;
+ default:
+ break;
+ }
+ }
+
+out:
+ kfree(orig);
+ return (!ret) ? count : ret;
+}
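+
+/*
+ * Example usage of the parameter parsing above from userspace (the
+ * configfs path and udev path are illustrative):
+ *
+ *   echo "udev_path=/dev/disk/by-id/scsi-35000000000000001" \
+ *     > /sys/kernel/config/target/core/iblock_0/iblock_dev/control
+ */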
+
+static ssize_t iblock_check_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev)
+{
+ struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+
+ if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+ printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t iblock_show_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ char *b)
+{
+ struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+ struct block_device *bd = ibd->ibd_bd;
+ char buf[BDEVNAME_SIZE];
+ ssize_t bl = 0;
+
+ if (bd)
+ bl += sprintf(b + bl, "iBlock device: %s",
+ bdevname(bd, buf));
+ if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
+ bl += sprintf(b + bl, " UDEV PATH: %s\n",
+ ibd->ibd_udev_path);
+ } else
+ bl += sprintf(b + bl, "\n");
+
+ bl += sprintf(b + bl, " ");
+ if (bd) {
+ bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
+ ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
+ "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
+ "CLAIMED: IBLOCK" : "CLAIMED: OS");
+ } else {
+ bl += sprintf(b + bl, "Major: %d Minor: %d\n",
+ ibd->ibd_major, ibd->ibd_minor);
+ }
+
+ return bl;
+}
+
+static void iblock_bio_destructor(struct bio *bio)
+{
+ struct se_task *task = bio->bi_private;
+ struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+
+ bio_free(bio, ib_dev->ibd_bio_set);
+}
+
+static struct bio *iblock_get_bio(
+ struct se_task *task,
+ struct iblock_req *ib_req,
+ struct iblock_dev *ib_dev,
+ int *ret,
+ sector_t lba,
+ u32 sg_num)
+{
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+ if (!(bio)) {
+ printk(KERN_ERR "Unable to allocate memory for bio\n");
+ *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return NULL;
+ }
+
+ DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
+ " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
+ DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+
+ bio->bi_bdev = ib_dev->ibd_bd;
+ bio->bi_private = (void *) task;
+ bio->bi_destructor = iblock_bio_destructor;
+ bio->bi_end_io = &iblock_bio_done;
+ bio->bi_sector = lba;
+ atomic_inc(&ib_req->ib_bio_cnt);
+
+ DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
+ DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
+ atomic_read(&ib_req->ib_bio_cnt));
+ return bio;
+}
+
+static int iblock_map_task_SG(struct se_task *task)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = SE_DEV(cmd);
+ struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+ struct iblock_req *ib_req = IBLOCK_REQ(task);
+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+ struct scatterlist *sg;
+ int ret = 0;
+ u32 i, sg_num = task->task_sg_num;
+ sector_t block_lba;
+ /*
+ * Convert the non 512-byte struct se_task SCSI blocksize into the
+ * 512-byte units used by Linux/Block for the starting BIO sector.
+ */
+ if (DEV_ATTRIB(dev)->block_size == 4096)
+ block_lba = (task->task_lba << 3);
+ else if (DEV_ATTRIB(dev)->block_size == 2048)
+ block_lba = (task->task_lba << 2);
+ else if (DEV_ATTRIB(dev)->block_size == 1024)
+ block_lba = (task->task_lba << 1);
+ else if (DEV_ATTRIB(dev)->block_size == 512)
+ block_lba = task->task_lba;
+ else {
+ printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
+ " %u\n", DEV_ATTRIB(dev)->block_size);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+
+ bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
+ if (!(bio))
+ return ret;
+
+ ib_req->ib_bio = bio;
+ hbio = tbio = bio;
+ /*
+ * Use fs/bio.c:bio_add_page() to setup the bio_vec maplist
+ * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
+ */
+ for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
+ DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
+ " %p len: %u offset: %u\n", task, bio, sg_page(sg),
+ sg->length, sg->offset);
+again:
+ ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
+ if (ret != sg->length) {
+
+ DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
+ bio->bi_sector);
+ DEBUG_IBLOCK("** task->task_size: %u\n",
+ task->task_size);
+ DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
+ bio->bi_max_vecs);
+ DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
+ bio->bi_vcnt);
+
+ bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
+ block_lba, sg_num);
+ if (!(bio))
+ goto fail;
+
+ tbio = tbio->bi_next = bio;
+ DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
+ " list, Going to again\n", bio);
+ goto again;
+ }
+ /* Always in 512 byte units for Linux/Block */
+ block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ sg_num--;
+ DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
+ " sg_num to %u\n", task, sg_num);
+ DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
+ " to %llu\n", task, block_lba);
+ DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
+ " %u\n", task, bio->bi_vcnt);
+ }
+
+ return 0;
+fail:
+ while (hbio) {
+ bio = hbio;
+ hbio = hbio->bi_next;
+ bio->bi_next = NULL;
+ bio_put(bio);
+ }
+ return ret;
+}
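+
+/*
+ * Example of the LBA conversion at the top of iblock_map_task_SG()
+ * (values are illustrative): with an exported block_size of 4096 and
+ * task->task_lba = 100, the starting 512-byte sector handed to the
+ * block layer is 100 << 3 = 800; each mapped scatterlist entry then
+ * advances block_lba by sg->length >> IBLOCK_LBA_SHIFT, i.e.
+ * sg->length / 512.
+ */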
+
+static unsigned char *iblock_get_cdb(struct se_task *task)
+{
+ return IBLOCK_REQ(task)->ib_scsi_cdb;
+}
+
+static u32 iblock_get_device_rev(struct se_device *dev)
+{
+ return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 iblock_get_device_type(struct se_device *dev)
+{
+ return TYPE_DISK;
+}
+
+static sector_t iblock_get_blocks(struct se_device *dev)
+{
+ struct iblock_dev *ibd = dev->dev_ptr;
+ struct block_device *bd = ibd->ibd_bd;
+ struct request_queue *q = bdev_get_queue(bd);
+
+ return iblock_emulate_read_cap_with_block_size(dev, bd, q);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+ struct se_task *task = bio->bi_private;
+ struct iblock_req *ibr = IBLOCK_REQ(task);
+ /*
+ * Set -EIO if !BIO_UPTODATE and the passed err is still 0
+ */
+ if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
+ err = -EIO;
+
+ if (err != 0) {
+ printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
+ " err: %d\n", bio, err);
+ /*
+ * Bump the ib_bio_err_cnt and release bio.
+ */
+ atomic_inc(&ibr->ib_bio_err_cnt);
+ smp_mb__after_atomic_inc();
+ bio_put(bio);
+ /*
+ * Wait to complete the task until the last bio has completed.
+ */
+ if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+ return;
+
+ ibr->ib_bio = NULL;
+ transport_complete_task(task, 0);
+ return;
+ }
+ DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+ task, bio, task->task_lba, bio->bi_sector, err);
+ /*
+ * bio_put() will call iblock_bio_destructor() to release the bio back
+ * to ibr->ib_bio_set.
+ */
+ bio_put(bio);
+ /*
+ * Wait to complete the task until the last bio has completed.
+ */
+ if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+ return;
+ /*
+ * Return GOOD status for task if zero ib_bio_err_cnt exists.
+ */
+ ibr->ib_bio = NULL;
+ transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
+}
+
+static struct se_subsystem_api iblock_template = {
+ .name = "iblock",
+ .owner = THIS_MODULE,
+ .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
+ .map_task_SG = iblock_map_task_SG,
+ .attach_hba = iblock_attach_hba,
+ .detach_hba = iblock_detach_hba,
+ .allocate_virtdevice = iblock_allocate_virtdevice,
+ .create_virtdevice = iblock_create_virtdevice,
+ .free_device = iblock_free_device,
+ .dpo_emulated = iblock_emulated_dpo,
+ .fua_write_emulated = iblock_emulated_fua_write,
+ .fua_read_emulated = iblock_emulated_fua_read,
+ .write_cache_emulated = iblock_emulated_write_cache,
+ .alloc_task = iblock_alloc_task,
+ .do_task = iblock_do_task,
+ .do_discard = iblock_do_discard,
+ .do_sync_cache = iblock_emulate_sync_cache,
+ .free_task = iblock_free_task,
+ .check_configfs_dev_params = iblock_check_configfs_dev_params,
+ .set_configfs_dev_params = iblock_set_configfs_dev_params,
+ .show_configfs_dev_params = iblock_show_configfs_dev_params,
+ .get_cdb = iblock_get_cdb,
+ .get_device_rev = iblock_get_device_rev,
+ .get_device_type = iblock_get_device_type,
+ .get_blocks = iblock_get_blocks,
+};
+
+static int __init iblock_module_init(void)
+{
+ return transport_subsystem_register(&iblock_template);
+}
+
+static void iblock_module_exit(void)
+{
+ transport_subsystem_release(&iblock_template);
+}
+
+MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iblock_module_init);
+module_exit(iblock_module_exit);
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
new file mode 100644
index 000000000000..64c1f4d69f76
--- /dev/null
+++ b/drivers/target/target_core_iblock.h
@@ -0,0 +1,40 @@
+#ifndef TARGET_CORE_IBLOCK_H
+#define TARGET_CORE_IBLOCK_H
+
+#define IBLOCK_VERSION "4.0"
+
+#define IBLOCK_HBA_QUEUE_DEPTH 512
+#define IBLOCK_DEVICE_QUEUE_DEPTH 32
+#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128
+#define IBLOCK_MAX_CDBS 16
+#define IBLOCK_LBA_SHIFT 9
+
+struct iblock_req {
+ struct se_task ib_task;
+ unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+ atomic_t ib_bio_cnt;
+ atomic_t ib_bio_err_cnt;
+ struct bio *ib_bio;
+ struct iblock_dev *ib_dev;
+} ____cacheline_aligned;
+
+#define IBDF_HAS_UDEV_PATH 0x01
+#define IBDF_HAS_FORCE 0x02
+
+struct iblock_dev {
+ unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
+ int ibd_force;
+ int ibd_major;
+ int ibd_minor;
+ u32 ibd_depth;
+ u32 ibd_flags;
+ struct bio_set *ibd_bio_set;
+ struct block_device *ibd_bd;
+ struct iblock_hba *ibd_host;
+} ____cacheline_aligned;
+
+struct iblock_hba {
+ int iblock_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
new file mode 100644
index 000000000000..d5a48aa0d2d1
--- /dev/null
+++ b/drivers/target/target_core_mib.c
@@ -0,0 +1,1078 @@
+/*******************************************************************************
+ * Filename: target_core_mib.c
+ *
+ * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_mib.h"
+
+/* SCSI mib table index */
+static struct scsi_index_table scsi_index_table;
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+/* SCSI Instance Table */
+#define SCSI_INST_SW_INDEX 1
+#define SCSI_TRANSPORT_INDEX 1
+
+#define NONE "None"
+#define ISPRINT(a) (((a) >= ' ') && ((a) <= '~'))
+
+static inline int list_is_first(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list->prev == head;
+}
+
+static void *locate_hba_start(
+ struct seq_file *seq,
+ loff_t *pos)
+{
+ spin_lock(&se_global->g_device_lock);
+ return seq_list_start(&se_global->g_se_dev_list, *pos);
+}
+
+static void *locate_hba_next(
+ struct seq_file *seq,
+ void *v,
+ loff_t *pos)
+{
+ return seq_list_next(v, &se_global->g_se_dev_list, pos);
+}
+
+static void locate_hba_stop(struct seq_file *seq, void *v)
+{
+ spin_unlock(&se_global->g_device_lock);
+}
+
+/****************************************************************************
+ * SCSI MIB Tables
+ ****************************************************************************/
+
+/*
+ * SCSI Instance Table
+ */
+static void *scsi_inst_seq_start(
+ struct seq_file *seq,
+ loff_t *pos)
+{
+ spin_lock(&se_global->hba_lock);
+ return seq_list_start(&se_global->g_hba_list, *pos);
+}
+
+static void *scsi_inst_seq_next(
+ struct seq_file *seq,
+ void *v,
+ loff_t *pos)
+{
+ return seq_list_next(v, &se_global->g_hba_list, pos);
+}
+
+static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
+{
+ spin_unlock(&se_global->hba_lock);
+}
+
+static int scsi_inst_seq_show(struct seq_file *seq, void *v)
+{
+ struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
+
+ if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
+ seq_puts(seq, "inst sw_indx\n");
+
+ seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
+ seq_printf(seq, "plugin: %s version: %s\n",
+ hba->transport->name, TARGET_CORE_VERSION);
+
+ return 0;
+}
+
+static const struct seq_operations scsi_inst_seq_ops = {
+ .start = scsi_inst_seq_start,
+ .next = scsi_inst_seq_next,
+ .stop = scsi_inst_seq_stop,
+ .show = scsi_inst_seq_show
+};
+
+static int scsi_inst_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scsi_inst_seq_ops);
+}
+
+static const struct file_operations scsi_inst_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = scsi_inst_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * SCSI Device Table
+ */
+static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return locate_hba_start(seq, pos);
+}
+
+static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
+{
+ locate_hba_stop(seq, v);
+}
+
+static int scsi_dev_seq_show(struct seq_file *seq, void *v)
+{
+ struct se_hba *hba;
+ struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+ g_se_dev_list);
+ struct se_device *dev = se_dev->se_dev_ptr;
+ char str[28];
+ int k;
+
+ if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+ seq_puts(seq, "inst indx role ports\n");
+
+ if (!(dev))
+ return 0;
+
+ hba = dev->se_hba;
+ if (!(hba)) {
+ /* Log error ? */
+ return 0;
+ }
+
+ seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
+ dev->dev_index, "Target", dev->dev_port_count);
+
+ memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+
+ /* vendor */
+ for (k = 0; k < 8; k++)
+ str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
+ DEV_T10_WWN(dev)->vendor[k] : 0x20;
+ str[k] = 0x20;
+
+ /* model */
+ for (k = 0; k < 16; k++)
+ str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
+ DEV_T10_WWN(dev)->model[k] : 0x20;
+ str[k + 9] = 0;
+
+ seq_printf(seq, "dev_alias: %s\n", str);
+
+ return 0;
+}
+
+static const struct seq_operations scsi_dev_seq_ops = {
+ .start = scsi_dev_seq_start,
+ .next = scsi_dev_seq_next,
+ .stop = scsi_dev_seq_stop,
+ .show = scsi_dev_seq_show
+};
+
+static int scsi_dev_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scsi_dev_seq_ops);
+}
+
+static const struct file_operations scsi_dev_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = scsi_dev_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * SCSI Port Table
+ */
+static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return locate_hba_start(seq, pos);
+}
+
+static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_port_seq_stop(struct seq_file *seq, void *v)
+{
+ locate_hba_stop(seq, v);
+}
+
+static int scsi_port_seq_show(struct seq_file *seq, void *v)
+{
+ struct se_hba *hba;
+ struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+ g_se_dev_list);
+ struct se_device *dev = se_dev->se_dev_ptr;
+ struct se_port *sep, *sep_tmp;
+
+ if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+ seq_puts(seq, "inst device indx role busy_count\n");
+
+ if (!(dev))
+ return 0;
+
+ hba = dev->se_hba;
+ if (!(hba)) {
+ /* Log error ? */
+ return 0;
+ }
+
+ /* FIXME: scsiPortBusyStatuses count */
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+ seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
+ dev->dev_index, sep->sep_index, "Device",
+ dev->dev_index, 0);
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ return 0;
+}
+
+static const struct seq_operations scsi_port_seq_ops = {
+ .start = scsi_port_seq_start,
+ .next = scsi_port_seq_next,
+ .stop = scsi_port_seq_stop,
+ .show = scsi_port_seq_show
+};
+
+static int scsi_port_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scsi_port_seq_ops);
+}
+
+static const struct file_operations scsi_port_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = scsi_port_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * SCSI Transport Table
+ */
+static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return locate_hba_start(seq, pos);
+}
+
+static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
+{
+ locate_hba_stop(seq, v);
+}
+
+static int scsi_transport_seq_show(struct seq_file *seq, void *v)
+{
+ struct se_hba *hba;
+ struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+ g_se_dev_list);
+ struct se_device *dev = se_dev->se_dev_ptr;
+ struct se_port *se, *se_tmp;
+ struct se_portal_group *tpg;
+ struct t10_wwn *wwn;
+ char buf[64];
+
+ if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+ seq_puts(seq, "inst device indx dev_name\n");
+
+ if (!(dev))
+ return 0;
+
+ hba = dev->se_hba;
+ if (!(hba)) {
+ /* Log error ? */
+ return 0;
+ }
+
+ wwn = DEV_T10_WWN(dev);
+
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
+ tpg = se->sep_tpg;
+ sprintf(buf, "scsiTransport%s",
+ TPG_TFO(tpg)->get_fabric_name());
+
+ seq_printf(seq, "%u %s %u %s+%s\n",
+ hba->hba_index, /* scsiTransportIndex */
+ buf, /* scsiTransportType */
+ (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
+ TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
+ 0,
+ TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ (strlen(wwn->unit_serial)) ?
+ /* scsiTransportDevName */
+ wwn->unit_serial : wwn->vendor);
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ return 0;
+}
+
+static const struct seq_operations scsi_transport_seq_ops = {
+ .start = scsi_transport_seq_start,
+ .next = scsi_transport_seq_next,
+ .stop = scsi_transport_seq_stop,
+ .show = scsi_transport_seq_show
+};
+
+static int scsi_transport_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scsi_transport_seq_ops);
+}
+
+static const struct file_operations scsi_transport_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = scsi_transport_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * SCSI Target Device Table
+ */
+static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return locate_hba_start(seq, pos);
+}
+
+static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
+{
+ locate_hba_stop(seq, v);
+}
+
+
+#define LU_COUNT 1 /* for now */
+static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
+{
+ struct se_hba *hba;
+ struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+ g_se_dev_list);
+ struct se_device *dev = se_dev->se_dev_ptr;
+ int non_accessible_lus = 0;
+ char status[16];
+
+ if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+ seq_puts(seq, "inst indx num_LUs status non_access_LUs"
+ " resets\n");
+
+ if (!(dev))
+ return 0;
+
+ hba = dev->se_hba;
+ if (!(hba)) {
+ /* Log error ? */
+ return 0;
+ }
+
+ switch (dev->dev_status) {
+ case TRANSPORT_DEVICE_ACTIVATED:
+ strcpy(status, "activated");
+ break;
+ case TRANSPORT_DEVICE_DEACTIVATED:
+ strcpy(status, "deactivated");
+ non_accessible_lus = 1;
+ break;
+ case TRANSPORT_DEVICE_SHUTDOWN:
+ strcpy(status, "shutdown");
+ non_accessible_lus = 1;
+ break;
+ case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+ case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+ strcpy(status, "offline");
+ non_accessible_lus = 1;
+ break;
+ default:
+ sprintf(status, "unknown(%d)", dev->dev_status);
+ non_accessible_lus = 1;
+ }
+
+ seq_printf(seq, "%u %u %u %s %u %u\n",
+ hba->hba_index, dev->dev_index, LU_COUNT,
+ status, non_accessible_lus, dev->num_resets);
+
+ return 0;
+}
+
+static const struct seq_operations scsi_tgt_dev_seq_ops = {
+ .start = scsi_tgt_dev_seq_start,
+ .next = scsi_tgt_dev_seq_next,
+ .stop = scsi_tgt_dev_seq_stop,
+ .show = scsi_tgt_dev_seq_show
+};
+
+static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scsi_tgt_dev_seq_ops);
+}
+
+static const struct file_operations scsi_tgt_dev_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = scsi_tgt_dev_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * SCSI Target Port Table
+ */
+static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return locate_hba_start(seq, pos);
+}
+
+static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
+{
+ locate_hba_stop(seq, v);
+}
+
+static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
+{
+ struct se_hba *hba;
+ struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+ g_se_dev_list);
+ struct se_device *dev = se_dev->se_dev_ptr;
+ struct se_port *sep, *sep_tmp;
+ struct se_portal_group *tpg;
+ u32 rx_mbytes, tx_mbytes;
+ unsigned long long num_cmds;
+ char buf[64];
+
+ if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+ seq_puts(seq, "inst device indx name port_index in_cmds"
+ " write_mbytes read_mbytes hs_in_cmds\n");
+
+ if (!(dev))
+ return 0;
+
+ hba = dev->se_hba;
+ if (!(hba)) {
+ /* Log error ? */
+ return 0;
+ }
+
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+ tpg = sep->sep_tpg;
+ sprintf(buf, "%sPort#",
+ TPG_TFO(tpg)->get_fabric_name());
+
+ seq_printf(seq, "%u %u %u %s%d %s%s%d ",
+ hba->hba_index,
+ dev->dev_index,
+ sep->sep_index,
+ buf, sep->sep_index,
+ TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+
+ spin_lock(&sep->sep_lun->lun_sep_lock);
+ num_cmds = sep->sep_stats.cmd_pdus;
+ rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
+ tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
+ spin_unlock(&sep->sep_lun->lun_sep_lock);
+
+ seq_printf(seq, "%llu %u %u %u\n", num_cmds,
+ rx_mbytes, tx_mbytes, 0);
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ return 0;
+}
+
+static const struct seq_operations scsi_tgt_port_seq_ops = {
+ .start = scsi_tgt_port_seq_start,
+ .next = scsi_tgt_port_seq_next,
+ .stop = scsi_tgt_port_seq_stop,
+ .show = scsi_tgt_port_seq_show
+};
+
+static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scsi_tgt_port_seq_ops);
+}
+
+static const struct file_operations scsi_tgt_port_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = scsi_tgt_port_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * SCSI Authorized Initiator Table:
+ * It contains the SCSI Initiators authorized to be attached to one of the
+ * local Target ports.
+ * Iterates through all active TPGs and extracts the info from the ACLs
+ */
+static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ spin_lock_bh(&se_global->se_tpg_lock);
+ return seq_list_start(&se_global->g_se_tpg_list, *pos);
+}
+
+static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
+ loff_t *pos)
+{
+ return seq_list_next(v, &se_global->g_se_tpg_list, pos);
+}
+
+static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
+{
+ spin_unlock_bh(&se_global->se_tpg_lock);
+}
+
+static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
+{
+ struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
+ se_tpg_list);
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+ struct se_node_acl *se_nacl;
+ int j;
+
+ if (list_is_first(&se_tpg->se_tpg_list,
+ &se_global->g_se_tpg_list))
+ seq_puts(seq, "inst dev port indx dev_or_port intr_name "
+ "map_indx att_count num_cmds read_mbytes "
+ "write_mbytes hs_num_cmds creation_time row_status\n");
+
+ if (!(se_tpg))
+ return 0;
+
+ spin_lock(&se_tpg->acl_node_lock);
+ list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
+
+ atomic_inc(&se_nacl->mib_ref_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&se_tpg->acl_node_lock);
+
+ spin_lock_irq(&se_nacl->device_list_lock);
+ for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
+ deve = &se_nacl->device_list[j];
+ if (!(deve->lun_flags &
+ TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
+ (!deve->se_lun))
+ continue;
+ lun = deve->se_lun;
+ if (!lun->lun_se_dev)
+ continue;
+
+ seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
+ " %u %s\n",
+ /* scsiInstIndex */
+ (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
+ TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
+ 0,
+ /* scsiDeviceIndex */
+ lun->lun_se_dev->dev_index,
+ /* scsiAuthIntrTgtPortIndex */
+ TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
+ /* scsiAuthIntrIndex */
+ se_nacl->acl_index,
+ /* scsiAuthIntrDevOrPort */
+ 1,
+ /* scsiAuthIntrName */
+ se_nacl->initiatorname[0] ?
+ se_nacl->initiatorname : NONE,
+ /* FIXME: scsiAuthIntrLunMapIndex */
+ 0,
+ /* scsiAuthIntrAttachedTimes */
+ deve->attach_count,
+ /* scsiAuthIntrOutCommands */
+ deve->total_cmds,
+ /* scsiAuthIntrReadMegaBytes */
+ (u32)(deve->read_bytes >> 20),
+ /* scsiAuthIntrWrittenMegaBytes */
+ (u32)(deve->write_bytes >> 20),
+ /* FIXME: scsiAuthIntrHSOutCommands */
+ 0,
+ /* scsiAuthIntrLastCreation */
+ (u32)(((u32)deve->creation_time -
+ INITIAL_JIFFIES) * 100 / HZ),
+ /* FIXME: scsiAuthIntrRowStatus */
+ "Ready");
+ }
+ spin_unlock_irq(&se_nacl->device_list_lock);
+
+ spin_lock(&se_tpg->acl_node_lock);
+ atomic_dec(&se_nacl->mib_ref_count);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&se_tpg->acl_node_lock);
+
+ return 0;
+}
+
+static const struct seq_operations scsi_auth_intr_seq_ops = {
+ .start = scsi_auth_intr_seq_start,
+ .next = scsi_auth_intr_seq_next,
+ .stop = scsi_auth_intr_seq_stop,
+ .show = scsi_auth_intr_seq_show
+};
+
+static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scsi_auth_intr_seq_ops);
+}
+
+static const struct file_operations scsi_auth_intr_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = scsi_auth_intr_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * SCSI Attached Initiator Port Table:
+ * It lists the SCSI Initiators attached to one of the local Target ports.
+ * Iterates through all active TPGs and uses the active sessions from each
+ * TPG to list the info for this table.
+ */
+static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ spin_lock_bh(&se_global->se_tpg_lock);
+ return seq_list_start(&se_global->g_se_tpg_list, *pos);
+}
+
+static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
+ loff_t *pos)
+{
+ return seq_list_next(v, &se_global->g_se_tpg_list, pos);
+}
+
+static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
+{
+ spin_unlock_bh(&se_global->se_tpg_lock);
+}
+
+static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
+{
+ struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
+ se_tpg_list);
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+ struct se_node_acl *se_nacl;
+ struct se_session *se_sess;
+ unsigned char buf[64];
+ int j;
+
+ if (list_is_first(&se_tpg->se_tpg_list,
+ &se_global->g_se_tpg_list))
+ seq_puts(seq, "inst dev port indx port_auth_indx port_name"
+ " port_ident\n");
+
+ if (!(se_tpg))
+ return 0;
+
+ spin_lock(&se_tpg->session_lock);
+ list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
+ if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
+ (!se_sess->se_node_acl) ||
+ (!se_sess->se_node_acl->device_list))
+ continue;
+
+ atomic_inc(&se_sess->mib_ref_count);
+ smp_mb__after_atomic_inc();
+ se_nacl = se_sess->se_node_acl;
+ atomic_inc(&se_nacl->mib_ref_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&se_tpg->session_lock);
+
+ spin_lock_irq(&se_nacl->device_list_lock);
+ for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
+ deve = &se_nacl->device_list[j];
+ if (!(deve->lun_flags &
+ TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
+ (!deve->se_lun))
+ continue;
+
+ lun = deve->se_lun;
+ if (!lun->lun_se_dev)
+ continue;
+
+ memset(buf, 0, 64);
+ if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
+ TPG_TFO(se_tpg)->sess_get_initiator_sid(
+ se_sess, (unsigned char *)&buf[0], 64);
+
+ seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
+ /* scsiInstIndex */
+ (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
+ TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
+ 0,
+ /* scsiDeviceIndex */
+ lun->lun_se_dev->dev_index,
+ /* scsiPortIndex */
+ TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
+ /* scsiAttIntrPortIndex */
+ (TPG_TFO(se_tpg)->sess_get_index != NULL) ?
+ TPG_TFO(se_tpg)->sess_get_index(se_sess) :
+ 0,
+ /* scsiAttIntrPortAuthIntrIdx */
+ se_nacl->acl_index,
+ /* scsiAttIntrPortName */
+ se_nacl->initiatorname[0] ?
+ se_nacl->initiatorname : NONE,
+ /* scsiAttIntrPortIdentifier */
+ buf);
+ }
+ spin_unlock_irq(&se_nacl->device_list_lock);
+
+ spin_lock(&se_tpg->session_lock);
+ atomic_dec(&se_nacl->mib_ref_count);
+ smp_mb__after_atomic_dec();
+ atomic_dec(&se_sess->mib_ref_count);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&se_tpg->session_lock);
+
+ return 0;
+}
+
+static const struct seq_operations scsi_att_intr_port_seq_ops = {
+ .start = scsi_att_intr_port_seq_start,
+ .next = scsi_att_intr_port_seq_next,
+ .stop = scsi_att_intr_port_seq_stop,
+ .show = scsi_att_intr_port_seq_show
+};
+
+static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scsi_att_intr_port_seq_ops);
+}
+
+static const struct file_operations scsi_att_intr_port_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = scsi_att_intr_port_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * SCSI Logical Unit Table
+ */
+static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return locate_hba_start(seq, pos);
+}
+
+static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
+{
+ locate_hba_stop(seq, v);
+}
+
+#define SCSI_LU_INDEX 1
+static int scsi_lu_seq_show(struct seq_file *seq, void *v)
+{
+ struct se_hba *hba;
+ struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+ g_se_dev_list);
+ struct se_device *dev = se_dev->se_dev_ptr;
+ int j;
+ char str[28];
+
+ if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+ seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
+ " dev_type status state-bit num_cmds read_mbytes"
+ " write_mbytes resets full_stat hs_num_cmds creation_time\n");
+
+ if (!(dev))
+ return 0;
+
+ hba = dev->se_hba;
+ if (!(hba)) {
+ /* Log error ? */
+ return 0;
+ }
+
+ /* Fix LU state, if we can read it from the device */
+ seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
+ dev->dev_index, SCSI_LU_INDEX,
+ (unsigned long long)0, /* FIXME: scsiLuDefaultLun */
+ (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
+ /* scsiLuWwnName */
+ (char *)&DEV_T10_WWN(dev)->unit_serial[0] :
+ "None");
+
+ memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+ /* scsiLuVendorId */
+ for (j = 0; j < 8; j++)
+ str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
+ DEV_T10_WWN(dev)->vendor[j] : 0x20;
+ str[8] = 0;
+ seq_printf(seq, " %s", str);
+
+ /* scsiLuProductId */
+ for (j = 0; j < 16; j++)
+ str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
+ DEV_T10_WWN(dev)->model[j] : 0x20;
+ str[16] = 0;
+ seq_printf(seq, " %s", str);
+
+ /* scsiLuRevisionId */
+ for (j = 0; j < 4; j++)
+ str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
+ DEV_T10_WWN(dev)->revision[j] : 0x20;
+ str[4] = 0;
+ seq_printf(seq, " %s", str);
+
+ seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
+ /* scsiLuPeripheralType */
+ TRANSPORT(dev)->get_device_type(dev),
+ (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
+ "available" : "notavailable", /* scsiLuStatus */
+ "exposed", /* scsiLuState */
+ (unsigned long long)dev->num_cmds,
+ /* scsiLuReadMegaBytes */
+ (u32)(dev->read_bytes >> 20),
+ /* scsiLuWrittenMegaBytes */
+ (u32)(dev->write_bytes >> 20),
+ dev->num_resets, /* scsiLuInResets */
+ 0, /* scsiLuOutTaskSetFullStatus */
+ 0, /* scsiLuHSInCommands */
+ (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
+ 100 / HZ));
+
+ return 0;
+}
+
+static const struct seq_operations scsi_lu_seq_ops = {
+ .start = scsi_lu_seq_start,
+ .next = scsi_lu_seq_next,
+ .stop = scsi_lu_seq_stop,
+ .show = scsi_lu_seq_show
+};
+
+static int scsi_lu_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scsi_lu_seq_ops);
+}
+
+static const struct file_operations scsi_lu_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = scsi_lu_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/****************************************************************************/
+
+/*
+ * Remove proc fs entries
+ */
+void remove_scsi_target_mib(void)
+{
+ remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
+ remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
+ remove_proc_entry("scsi_target/mib/scsi_port", NULL);
+ remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
+ remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
+ remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
+ remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
+ remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
+ remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
+ remove_proc_entry("scsi_target/mib", NULL);
+}
+
+/*
+ * Create proc fs entries for the mib tables
+ */
+int init_scsi_target_mib(void)
+{
+ struct proc_dir_entry *dir_entry;
+ struct proc_dir_entry *scsi_inst_entry;
+ struct proc_dir_entry *scsi_dev_entry;
+ struct proc_dir_entry *scsi_port_entry;
+ struct proc_dir_entry *scsi_transport_entry;
+ struct proc_dir_entry *scsi_tgt_dev_entry;
+ struct proc_dir_entry *scsi_tgt_port_entry;
+ struct proc_dir_entry *scsi_auth_intr_entry;
+ struct proc_dir_entry *scsi_att_intr_port_entry;
+ struct proc_dir_entry *scsi_lu_entry;
+
+ dir_entry = proc_mkdir("scsi_target/mib", NULL);
+ if (!(dir_entry)) {
+ printk(KERN_ERR "proc_mkdir() failed.\n");
+ return -1;
+ }
+
+ scsi_inst_entry =
+ create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
+ if (scsi_inst_entry)
+ scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
+ else
+ goto error;
+
+ scsi_dev_entry =
+ create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
+ if (scsi_dev_entry)
+ scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
+ else
+ goto error;
+
+ scsi_port_entry =
+ create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
+ if (scsi_port_entry)
+ scsi_port_entry->proc_fops = &scsi_port_seq_fops;
+ else
+ goto error;
+
+ scsi_transport_entry =
+ create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
+ if (scsi_transport_entry)
+ scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
+ else
+ goto error;
+
+ scsi_tgt_dev_entry =
+ create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
+ if (scsi_tgt_dev_entry)
+ scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
+ else
+ goto error;
+
+ scsi_tgt_port_entry =
+ create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
+ if (scsi_tgt_port_entry)
+ scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
+ else
+ goto error;
+
+ scsi_auth_intr_entry =
+ create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
+ if (scsi_auth_intr_entry)
+ scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
+ else
+ goto error;
+
+ scsi_att_intr_port_entry =
+ create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
+ if (scsi_att_intr_port_entry)
+ scsi_att_intr_port_entry->proc_fops =
+ &scsi_att_intr_port_seq_fops;
+ else
+ goto error;
+
+ scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
+ if (scsi_lu_entry)
+ scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
+ else
+ goto error;
+
+ return 0;
+
+error:
+ printk(KERN_ERR "create_proc_entry() failed.\n");
+ remove_scsi_target_mib();
+ return -1;
+}
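
Once these entries are registered, each table can be read like any other procfs file. A minimal user-space sketch (assuming the target module is loaded so the paths created above exist):

#include <stdio.h>

int main(void)
{
	char line[256];
	/* Path created by init_scsi_target_mib() above. */
	FILE *f = fopen("/proc/scsi_target/mib/scsi_inst", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* header row, then one row per HBA */
	fclose(f);
	return 0;
}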
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables
+ */
+void init_scsi_index_table(void)
+{
+ memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+ spin_lock_init(&scsi_index_table.lock);
+}
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+ u32 new_index;
+
+ if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+ printk(KERN_ERR "Invalid index type %d\n", type);
+ return -1;
+ }
+
+ spin_lock(&scsi_index_table.lock);
+ new_index = ++scsi_index_table.scsi_mib_index[type];
+ if (new_index == 0)
+ new_index = ++scsi_index_table.scsi_mib_index[type];
+ spin_unlock(&scsi_index_table.lock);
+
+ return new_index;
+}
+EXPORT_SYMBOL(scsi_get_new_index);
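
scsi_get_new_index() hands out row indexes from a per-type 32-bit counter that wraps and skips zero, so a valid index is never 0. A stand-alone user-space sketch of the same scheme (model only; the kernel version above additionally holds scsi_index_table.lock around the increment):

#include <stdint.h>
#include <stdio.h>

/* Stand-alone model of the wrapping, zero-skipping index allocator. */
static uint32_t mib_index[3];		/* one counter per index type */

static uint32_t new_index(int type)
{
	uint32_t idx = ++mib_index[type];

	if (idx == 0)			/* counter wrapped: skip 0 */
		idx = ++mib_index[type];
	return idx;
}

int main(void)
{
	uint32_t a, b, c;

	mib_index[0] = 0xfffffffeU;	/* force a wrap for the demo */
	a = new_index(0);
	b = new_index(0);
	c = new_index(0);
	printf("%u %u %u\n", (unsigned)a, (unsigned)b, (unsigned)c);
	/* prints: 4294967295 1 2 */
	return 0;
}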
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
new file mode 100644
index 000000000000..277204633850
--- /dev/null
+++ b/drivers/target/target_core_mib.h
@@ -0,0 +1,28 @@
+#ifndef TARGET_CORE_MIB_H
+#define TARGET_CORE_MIB_H
+
+typedef enum {
+ SCSI_INST_INDEX,
+ SCSI_DEVICE_INDEX,
+ SCSI_AUTH_INTR_INDEX,
+ SCSI_INDEX_TYPE_MAX
+} scsi_index_t;
+
+struct scsi_index_table {
+ spinlock_t lock;
+ u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
+} ____cacheline_aligned;
+
+/* SCSI Port stats */
+struct scsi_port_stats {
+ u64 cmd_pdus;
+ u64 tx_data_octets;
+ u64 rx_data_octets;
+} ____cacheline_aligned;
+
+extern int init_scsi_target_mib(void);
+extern void remove_scsi_target_mib(void);
+extern void init_scsi_index_table(void);
+extern u32 scsi_get_new_index(scsi_index_t);
+
+#endif /*** TARGET_CORE_MIB_H ***/
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
new file mode 100644
index 000000000000..2521f75362c3
--- /dev/null
+++ b/drivers/target/target_core_pr.c
@@ -0,0 +1,4252 @@
+/*******************************************************************************
+ * Filename: target_core_pr.c
+ *
+ * This file contains SPC-3 compliant persistent reservations and
+ * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
+ *
+ * Copyright (c) 2009, 2010 Rising Tide Systems
+ * Copyright (c) 2009, 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+/*
+ * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
+ */
+struct pr_transport_id_holder {
+ int dest_local_nexus;
+ struct t10_pr_registration *dest_pr_reg;
+ struct se_portal_group *dest_tpg;
+ struct se_node_acl *dest_node_acl;
+ struct se_dev_entry *dest_se_deve;
+ struct list_head dest_list;
+};
+
+int core_pr_dump_initiator_port(
+ struct t10_pr_registration *pr_reg,
+ char *buf,
+ u32 size)
+{
+ if (!(pr_reg->isid_present_at_reg))
+ return 0;
+
+ snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
+ return 1;
+}
+
+static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
+ struct t10_pr_registration *, int);
+
+static int core_scsi2_reservation_seq_non_holder(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u32 pr_reg_type)
+{
+ switch (cdb[0]) {
+ case INQUIRY:
+ case RELEASE:
+ case RELEASE_10:
+ return 0;
+ default:
+ return 1;
+ }
+
+ return 1;
+}
+
+static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ int ret;
+
+ if (!(sess))
+ return 0;
+
+ spin_lock(&dev->dev_reservation_lock);
+ if (!dev->dev_reserved_node_acl || !sess) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+ if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return -1;
+ }
+ if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+ ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
+}
+
+static int core_scsi2_reservation_release(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ struct se_portal_group *tpg = sess->se_tpg;
+
+ if (!(sess) || !(tpg))
+ return 0;
+
+ spin_lock(&dev->dev_reservation_lock);
+ if (!dev->dev_reserved_node_acl || !sess) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+
+ if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+ dev->dev_reserved_node_acl = NULL;
+ dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+ if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+ dev->dev_res_bin_isid = 0;
+ dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+ }
+ printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
+ " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+ sess->se_node_acl->initiatorname);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return 0;
+}
+
+static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ struct se_portal_group *tpg = sess->se_tpg;
+
+ if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) &&
+ (T_TASK(cmd)->t_task_cdb[1] & 0x02)) {
+ printk(KERN_ERR "LongIO and Obselete Bits set, returning"
+ " ILLEGAL_REQUEST\n");
+ return PYX_TRANSPORT_ILLEGAL_REQUEST;
+ }
+ /*
+ * This is currently the case for target_core_mod passthrough struct se_cmd
+ * ops
+ */
+ if (!(sess) || !(tpg))
+ return 0;
+
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_reserved_node_acl &&
+ (dev->dev_reserved_node_acl != sess->se_node_acl)) {
+ printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ printk(KERN_ERR "Original reserver LUN: %u %s\n",
+ SE_LUN(cmd)->unpacked_lun,
+ dev->dev_reserved_node_acl->initiatorname);
+ printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
+ " from %s \n", SE_LUN(cmd)->unpacked_lun,
+ cmd->se_deve->mapped_lun,
+ sess->se_node_acl->initiatorname);
+ spin_unlock(&dev->dev_reservation_lock);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+
+ dev->dev_reserved_node_acl = sess->se_node_acl;
+ dev->dev_flags |= DF_SPC2_RESERVATIONS;
+ if (sess->sess_bin_isid != 0) {
+ dev->dev_res_bin_isid = sess->sess_bin_isid;
+ dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+ }
+ printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+ " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+ sess->se_node_acl->initiatorname);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return 0;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
+ struct se_node_acl *, struct se_session *);
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
+
+/*
+ * Setup in target_core_transport.c:transport_generic_cmd_sequencer()
+ * and called via struct se_cmd->transport_emulate_cdb() in TCM processing
+ * thread context.
+ */
+int core_scsi2_emulate_crh(struct se_cmd *cmd)
+{
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ struct t10_pr_registration *pr_reg;
+ struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation;
+ unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+ int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS);
+ int conflict = 0;
+
+ if (!(se_sess))
+ return 0;
+
+ if (!(crh))
+ goto after_crh;
+
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+ se_sess);
+ if (pr_reg) {
+ /*
+ * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE
+ * behavior
+ *
+ * A RESERVE(6) or RESERVE(10) command shall complete with GOOD
+ * status, but no reservation shall be established and the
+ * persistent reservation shall not be changed, if the command
+ * is received from a) and b) below.
+ *
+ * A RELEASE(6) or RELEASE(10) command shall complete with GOOD
+ * status, but the persistent reservation shall not be released,
+ * if the command is received from a) and b)
+ *
+ * a) An I_T nexus that is a persistent reservation holder; or
+ * b) An I_T nexus that is registered if a registrants only or
+ * all registrants type persistent reservation is present.
+ *
+ * In all other cases, a RESERVE(6) command, RESERVE(10) command,
+ * RELEASE(6) command, or RELEASE(10) command shall be processed
+ * as defined in SPC-2.
+ */
+ if (pr_reg->pr_res_holder) {
+ core_scsi3_put_pr_reg(pr_reg);
+ return 0;
+ }
+ if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+ (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
+ (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ core_scsi3_put_pr_reg(pr_reg);
+ return 0;
+ }
+ core_scsi3_put_pr_reg(pr_reg);
+ conflict = 1;
+ } else {
+ /*
+ * Following spc2r20 5.5.1 Reservations overview:
+ *
+ * If a logical unit has executed a PERSISTENT RESERVE OUT
+ * command with the REGISTER or the REGISTER AND IGNORE
+ * EXISTING KEY service action and is still registered by any
+ * initiator, all RESERVE commands and all RELEASE commands
+ * regardless of initiator shall conflict and shall terminate
+ * with a RESERVATION CONFLICT status.
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1;
+ spin_unlock(&pr_tmpl->registration_lock);
+ }
+
+ if (conflict) {
+ printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
+ " while active SPC-3 registrations exist,"
+ " returning RESERVATION_CONFLICT\n");
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+
+after_crh:
+ if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10))
+ return core_scsi2_reservation_reserve(cmd);
+ else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10))
+ return core_scsi2_reservation_release(cmd);
+ else
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+}
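
The CRH=1 rules quoted from spc4r17 above reduce to a small decision: a registered nexus that holds the reservation, or any registrant under a registrants-only/all-registrants type, completes with GOOD status and changes nothing; any other existing registration conflicts; and only a device with no registrations falls through to plain SPC-2 processing. A distilled user-space sketch of that decision (hypothetical helper names, not the kernel code path itself):

#include <stdbool.h>
#include <stdio.h>

enum crh_outcome { CRH_NOOP_GOOD, CRH_CONFLICT, CRH_SPC2_PASSTHROUGH };

/*
 * Distilled sketch of the CRH=1 decision made in core_scsi2_emulate_crh():
 * what should an incoming SPC-2 RESERVE/RELEASE do when SPC-3 persistent
 * reservations are enabled on the device?  For illustration only.
 */
static enum crh_outcome crh_decide(bool nexus_registered,
				   bool nexus_is_pr_holder,
				   bool type_is_regonly_or_allreg,
				   bool any_registrations_exist)
{
	if (nexus_registered) {
		/* Holder, or any registrant under *_REGONLY / *_ALLREG:
		 * complete with GOOD status, change nothing. */
		if (nexus_is_pr_holder || type_is_regonly_or_allreg)
			return CRH_NOOP_GOOD;
		return CRH_CONFLICT;
	}
	/* Unregistered nexus: conflict while any registration exists,
	 * otherwise process as a normal SPC-2 RESERVE/RELEASE. */
	return any_registrations_exist ? CRH_CONFLICT : CRH_SPC2_PASSTHROUGH;
}

int main(void)
{
	/* Registered non-holder under a Write Exclusive reservation. */
	printf("%d\n", crh_decide(true, false, false, true)); /* 1 == CRH_CONFLICT */
	return 0;
}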
+
+/*
+ * Begin SPC-3/SPC-4 Persistent Reservations emulation support
+ *
+ * This function is called by those initiator ports who are *NOT*
+ * the active PR reservation holder when a reservation is present.
+ */
+static int core_scsi3_pr_seq_non_holder(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u32 pr_reg_type)
+{
+ struct se_dev_entry *se_deve;
+ struct se_session *se_sess = SE_SESS(cmd);
+ int other_cdb = 0, ignore_reg;
+ int registered_nexus = 0, ret = 1; /* Conflict by default */
+ int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
+ int we = 0; /* Write Exclusive */
+ int legacy = 0; /* Act like a legacy device and return
+ * RESERVATION CONFLICT on some CDBs */
+ /*
+ * A legacy SPC-2 reservation is being held.
+ */
+ if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
+ return core_scsi2_reservation_seq_non_holder(cmd,
+ cdb, pr_reg_type);
+
+ se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ /*
+ * Determine if the registration should be ignored due to
+ * non-matching ISIDs in core_scsi3_pr_reservation_check().
+ */
+ ignore_reg = (pr_reg_type & 0x80000000);
+ if (ignore_reg)
+ pr_reg_type &= ~0x80000000;
+
+ switch (pr_reg_type) {
+ case PR_TYPE_WRITE_EXCLUSIVE:
+ we = 1;
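+ /* fall through */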
+ case PR_TYPE_EXCLUSIVE_ACCESS:
+ /*
+ * Some commands are only allowed for the persistent reservation
+ * holder.
+ */
+ if ((se_deve->def_pr_registered) && !(ignore_reg))
+ registered_nexus = 1;
+ break;
+ case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+ we = 1;
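+ /* fall through */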
+ case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+ /*
+ * Some commands are only allowed for registered I_T Nexuses.
+ */
+ reg_only = 1;
+ if ((se_deve->def_pr_registered) && !(ignore_reg))
+ registered_nexus = 1;
+ break;
+ case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+ we = 1;
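+ /* fall through */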
+ case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+ /*
+ * Each registered I_T Nexus is a reservation holder.
+ */
+ all_reg = 1;
+ if ((se_deve->def_pr_registered) && !(ignore_reg))
+ registered_nexus = 1;
+ break;
+ default:
+ return -1;
+ }
+ /*
+ * Referenced from spc4r17 table 45 for *NON* PR holder access
+ */
+ switch (cdb[0]) {
+ case SECURITY_PROTOCOL_IN:
+ if (registered_nexus)
+ return 0;
+ ret = (we) ? 0 : 1;
+ break;
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case READ_ATTRIBUTE:
+ case READ_BUFFER:
+ case RECEIVE_DIAGNOSTIC:
+ if (legacy) {
+ ret = 1;
+ break;
+ }
+ if (registered_nexus) {
+ ret = 0;
+ break;
+ }
+ ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+ break;
+ case PERSISTENT_RESERVE_OUT:
+ /*
+ * This follows PERSISTENT_RESERVE_OUT service actions that
+ * are allowed in the presence of various reservations.
+ * See spc4r17, table 46
+ */
+ switch (cdb[1] & 0x1f) {
+ case PRO_CLEAR:
+ case PRO_PREEMPT:
+ case PRO_PREEMPT_AND_ABORT:
+ ret = (registered_nexus) ? 0 : 1;
+ break;
+ case PRO_REGISTER:
+ case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+ ret = 0;
+ break;
+ case PRO_REGISTER_AND_MOVE:
+ case PRO_RESERVE:
+ ret = 1;
+ break;
+ case PRO_RELEASE:
+ ret = (registered_nexus) ? 0 : 1;
+ break;
+ default:
+ printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+ " action: 0x%02x\n", cdb[1] & 0x1f);
+ return -1;
+ }
+ break;
+ case RELEASE:
+ case RELEASE_10:
+ /* Handled by CRH=1 in core_scsi2_emulate_crh() */
+ ret = 0;
+ break;
+ case RESERVE:
+ case RESERVE_10:
+ /* Handled by CRH=1 in core_scsi2_emulate_crh() */
+ ret = 0;
+ break;
+ case TEST_UNIT_READY:
+ ret = (legacy) ? 1 : 0; /* Conflict for legacy */
+ break;
+ case MAINTENANCE_IN:
+ switch (cdb[1] & 0x1f) {
+ case MI_MANAGEMENT_PROTOCOL_IN:
+ if (registered_nexus) {
+ ret = 0;
+ break;
+ }
+ ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+ break;
+ case MI_REPORT_SUPPORTED_OPERATION_CODES:
+ case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
+ if (legacy) {
+ ret = 1;
+ break;
+ }
+ if (registered_nexus) {
+ ret = 0;
+ break;
+ }
+ ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+ break;
+ case MI_REPORT_ALIASES:
+ case MI_REPORT_IDENTIFYING_INFORMATION:
+ case MI_REPORT_PRIORITY:
+ case MI_REPORT_TARGET_PGS:
+ case MI_REPORT_TIMESTAMP:
+ ret = 0; /* Allowed */
+ break;
+ default:
+ printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
+ (cdb[1] & 0x1f));
+ return -1;
+ }
+ break;
+ case ACCESS_CONTROL_IN:
+ case ACCESS_CONTROL_OUT:
+ case INQUIRY:
+ case LOG_SENSE:
+ case READ_MEDIA_SERIAL_NUMBER:
+ case REPORT_LUNS:
+ case REQUEST_SENSE:
+ ret = 0; /* Allowed CDBs */
+ break;
+ default:
+ other_cdb = 1;
+ break;
+ }
+ /*
+ * Case where the CDB is explicitly allowed in the above switch
+ * statement.
+ */
+ if (!(ret) && !(other_cdb)) {
+#if 0
+ printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s"
+ " reservation holder\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+ return ret;
+ }
+ /*
+ * Handle the Write Exclusive types for initiator ports *NOT* holding
+ * the WRITE_EXCLUSIVE_* reservation.
+ */
+ if ((we) && !(registered_nexus)) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /*
+ * Conflict for write exclusive
+ */
+ printk(KERN_INFO "%s Conflict for unregistered nexus"
+ " %s CDB: 0x%02x to %s reservation\n",
+ transport_dump_cmd_direction(cmd),
+ se_sess->se_node_acl->initiatorname, cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+ return 1;
+ } else {
+ /*
+ * Allow non-WRITE CDBs for all Write Exclusive
+ * PR TYPEs to pass for registered and
+ * non-registered nexuses NOT holding the reservation.
+ *
+ * We only make noise for the unregistered nexuses,
+ * as we expect registered non-reservation holding
+ * nexuses to issue CDBs.
+ */
+#if 0
+ if (!(registered_nexus)) {
+ printk(KERN_INFO "Allowing implict CDB: 0x%02x"
+ " for %s reservation on unregistered"
+ " nexus\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+ }
+#endif
+ return 0;
+ }
+ } else if ((reg_only) || (all_reg)) {
+ if (registered_nexus) {
+ /*
+ * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
+ * allow commands from registered nexuses.
+ */
+#if 0
+ printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s"
+ " reservation\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+ return 0;
+ }
+ }
+ printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+ " for %s reservation\n", transport_dump_cmd_direction(cmd),
+ (registered_nexus) ? "" : "un",
+ se_sess->se_node_acl->initiatorname, cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+
+ return 1; /* Conflict by default */
+}
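
For CDBs that fall out of the table-45 switch above, the tail of the function reduces to: an unregistered nexus under a Write Exclusive type may still read but never write, a registered nexus under the registrants-only/all-registrants types is allowed, and everything else conflicts. A compact user-space sketch of that fallback (hypothetical helper, for illustration only):

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the fallback at the end of core_scsi3_pr_seq_non_holder()
 * for CDBs not explicitly listed in spc4r17 table 45.  Returns true
 * when the command should see RESERVATION CONFLICT.
 */
static bool nonholder_conflicts(bool write_exclusive_type,
				bool regonly_or_allreg_type,
				bool nexus_registered,
				bool cmd_is_write)
{
	if (write_exclusive_type && !nexus_registered)
		return cmd_is_write;	/* reads pass, writes conflict */
	if (regonly_or_allreg_type && nexus_registered)
		return false;		/* registered nexuses are allowed */
	return true;			/* conflict by default */
}

int main(void)
{
	/* Unregistered initiator writing under WRITE EXCLUSIVE: conflict. */
	printf("%d\n", nonholder_conflicts(true, false, false, true));
	return 0;
}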
+
+static u32 core_scsi3_pr_generation(struct se_device *dev)
+{
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ u32 prg;
+ /*
+ * PRGeneration field shall contain the value of a 32-bit wrapping
+ * counter maintained by the device server.
+ *
+ * Note that this is done regardless of Active Persist across
+ * Target PowerLoss (APTPL)
+ *
+ * See spc4r17 section 6.3.12 READ_KEYS service action
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ prg = T10_RES(su_dev)->pr_generation++;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return prg;
+}
+
+static int core_scsi3_pr_reservation_check(
+ struct se_cmd *cmd,
+ u32 *pr_reg_type)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ int ret;
+
+ if (!(sess))
+ return 0;
+ /*
+ * A legacy SPC-2 reservation is being held.
+ */
+ if (dev->dev_flags & DF_SPC2_RESERVATIONS)
+ return core_scsi2_reservation_check(cmd, pr_reg_type);
+
+ spin_lock(&dev->dev_reservation_lock);
+ if (!(dev->dev_pr_res_holder)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+ *pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+ cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+ if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return -1;
+ }
+ if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+ ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
+ sess->sess_bin_isid) ? 0 : -1;
+ /*
+ * Use a bit in *pr_reg_type to signal an ISID mismatch to
+ * core_scsi3_pr_seq_non_holder().
+ */
+ if (ret != 0)
+ *pr_reg_type |= 0x80000000;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
+}
+
+static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct se_dev_entry *deve,
+ unsigned char *isid,
+ u64 sa_res_key,
+ int all_tg_pt,
+ int aptpl)
+{
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct t10_pr_registration *pr_reg;
+
+ pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
+ if (!(pr_reg)) {
+ printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+ return NULL;
+ }
+
+ pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len,
+ GFP_ATOMIC);
+ if (!(pr_reg->pr_aptpl_buf)) {
+ printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+ atomic_set(&pr_reg->pr_res_holders, 0);
+ pr_reg->pr_reg_nacl = nacl;
+ pr_reg->pr_reg_deve = deve;
+ pr_reg->pr_res_mapped_lun = deve->mapped_lun;
+ pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
+ pr_reg->pr_res_key = sa_res_key;
+ pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+ pr_reg->pr_reg_aptpl = aptpl;
+ pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
+ /*
+ * If an ISID value for this SCSI Initiator Port exists,
+ * save it to the registration now.
+ */
+ if (isid != NULL) {
+ pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+ snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+ pr_reg->isid_present_at_reg = 1;
+ }
+
+ return pr_reg;
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *);
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
+
+/*
+ * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0
+ * modes.
+ */
+static struct t10_pr_registration *__core_scsi3_alloc_registration(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct se_dev_entry *deve,
+ unsigned char *isid,
+ u64 sa_res_key,
+ int all_tg_pt,
+ int aptpl)
+{
+ struct se_dev_entry *deve_tmp;
+ struct se_node_acl *nacl_tmp;
+ struct se_port *port, *port_tmp;
+ struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
+ int ret;
+ /*
+ * Create a registration for the I_T Nexus upon which the
+ * PROUT REGISTER was received.
+ */
+ pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(pr_reg))
+ return NULL;
+ /*
+ * Return pointer to pr_reg for ALL_TG_PT=0
+ */
+ if (!(all_tg_pt))
+ return pr_reg;
+ /*
+ * Create list of matching SCSI Initiator Port registrations
+ * for ALL_TG_PT=1
+ */
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
+ atomic_inc(&port->sep_tg_pt_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&dev->se_port_lock);
+
+ spin_lock_bh(&port->sep_alua_lock);
+ list_for_each_entry(deve_tmp, &port->sep_alua_list,
+ alua_port_list) {
+ /*
+ * This pointer will be NULL for demo mode MappedLUNs
+ * that have not been made explicit via a ConfigFS
+ * MappedLUN group for the SCSI Initiator Node ACL.
+ */
+ if (!(deve_tmp->se_lun_acl))
+ continue;
+
+ nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
+ /*
+ * Skip the matching struct se_node_acl that is allocated
+ * above..
+ */
+ if (nacl == nacl_tmp)
+ continue;
+ /*
+ * Only perform PR registrations for target ports on
+ * the same fabric module as the REGISTER w/ ALL_TG_PT=1
+ * arrived.
+ */
+ if (tfo != nacl_tmp->se_tpg->se_tpg_tfo)
+ continue;
+ /*
+ * Look for a matching Initiator Node ACL in ASCII format
+ */
+ if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
+ continue;
+
+ atomic_inc(&deve_tmp->pr_ref_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock_bh(&port->sep_alua_lock);
+ /*
+ * Grab a configfs group dependency that is released
+ * for the exception path at label out: below, or upon
+ * completion of adding ALL_TG_PT=1 registrations in
+ * __core_scsi3_add_registration()
+ */
+ ret = core_scsi3_lunacl_depend_item(deve_tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "core_scsi3_lunacl_depend"
+ "_item() failed\n");
+ atomic_dec(&port->sep_tg_pt_ref_cnt);
+ smp_mb__after_atomic_dec();
+ atomic_dec(&deve_tmp->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ goto out;
+ }
+ /*
+ * Located a matching SCSI Initiator Port on a different
+ * port, allocate the pr_reg_atp and attach it to the
+ * pr_reg->pr_reg_atp_list that will be processed once
+ * the original *pr_reg is processed in
+ * __core_scsi3_add_registration()
+ */
+ pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
+ nacl_tmp, deve_tmp, NULL,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(pr_reg_atp)) {
+ atomic_dec(&port->sep_tg_pt_ref_cnt);
+ smp_mb__after_atomic_dec();
+ atomic_dec(&deve_tmp->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ core_scsi3_lunacl_undepend_item(deve_tmp);
+ goto out;
+ }
+
+ list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
+ &pr_reg->pr_reg_atp_list);
+ spin_lock_bh(&port->sep_alua_lock);
+ }
+ spin_unlock_bh(&port->sep_alua_lock);
+
+ spin_lock(&dev->se_port_lock);
+ atomic_dec(&port->sep_tg_pt_ref_cnt);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ return pr_reg;
+out:
+ list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+ &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+ list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+ core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+ }
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ return NULL;
+}
+
+int core_scsi3_alloc_aptpl_registration(
+ struct t10_reservation_template *pr_tmpl,
+ u64 sa_res_key,
+ unsigned char *i_port,
+ unsigned char *isid,
+ u32 mapped_lun,
+ unsigned char *t_port,
+ u16 tpgt,
+ u32 target_lun,
+ int res_holder,
+ int all_tg_pt,
+ u8 type)
+{
+ struct t10_pr_registration *pr_reg;
+
+ if (!(i_port) || !(t_port) || !(sa_res_key)) {
+ printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+ return -1;
+ }
+
+ pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
+ if (!(pr_reg)) {
+ printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+ return -1;
+ }
+ pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
+
+ INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+ atomic_set(&pr_reg->pr_res_holders, 0);
+ pr_reg->pr_reg_nacl = NULL;
+ pr_reg->pr_reg_deve = NULL;
+ pr_reg->pr_res_mapped_lun = mapped_lun;
+ pr_reg->pr_aptpl_target_lun = target_lun;
+ pr_reg->pr_res_key = sa_res_key;
+ pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+ pr_reg->pr_reg_aptpl = 1;
+ pr_reg->pr_reg_tg_pt_lun = NULL;
+ pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
+ pr_reg->pr_res_type = type;
+ /*
+ * If an ISID value had been saved in APTPL metadata for this
+ * SCSI Initiator Port, restore it now.
+ */
+ if (isid != NULL) {
+ pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+ snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+ pr_reg->isid_present_at_reg = 1;
+ }
+ /*
+ * Copy the i_port and t_port information from caller.
+ */
+ snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port);
+ snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port);
+ pr_reg->pr_reg_tpgt = tpgt;
+ /*
+ * Set pr_res_holder from the caller; the pr_reg that is the reservation
+ * holder will get its pointer set in core_scsi3_aptpl_reserve() once
+ * the Initiator Node LUN ACL from the fabric module is created for
+ * this registration.
+ */
+ pr_reg->pr_res_holder = res_holder;
+
+ list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
+ printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
+ " metadata\n", (res_holder) ? "+reservation" : "");
+ return 0;
+}
+
+static void core_scsi3_aptpl_reserve(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_node_acl *node_acl,
+ struct t10_pr_registration *pr_reg)
+{
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ spin_lock(&dev->dev_reservation_lock);
+ dev->dev_pr_res_holder = pr_reg;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+ " new reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ TPG_TFO(tpg)->get_fabric_name(),
+ core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+ TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname,
+ (prf_isid) ? &i_buf[0] : "");
+}
+
+static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
+ struct t10_pr_registration *, int, int);
+
+static int __core_scsi3_check_aptpl_registration(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_lun *lun,
+ u32 target_lun,
+ struct se_node_acl *nacl,
+ struct se_dev_entry *deve)
+{
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
+ unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+ u16 tpgt;
+
+ memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
+ memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
+ /*
+ * Copy Initiator Port information from struct se_node_acl
+ */
+ snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
+ snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
+ TPG_TFO(tpg)->tpg_get_wwn(tpg));
+ tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+ /*
+ * Look for the matching registrations+reservation from those
+ * created from APTPL metadata. Note that multiple registrations
+ * may exist for fabrics that use ISIDs in their SCSI Initiator Port
+ * TransportIDs.
+ */
+ spin_lock(&pr_tmpl->aptpl_reg_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+ pr_reg_aptpl_list) {
+ if (!(strcmp(pr_reg->pr_iport, i_port)) &&
+ (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
+ !(strcmp(pr_reg->pr_tport, t_port)) &&
+ (pr_reg->pr_reg_tpgt == tpgt) &&
+ (pr_reg->pr_aptpl_target_lun == target_lun)) {
+
+ pr_reg->pr_reg_nacl = nacl;
+ pr_reg->pr_reg_deve = deve;
+ pr_reg->pr_reg_tg_pt_lun = lun;
+
+ list_del(&pr_reg->pr_reg_aptpl_list);
+ spin_unlock(&pr_tmpl->aptpl_reg_lock);
+ /*
+ * At this point all of the pointers in *pr_reg will
+ * be setup, so go ahead and add the registration.
+ */
+
+ __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
+ /*
+ * If this registration is the reservation holder,
+ * make that happen now..
+ */
+ if (pr_reg->pr_res_holder)
+ core_scsi3_aptpl_reserve(dev, tpg,
+ nacl, pr_reg);
+ /*
+ * Reenable pr_aptpl_active to accept new metadata
+ * updates once the SCSI device is active again..
+ */
+ spin_lock(&pr_tmpl->aptpl_reg_lock);
+ pr_tmpl->pr_aptpl_active = 1;
+ }
+ }
+ spin_unlock(&pr_tmpl->aptpl_reg_lock);
+
+ return 0;
+}
+
+int core_scsi3_check_aptpl_registration(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_lun *lun,
+ struct se_lun_acl *lun_acl)
+{
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_node_acl *nacl = lun_acl->se_lun_nacl;
+ struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return 0;
+
+ return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
+ lun->unpacked_lun, nacl, deve);
+}
+
+static void __core_scsi3_dump_registration(
+ struct target_core_fabric_ops *tfo,
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg,
+ int register_type)
+{
+ struct se_portal_group *se_tpg = nacl->se_tpg;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+ " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
+ "_AND_MOVE" : (register_type == 1) ?
+ "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
+ (prf_isid) ? i_buf : "");
+ printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+ tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
+ tfo->tpg_get_tag(se_tpg));
+ printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+ " Port(s)\n", tfo->get_fabric_name(),
+ (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+ TRANSPORT(dev)->name);
+ printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+ " 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
+ pr_reg->pr_res_key, pr_reg->pr_res_generation,
+ pr_reg->pr_reg_aptpl);
+}
+
+/*
+ * this function can be called with struct se_device->dev_reservation_lock
+ * when register_move = 1
+ */
+static void __core_scsi3_add_registration(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg,
+ int register_type,
+ int register_move)
+{
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+
+ /*
+ * Increment PRgeneration counter for struct se_device upon a successful
+ * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action
+ *
+ * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service
+ * action, the struct se_device->dev_reservation_lock will already be held,
+ * so we do not call core_scsi3_pr_generation() which grabs the lock
+ * for the REGISTER.
+ */
+ pr_reg->pr_res_generation = (register_move) ?
+ T10_RES(su_dev)->pr_generation++ :
+ core_scsi3_pr_generation(dev);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
+ pr_reg->pr_reg_deve->def_pr_registered = 1;
+
+ __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
+ */
+ if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
+ return;
+ /*
+ * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
+ * allocated in __core_scsi3_alloc_registration()
+ */
+ list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+ &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+ list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+
+ pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_add_tail(&pr_reg_tmp->pr_reg_list,
+ &pr_tmpl->registration_list);
+ pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
+
+ __core_scsi3_dump_registration(tfo, dev,
+ pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
+ register_type);
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * Drop configfs group dependency reference from
+ * __core_scsi3_alloc_registration()
+ */
+ core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+ }
+}
+
+static int core_scsi3_alloc_registration(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct se_dev_entry *deve,
+ unsigned char *isid,
+ u64 sa_res_key,
+ int all_tg_pt,
+ int aptpl,
+ int register_type,
+ int register_move)
+{
+ struct t10_pr_registration *pr_reg;
+
+ pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(pr_reg))
+ return -1;
+
+ __core_scsi3_add_registration(dev, nacl, pr_reg,
+ register_type, register_move);
+ return 0;
+}
+
+static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ unsigned char *isid)
+{
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+ struct se_portal_group *tpg;
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+ /*
+ * First look for a matching struct se_node_acl
+ */
+ if (pr_reg->pr_reg_nacl != nacl)
+ continue;
+
+ tpg = pr_reg->pr_reg_nacl->se_tpg;
+ /*
+ * If this registration does NOT contain a fabric provided
+ * ISID, then we have found a match.
+ */
+ if (!(pr_reg->isid_present_at_reg)) {
+ /*
+ * Determine if this SCSI device server requires that
+ * SCSI Initiator TransportIDs w/ ISIDs be enforced
+ * for fabric modules (iSCSI) requiring them.
+ */
+ if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+ if (DEV_ATTRIB(dev)->enforce_pr_isids)
+ continue;
+ }
+ atomic_inc(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&pr_tmpl->registration_lock);
+ return pr_reg;
+ }
+ /*
+ * If the *pr_reg contains a fabric defined ISID for multi-value
+ * SCSI Initiator Port TransportIDs, then we expect a valid
+ * matching ISID to be provided by the local SCSI Initiator Port.
+ */
+ if (!(isid))
+ continue;
+ if (strcmp(isid, pr_reg->pr_reg_isid))
+ continue;
+
+ atomic_inc(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&pr_tmpl->registration_lock);
+ return pr_reg;
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+
+ return NULL;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct se_session *sess)
+{
+ struct se_portal_group *tpg = nacl->se_tpg;
+ unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+
+ if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+ memset(&buf[0], 0, PR_REG_ISID_LEN);
+ TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0],
+ PR_REG_ISID_LEN);
+ isid_ptr = &buf[0];
+ }
+
+ return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr);
+}
+
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
+{
+ atomic_dec(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_check_implict_release(
+ struct se_device *dev,
+ struct t10_pr_registration *pr_reg)
+{
+ struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+ struct t10_pr_registration *pr_res_holder;
+ int ret = 0;
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (!(pr_res_holder)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return ret;
+ }
+ if (pr_res_holder == pr_reg) {
+ /*
+ * Perform an implicit RELEASE if the registration that
+ * is being released is holding the reservation.
+ *
+ * From spc4r17, section 5.7.11.1:
+ *
+ * e) If the I_T nexus is the persistent reservation holder
+ * and the persistent reservation is not an all registrants
+ * type, then a PERSISTENT RESERVE OUT command with REGISTER
+ * service action or REGISTER AND IGNORE EXISTING KEY
+ * service action with the SERVICE ACTION RESERVATION KEY
+ * field set to zero (see 5.7.11.3).
+ */
+ __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
+ ret = 1;
+ /*
+ * For 'All Registrants' reservation types, all existing
+ * registrations are still processed as reservation holders
+ * in core_scsi3_pr_seq_non_holder() after the initial
+ * reservation holder is implicitly released here.
+ */
+ } else if (pr_reg->pr_reg_all_tg_pt &&
+ (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
+ pr_reg->pr_reg_nacl->initiatorname)) &&
+ (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
+ " UNREGISTER while existing reservation with matching"
+ " key 0x%016Lx is present from another SCSI Initiator"
+ " Port\n", pr_reg->pr_res_key);
+ ret = -1;
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
+}
+
+/*
+ * Called with struct t10_reservation_template->registration_lock held.
+ */
+static void __core_scsi3_free_registration(
+ struct se_device *dev,
+ struct t10_pr_registration *pr_reg,
+ struct list_head *preempt_and_abort_list,
+ int dec_holders)
+{
+ struct target_core_fabric_ops *tfo =
+ pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ pr_reg->pr_reg_deve->def_pr_registered = 0;
+ pr_reg->pr_reg_deve->pr_res_key = 0;
+ list_del(&pr_reg->pr_reg_list);
+ /*
+ * The caller is accessing *pr_reg via core_scsi3_locate_pr_reg(),
+ * so call core_scsi3_put_pr_reg() to drop our reference.
+ */
+ if (dec_holders)
+ core_scsi3_put_pr_reg(pr_reg);
+ /*
+ * Wait until all references from any other I_T nexuses for this
+ * *pr_reg have been released. Because list_del() is called above,
+ * the last core_scsi3_put_pr_reg(pr_reg) will drop the reference
+ * count back to zero, and we can release *pr_reg.
+ */
+ while (atomic_read(&pr_reg->pr_res_holders) != 0) {
+ spin_unlock(&pr_tmpl->registration_lock);
+ printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
+ tfo->get_fabric_name());
+ cpu_relax();
+ spin_lock(&pr_tmpl->registration_lock);
+ }
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+ " Node: %s%s\n", tfo->get_fabric_name(),
+ pr_reg->pr_reg_nacl->initiatorname,
+ (prf_isid) ? &i_buf[0] : "");
+ printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+ " Port(s)\n", tfo->get_fabric_name(),
+ (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+ TRANSPORT(dev)->name);
+ printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+ " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
+ pr_reg->pr_res_generation);
+
+ if (!(preempt_and_abort_list)) {
+ pr_reg->pr_reg_deve = NULL;
+ pr_reg->pr_reg_nacl = NULL;
+ kfree(pr_reg->pr_aptpl_buf);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ return;
+ }
+ /*
+ * For PREEMPT_AND_ABORT, the *pr_reg entries on preempt_and_abort_list
+ * are released once the ABORT_TASK_SET has completed.
+ */
+ list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
+}
+
+void core_scsi3_free_pr_reg_from_nacl(
+ struct se_device *dev,
+ struct se_node_acl *nacl)
+{
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+ /*
+ * If the passed se_node_acl matches the reservation holder,
+ * release the reservation.
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if ((pr_res_holder != NULL) &&
+ (pr_res_holder->pr_reg_nacl == nacl))
+ __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
+ spin_unlock(&dev->dev_reservation_lock);
+ /*
+ * Release any registration associated with the struct se_node_acl.
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ if (pr_reg->pr_reg_nacl != nacl)
+ continue;
+
+ __core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+}
+
+void core_scsi3_free_all_registrations(
+ struct se_device *dev)
+{
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (pr_res_holder != NULL) {
+ struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ __core_scsi3_complete_pro_release(dev, pr_res_nacl,
+ pr_res_holder, 0);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ __core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+
+ spin_lock(&pr_tmpl->aptpl_reg_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+ pr_reg_aptpl_list) {
+ list_del(&pr_reg->pr_reg_aptpl_list);
+ kfree(pr_reg->pr_aptpl_buf);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ }
+ spin_unlock(&pr_tmpl->aptpl_reg_lock);
+}
+
+static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
+{
+ return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ &tpg->tpg_group.cg_item);
+}
+
+static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
+{
+ configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ &tpg->tpg_group.cg_item);
+
+ atomic_dec(&tpg->tpg_pr_ref_count);
+ smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
+{
+ struct se_portal_group *tpg = nacl->se_tpg;
+
+ if (nacl->dynamic_node_acl)
+ return 0;
+
+ return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ &nacl->acl_group.cg_item);
+}
+
+static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
+{
+ struct se_portal_group *tpg = nacl->se_tpg;
+
+ if (nacl->dynamic_node_acl) {
+ atomic_dec(&nacl->acl_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ return;
+ }
+
+ configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ &nacl->acl_group.cg_item);
+
+ atomic_dec(&nacl->acl_pr_ref_count);
+ smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
+{
+ struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+ struct se_node_acl *nacl;
+ struct se_portal_group *tpg;
+ /*
+ * For nacl->dynamic_node_acl=1
+ */
+ if (!(lun_acl))
+ return 0;
+
+ nacl = lun_acl->se_lun_nacl;
+ tpg = nacl->se_tpg;
+
+ return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ &lun_acl->se_lun_group.cg_item);
+}
+
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
+{
+ struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+ struct se_node_acl *nacl;
+ struct se_portal_group *tpg;
+ /*
+ * For nacl->dynamic_node_acl=1
+ */
+ if (!(lun_acl)) {
+ atomic_dec(&se_deve->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ return;
+ }
+ nacl = lun_acl->se_lun_nacl;
+ tpg = nacl->se_tpg;
+
+ configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ &lun_acl->se_lun_group.cg_item);
+
+ atomic_dec(&se_deve->pr_ref_count);
+ smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_decode_spec_i_port(
+ struct se_cmd *cmd,
+ struct se_portal_group *tpg,
+ unsigned char *l_isid,
+ u64 sa_res_key,
+ int all_tg_pt,
+ int aptpl)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_port *tmp_port;
+ struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct se_node_acl *dest_node_acl = NULL;
+ struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
+ struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
+ struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+ struct list_head tid_dest_list;
+ struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
+ struct target_core_fabric_ops *tmp_tf_ops;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
+ char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ u32 tpdl, tid_len = 0;
+ int ret, dest_local_nexus, prf_isid;
+ u32 dest_rtpi = 0;
+
+ memset(dest_iport, 0, 64);
+ INIT_LIST_HEAD(&tid_dest_list);
+
+ local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ /*
+ * Allocate a struct pr_transport_id_holder, set up the
+ * local_node_acl and local_se_deve pointers, and add it to
+ * struct list_head tid_dest_list for registration
+ * processing in the tid_dest_list loop below.
+ */
+ tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
+ if (!(tidh_new)) {
+ printk(KERN_ERR "Unable to allocate tidh_new\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ INIT_LIST_HEAD(&tidh_new->dest_list);
+ tidh_new->dest_tpg = tpg;
+ tidh_new->dest_node_acl = se_sess->se_node_acl;
+ tidh_new->dest_se_deve = local_se_deve;
+
+ local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ se_sess->se_node_acl, local_se_deve, l_isid,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(local_pr_reg)) {
+ kfree(tidh_new);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ tidh_new->dest_pr_reg = local_pr_reg;
+ /*
+ * The local I_T nexus does not hold any configfs dependencies,
+ * so we set tidh_new->dest_local_nexus=1 to prevent the
+ * configfs_undepend_item() calls in the tid_dest_list loops below.
+ */
+ tidh_new->dest_local_nexus = 1;
+ list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+ /*
+ * For a PERSISTENT RESERVE OUT SPEC_I_PT (specify initiator ports)
+ * payload, first extract the TransportID Parameter Data Length, and
+ * make sure the value matches the SCSI expected data transfer length.
+ */
+ tpdl = (buf[24] & 0xff) << 24;
+ tpdl |= (buf[25] & 0xff) << 16;
+ tpdl |= (buf[26] & 0xff) << 8;
+ tpdl |= buf[27] & 0xff;
+
+ if ((tpdl + 28) != cmd->data_length) {
+ printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+ " does not equal CDB data_length: %u\n", tpdl,
+ cmd->data_length);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ /*
+ * Start processing the received transport IDs using the
+ * receiving I_T Nexus portal's fabric dependent methods to
+ * obtain the SCSI Initiator Port/Device Identifiers.
+ */
+ ptr = &buf[28];
+
+ while (tpdl > 0) {
+ proto_ident = (ptr[0] & 0x0f);
+ dest_tpg = NULL;
+
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
+ tmp_tpg = tmp_port->sep_tpg;
+ if (!(tmp_tpg))
+ continue;
+ tmp_tf_ops = TPG_TFO(tmp_tpg);
+ if (!(tmp_tf_ops))
+ continue;
+ if (!(tmp_tf_ops->get_fabric_proto_ident) ||
+ !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
+ continue;
+ /*
+ * Look for the matching proto_ident provided by
+ * the received TransportID
+ */
+ tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
+ if (tmp_proto_ident != proto_ident)
+ continue;
+ dest_rtpi = tmp_port->sep_rtpi;
+
+ i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
+ tmp_tpg, (const char *)ptr, &tid_len,
+ &iport_ptr);
+ if (!(i_str))
+ continue;
+
+ atomic_inc(&tmp_tpg->tpg_pr_ref_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&dev->se_port_lock);
+
+ ret = core_scsi3_tpg_depend_item(tmp_tpg);
+ if (ret != 0) {
+ printk(KERN_ERR " core_scsi3_tpg_depend_item()"
+ " for tmp_tpg\n");
+ atomic_dec(&tmp_tpg->tpg_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+ /*
+ * Locate the destination initiator ACL to be registered
+ * from the decoded fabric-module-specific TransportID
+ * at *i_str.
+ */
+ spin_lock_bh(&tmp_tpg->acl_node_lock);
+ dest_node_acl = __core_tpg_get_initiator_node_acl(
+ tmp_tpg, i_str);
+ if (dest_node_acl) {
+ atomic_inc(&dest_node_acl->acl_pr_ref_count);
+ smp_mb__after_atomic_inc();
+ }
+ spin_unlock_bh(&tmp_tpg->acl_node_lock);
+
+ if (!(dest_node_acl)) {
+ core_scsi3_tpg_undepend_item(tmp_tpg);
+ spin_lock(&dev->se_port_lock);
+ continue;
+ }
+
+ ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+ if (ret != 0) {
+ printk(KERN_ERR "configfs_depend_item() failed"
+ " for dest_node_acl->acl_group\n");
+ atomic_dec(&dest_node_acl->acl_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ core_scsi3_tpg_undepend_item(tmp_tpg);
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+
+ dest_tpg = tmp_tpg;
+ printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
+ " %s Port RTPI: %hu\n",
+ TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_node_acl->initiatorname, dest_rtpi);
+
+ spin_lock(&dev->se_port_lock);
+ break;
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ if (!(dest_tpg)) {
+ printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
+ " dest_tpg\n");
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+#if 0
+ printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+ " tid_len: %d for %s + %s\n",
+ TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length,
+ tpdl, tid_len, i_str, iport_ptr);
+#endif
+ if (tid_len > tpdl) {
+ printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+ " %u for Transport ID: %s\n", tid_len, ptr);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ /*
+ * Locate the destination struct se_dev_entry pointer for matching
+ * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
+ * Target Port.
+ */
+ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
+ dest_rtpi);
+ if (!(dest_se_deve)) {
+ printk(KERN_ERR "Unable to locate %s dest_se_deve"
+ " from destination RTPI: %hu\n",
+ TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_rtpi);
+
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+ if (ret < 0) {
+ printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
+ " failed\n");
+ atomic_dec(&dest_se_deve->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+#if 0
+ printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+ " dest_se_deve mapped_lun: %u\n",
+ TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
+#endif
+ /*
+ * Skip any TransportIDs that already have a registration for
+ * this target port.
+ */
+ pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ if (pr_reg_e) {
+ core_scsi3_put_pr_reg(pr_reg_e);
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ptr += tid_len;
+ tpdl -= tid_len;
+ tid_len = 0;
+ continue;
+ }
+ /*
+ * Allocate a struct pr_transport_id_holder and set up
+ * the dest_node_acl and dest_se_deve pointers for the
+ * loop below.
+ */
+ tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
+ GFP_KERNEL);
+ if (!(tidh_new)) {
+ printk(KERN_ERR "Unable to allocate tidh_new\n");
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+ INIT_LIST_HEAD(&tidh_new->dest_list);
+ tidh_new->dest_tpg = dest_tpg;
+ tidh_new->dest_node_acl = dest_node_acl;
+ tidh_new->dest_se_deve = dest_se_deve;
+
+ /*
+ * Allocate, but do NOT add the registration for the
+ * TransportID referenced SCSI Initiator port. This is
+ * done because of the following from spc4r17 in section
+ * 6.14.3 wrt SPEC_I_PT:
+ *
+ * "If a registration fails for any initiator port (e.g., if the
+ * logical unit does not have enough resources available to
+ * hold the registration information), no registrations shall be
+ * made, and the command shall be terminated with
+ * CHECK CONDITION status."
+ *
+ * That means we call __core_scsi3_alloc_registration() here,
+ * and then call __core_scsi3_add_registration() in the
+ * 2nd loop which will never fail.
+ */
+ dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ dest_node_acl, dest_se_deve, iport_ptr,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(dest_pr_reg)) {
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ kfree(tidh_new);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ tidh_new->dest_pr_reg = dest_pr_reg;
+ list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+ ptr += tid_len;
+ tpdl -= tid_len;
+ tid_len = 0;
+
+ }
+ /*
+ * Go ahead and create registrations from tid_dest_list for the
+ * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
+ * and dest_se_deve.
+ *
+ * The SA Reservation Key from the PROUT is set for the
+ * registration, and ALL_TG_PT is also passed. ALL_TG_PT=1
+ * means that the TransportID Initiator port will be
+ * registered on all of the target ports in the SCSI target device;
+ * ALL_TG_PT=0 means the registration will only be for the
+ * SCSI target port the PROUT REGISTER with SPEC_I_PT=1
+ * was received on.
+ */
+ list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+ dest_tpg = tidh->dest_tpg;
+ dest_node_acl = tidh->dest_node_acl;
+ dest_se_deve = tidh->dest_se_deve;
+ dest_pr_reg = tidh->dest_pr_reg;
+ dest_local_nexus = tidh->dest_local_nexus;
+
+ list_del(&tidh->dest_list);
+ kfree(tidh);
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl,
+ dest_pr_reg, 0, 0);
+
+ printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
+ " registered Transport ID for Node: %s%s Mapped LUN:"
+ " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_node_acl->initiatorname, (prf_isid) ?
+ &i_buf[0] : "", dest_se_deve->mapped_lun);
+
+ if (dest_local_nexus)
+ continue;
+
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ }
+
+ return 0;
+out:
+ /*
+ * For the failure case, release everything from tid_dest_list
+ * including *dest_pr_reg and the configfs dependencies.
+ */
+ list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+ dest_tpg = tidh->dest_tpg;
+ dest_node_acl = tidh->dest_node_acl;
+ dest_se_deve = tidh->dest_se_deve;
+ dest_pr_reg = tidh->dest_pr_reg;
+ dest_local_nexus = tidh->dest_local_nexus;
+
+ list_del(&tidh->dest_list);
+ kfree(tidh);
+ /*
+ * Release any extra ALL_TG_PT=1 registrations for
+ * the SPEC_I_PT=1 case.
+ */
+ list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+ &dest_pr_reg->pr_reg_atp_list,
+ pr_reg_atp_mem_list) {
+ list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+ core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+ }
+
+ kfree(dest_pr_reg->pr_aptpl_buf);
+ kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
+
+ if (dest_local_nexus)
+ continue;
+
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ }
+ return ret;
+}
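
[Editorial aside, not part of the patch] The SPEC_I_PT handling above reads the TransportID Parameter Data Length from bytes 24-27 of the PROUT parameter data (big-endian) and expects the TransportIDs to start at byte 28, so 28 + tpdl must equal the expected data transfer length. A minimal userspace sketch of the same extraction and check; the function name, buffer, and values below are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the tpdl extraction and validation in
 * core_scsi3_decode_spec_i_port(): returns the TransportID Parameter
 * Data Length, or -1 when 28 + tpdl does not match data_length. */
static long spec_i_pt_tpdl(const unsigned char *buf, uint32_t data_length)
{
	uint32_t tpdl;

	/* Bytes 24-27: TransportID Parameter Data Length, big-endian */
	tpdl  = (uint32_t)buf[24] << 24;
	tpdl |= (uint32_t)buf[25] << 16;
	tpdl |= (uint32_t)buf[26] << 8;
	tpdl |= (uint32_t)buf[27];

	if ((tpdl + 28) != data_length)
		return -1;
	return (long)tpdl;
}

int main(void)
{
	unsigned char pl[52] = { 0 };

	pl[27] = 24;	/* one hypothetical 24-byte TransportID */
	printf("tpdl = %ld\n", spec_i_pt_tpdl(pl, sizeof(pl)));
	return 0;
}
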
+
+/*
+ * Called with struct se_device->dev_reservation_lock held
+ */
+static int __core_scsi3_update_aptpl_buf(
+ struct se_device *dev,
+ unsigned char *buf,
+ u32 pr_aptpl_buf_len,
+ int clear_aptpl_metadata)
+{
+ struct se_lun *lun;
+ struct se_portal_group *tpg;
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct t10_pr_registration *pr_reg;
+ unsigned char tmp[512], isid_buf[32];
+ ssize_t len = 0;
+ int reg_count = 0;
+
+ memset(buf, 0, pr_aptpl_buf_len);
+ /*
+ * Called to clear metadata once APTPL has been deactivated.
+ */
+ if (clear_aptpl_metadata) {
+ snprintf(buf, pr_aptpl_buf_len,
+ "No Registrations or Reservations\n");
+ return 0;
+ }
+ /*
+ * Walk the registration list.
+ */
+ spin_lock(&T10_RES(su_dev)->registration_lock);
+ list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ pr_reg_list) {
+
+ tmp[0] = '\0';
+ isid_buf[0] = '\0';
+ tpg = pr_reg->pr_reg_nacl->se_tpg;
+ lun = pr_reg->pr_reg_tg_pt_lun;
+ /*
+ * Write out any ISID value to APTPL metadata that was included
+ * in the original registration.
+ */
+ if (pr_reg->isid_present_at_reg)
+ snprintf(isid_buf, 32, "initiator_sid=%s\n",
+ pr_reg->pr_reg_isid);
+ /*
+ * Include special metadata if the pr_reg matches the
+ * reservation holder.
+ */
+ if (dev->dev_pr_res_holder == pr_reg) {
+ snprintf(tmp, 512, "PR_REG_START: %d"
+ "\ninitiator_fabric=%s\n"
+ "initiator_node=%s\n%s"
+ "sa_res_key=%llu\n"
+ "res_holder=1\nres_type=%02x\n"
+ "res_scope=%02x\nres_all_tg_pt=%d\n"
+ "mapped_lun=%u\n", reg_count,
+ TPG_TFO(tpg)->get_fabric_name(),
+ pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+ pr_reg->pr_res_key, pr_reg->pr_res_type,
+ pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
+ pr_reg->pr_res_mapped_lun);
+ } else {
+ snprintf(tmp, 512, "PR_REG_START: %d\n"
+ "initiator_fabric=%s\ninitiator_node=%s\n%s"
+ "sa_res_key=%llu\nres_holder=0\n"
+ "res_all_tg_pt=%d\nmapped_lun=%u\n",
+ reg_count, TPG_TFO(tpg)->get_fabric_name(),
+ pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+ pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
+ pr_reg->pr_res_mapped_lun);
+ }
+
+ if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+ printk(KERN_ERR "Unable to update renaming"
+ " APTPL metadata\n");
+ spin_unlock(&T10_RES(su_dev)->registration_lock);
+ return -1;
+ }
+ len += sprintf(buf+len, "%s", tmp);
+
+ /*
+ * Include information about the associated SCSI target port.
+ */
+ snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
+ "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
+ " %d\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+
+ if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+ printk(KERN_ERR "Unable to update renaming"
+ " APTPL metadata\n");
+ spin_unlock(&T10_RES(su_dev)->registration_lock);
+ return -1;
+ }
+ len += sprintf(buf+len, "%s", tmp);
+ reg_count++;
+ }
+ spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+ if (!(reg_count))
+ len += sprintf(buf+len, "No Registrations or Reservations");
+
+ return 0;
+}
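
[Editorial aside, not part of the patch] For reference, the snprintf calls above serialize each registration as a flat key=value text record bracketed by PR_REG_START/PR_REG_END. The sketch below prints one non-holder record in that layout; the fabric name, IQNs, key, and port numbers are purely hypothetical, and the optional initiator_sid= line is omitted:

#include <stdio.h>

/* Illustrative only: one non-holder APTPL registration record in the
 * format emitted by __core_scsi3_update_aptpl_buf(). */
int main(void)
{
	printf("PR_REG_START: %d\n"
	       "initiator_fabric=%s\ninitiator_node=%s\n"
	       "sa_res_key=%llu\nres_holder=0\n"
	       "res_all_tg_pt=%d\nmapped_lun=%u\n"
	       "target_fabric=%s\ntarget_node=%s\n"
	       "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\n"
	       "PR_REG_END: %d\n",
	       0, "iSCSI", "iqn.1994-05.com.example:initiator",
	       0xabcdULL, 0, 0u,
	       "iSCSI", "iqn.2003-01.org.example.target:tgt1",
	       (unsigned short)1, (unsigned short)3, 0u, 0);
	return 0;
}
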
+
+static int core_scsi3_update_aptpl_buf(
+ struct se_device *dev,
+ unsigned char *buf,
+ u32 pr_aptpl_buf_len,
+ int clear_aptpl_metadata)
+{
+ int ret;
+
+ spin_lock(&dev->dev_reservation_lock);
+ ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+ clear_aptpl_metadata);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
+}
+
+/*
+ * Called with struct se_device->aptpl_file_mutex held
+ */
+static int __core_scsi3_write_aptpl_to_file(
+ struct se_device *dev,
+ unsigned char *buf,
+ u32 pr_aptpl_buf_len)
+{
+ struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn;
+ struct file *file;
+ struct iovec iov[1];
+ mm_segment_t old_fs;
+ int flags = O_RDWR | O_CREAT | O_TRUNC;
+ char path[512];
+ int ret;
+
+ memset(iov, 0, sizeof(struct iovec));
+ memset(path, 0, 512);
+
+ if (strlen(&wwn->unit_serial[0]) > 512) {
+ printk(KERN_ERR "WWN value for struct se_device does not fit"
+ " into path buffer\n");
+ return -1;
+ }
+
+ snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
+ file = filp_open(path, flags, 0600);
+ if (IS_ERR(file) || !file || !file->f_dentry) {
+ printk(KERN_ERR "filp_open(%s) for APTPL metadata"
+ " failed\n", path);
+ return -1;
+ }
+
+ iov[0].iov_base = &buf[0];
+ if (!(pr_aptpl_buf_len))
+ iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
+ else
+ iov[0].iov_len = pr_aptpl_buf_len;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+ set_fs(old_fs);
+
+ if (ret < 0) {
+ printk("Error writing APTPL metadata file: %s\n", path);
+ filp_close(file, NULL);
+ return -1;
+ }
+ filp_close(file, NULL);
+
+ return 0;
+}
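
[Editorial aside, not part of the patch] The APTPL metadata for a device ends up in a per-device file named after the T10 unit serial, opened O_RDWR|O_CREAT|O_TRUNC with mode 0600. A tiny sketch of the resulting path layout, using a made-up unit serial in place of t10_wwn->unit_serial:

#include <stdio.h>

int main(void)
{
	char path[512];
	/* Hypothetical T10 unit serial for illustration only. */
	const char *unit_serial = "8a2f661c-3b4d-4a1e-9d3e-000000000001";

	snprintf(path, sizeof(path), "/var/target/pr/aptpl_%s", unit_serial);
	printf("%s\n", path);
	return 0;
}
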
+
+static int core_scsi3_update_and_write_aptpl(
+ struct se_device *dev,
+ unsigned char *in_buf,
+ u32 in_pr_aptpl_buf_len)
+{
+ unsigned char null_buf[64], *buf;
+ u32 pr_aptpl_buf_len;
+ int ret, clear_aptpl_metadata = 0;
+ /*
+ * Can be called with a NULL pointer from PROUT service action CLEAR
+ */
+ if (!(in_buf)) {
+ memset(null_buf, 0, 64);
+ buf = &null_buf[0];
+ /*
+ * This will clear the APTPL metadata to:
+ * "No Registrations or Reservations" status
+ */
+ pr_aptpl_buf_len = 64;
+ clear_aptpl_metadata = 1;
+ } else {
+ buf = in_buf;
+ pr_aptpl_buf_len = in_pr_aptpl_buf_len;
+ }
+
+ ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+ clear_aptpl_metadata);
+ if (ret != 0)
+ return -1;
+ /*
+ * __core_scsi3_write_aptpl_to_file() will call strlen()
+ * on the passed buf to determine pr_aptpl_buf_len.
+ */
+ ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
+ if (ret != 0)
+ return -1;
+
+ return ret;
+}
+
+static int core_scsi3_emulate_pro_register(
+ struct se_cmd *cmd,
+ u64 res_key,
+ u64 sa_res_key,
+ int aptpl,
+ int all_tg_pt,
+ int spec_i_pt,
+ int ignore_key)
+{
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_dev_entry *se_deve;
+ struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_portal_group *se_tpg;
+ struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ /* Used for APTPL metadata w/ UNREGISTER */
+ unsigned char *pr_aptpl_buf = NULL;
+ unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+ int pr_holder = 0, ret = 0, type;
+
+ if (!(se_sess) || !(se_lun)) {
+ printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ se_tpg = se_sess->se_tpg;
+ se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+
+ if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+ memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
+ TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0],
+ PR_REG_ISID_LEN);
+ isid_ptr = &isid_buf[0];
+ }
+ /*
+ * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
+ */
+ pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+ if (!(pr_reg_e)) {
+ if (res_key) {
+ printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
+ " for SA REGISTER, returning CONFLICT\n");
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * Do nothing but return GOOD status.
+ */
+ if (!(sa_res_key))
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+
+ if (!(spec_i_pt)) {
+ /*
+ * Perform the Service Action REGISTER on the Initiator
+ * Port Endpoint that the PRO was received from on the
+ * Logical Unit of the SCSI device server.
+ */
+ ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ se_sess->se_node_acl, se_deve, isid_ptr,
+ sa_res_key, all_tg_pt, aptpl,
+ ignore_key, 0);
+ if (ret != 0) {
+ printk(KERN_ERR "Unable to allocate"
+ " struct t10_pr_registration\n");
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ } else {
+ /*
+ * Register both the Initiator port that received
+ * PROUT SA REGISTER + SPEC_I_PT=1 and extract SCSI
+ * TransportID from Parameter list and loop through
+ * fabric dependent parameter list while calling
+ * logic from core_scsi3_alloc_registration() for
+ * each TransportID provided SCSI Initiator Port/Device.
+ */
+ ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
+ isid_ptr, sa_res_key, all_tg_pt, aptpl);
+ if (ret != 0)
+ return ret;
+ }
+ /*
+ * Nothing left to do for the APTPL=0 case.
+ */
+ if (!(aptpl)) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+ printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+ " REGISTER\n");
+ return 0;
+ }
+ /*
+ * Locate the newly allocated local I_T Nexus *pr_reg, and
+ * update the APTPL metadata information using its
+ * preallocated *pr_reg->pr_aptpl_buf.
+ */
+ pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+ se_sess->se_node_acl, se_sess);
+
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &pr_reg->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret)) {
+ pr_tmpl->pr_aptpl_active = 1;
+ printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+ }
+
+ core_scsi3_put_pr_reg(pr_reg);
+ return ret;
+ } else {
+ /*
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg = pr_reg_e;
+ type = pr_reg->pr_res_type;
+
+ if (!(ignore_key)) {
+ if (res_key != pr_reg->pr_res_key) {
+ printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+ " res_key: 0x%016Lx does not match"
+ " existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key,
+ pr_reg->pr_res_key);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ }
+ if (spec_i_pt) {
+ printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
+ " set while sa_res_key=0\n");
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * An existing ALL_TG_PT=1 registration being released
+ * must also set ALL_TG_PT=1 in the incoming PROUT.
+ */
+ if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
+ printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+ " registration exists, but ALL_TG_PT=1 bit not"
+ " present in received PROUT\n");
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+ /*
+ * Allocate APTPL metadata buffer used for UNREGISTER ops
+ */
+ if (aptpl) {
+ pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
+ GFP_KERNEL);
+ if (!(pr_aptpl_buf)) {
+ printk(KERN_ERR "Unable to allocate"
+ " pr_aptpl_buf\n");
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ }
+ /*
+ * sa_res_key=0: Unregister the Reservation Key for the registered
+ * I_T Nexus. sa_res_key!=0: Change the Reservation Key for the
+ * registered I_T Nexus.
+ */
+ if (!(sa_res_key)) {
+ pr_holder = core_scsi3_check_implict_release(
+ SE_DEV(cmd), pr_reg);
+ if (pr_holder < 0) {
+ kfree(pr_aptpl_buf);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+
+ spin_lock(&pr_tmpl->registration_lock);
+ /*
+ * Release all ALL_TG_PT=1 registrations for the matching SCSI Initiator Port
+ * and matching pr_res_key.
+ */
+ if (pr_reg->pr_reg_all_tg_pt) {
+ list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+
+ if (!(pr_reg_p->pr_reg_all_tg_pt))
+ continue;
+
+ if (pr_reg_p->pr_res_key != res_key)
+ continue;
+
+ if (pr_reg == pr_reg_p)
+ continue;
+
+ if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+ pr_reg_p->pr_reg_nacl->initiatorname))
+ continue;
+
+ __core_scsi3_free_registration(dev,
+ pr_reg_p, NULL, 0);
+ }
+ }
+ /*
+ * Release the calling I_T Nexus registration now.
+ */
+ __core_scsi3_free_registration(SE_DEV(cmd), pr_reg,
+ NULL, 1);
+ /*
+ * From spc4r17, section 5.7.11.3 Unregistering
+ *
+ * If the persistent reservation is a registrants only
+ * type, the device server shall establish a unit
+ * attention condition for the initiator port associated
+ * with every registered I_T nexus except for the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command was
+ * received, with the additional sense code set to
+ * RESERVATIONS RELEASED.
+ */
+ if (pr_holder &&
+ ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+ (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
+ list_for_each_entry(pr_reg_p,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+
+ core_scsi3_ua_allocate(
+ pr_reg_p->pr_reg_nacl,
+ pr_reg_p->pr_res_mapped_lun,
+ 0x2A,
+ ASCQ_2AH_RESERVATIONS_RELEASED);
+ }
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+
+ if (!(aptpl)) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+ printk("SPC-3 PR: Set APTPL Bit Deactivated"
+ " for UNREGISTER\n");
+ return 0;
+ }
+
+ ret = core_scsi3_update_and_write_aptpl(dev,
+ &pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret)) {
+ pr_tmpl->pr_aptpl_active = 1;
+ printk("SPC-3 PR: Set APTPL Bit Activated"
+ " for UNREGISTER\n");
+ }
+
+ kfree(pr_aptpl_buf);
+ return ret;
+ } else {
+ /*
+ * Increment PRgeneration counter for struct se_device
+ * upon a successful REGISTER, see spc4r17 section 6.3.2
+ * READ_KEYS service action.
+ */
+ pr_reg->pr_res_generation = core_scsi3_pr_generation(
+ SE_DEV(cmd));
+ pr_reg->pr_res_key = sa_res_key;
+ printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+ " Key for %s to: 0x%016Lx PRgeneration:"
+ " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(),
+ (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
+ pr_reg->pr_reg_nacl->initiatorname,
+ pr_reg->pr_res_key, pr_reg->pr_res_generation);
+
+ if (!(aptpl)) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+ core_scsi3_put_pr_reg(pr_reg);
+ printk("SPC-3 PR: Set APTPL Bit Deactivated"
+ " for REGISTER\n");
+ return 0;
+ }
+
+ ret = core_scsi3_update_and_write_aptpl(dev,
+ &pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret)) {
+ pr_tmpl->pr_aptpl_active = 1;
+ printk("SPC-3 PR: Set APTPL Bit Activated"
+ " for REGISTER\n");
+ }
+
+ kfree(pr_aptpl_buf);
+ core_scsi3_put_pr_reg(pr_reg);
+ }
+ }
+ return 0;
+}
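
[Editorial aside, not part of the patch] The branching in core_scsi3_emulate_pro_register() above reduces to a small decision table. The sketch below only restates the flow visible in that function and deliberately omits the SPEC_I_PT/ALL_TG_PT consistency checks and the APTPL bookkeeping; the enum and helper names are hypothetical:

#include <stdio.h>

/* Hypothetical summary of the REGISTER / REGISTER_AND_IGNORE_EXISTING_KEY
 * flow shown above. */
enum pro_register_action {
	PRO_REG_CONFLICT,	/* RESERVATION CONFLICT */
	PRO_REG_NOOP,		/* no registration and sa_res_key=0: GOOD */
	PRO_REG_CREATE,		/* allocate a new registration (or SPEC_I_PT decode) */
	PRO_REG_UNREGISTER,	/* existing registration and sa_res_key=0 */
	PRO_REG_CHANGE_KEY,	/* existing registration and sa_res_key!=0 */
};

static enum pro_register_action classify_pro_register(int have_reg,
		unsigned long long reg_key, unsigned long long res_key,
		unsigned long long sa_res_key, int ignore_key)
{
	if (!have_reg) {
		if (res_key)
			return PRO_REG_CONFLICT;
		return sa_res_key ? PRO_REG_CREATE : PRO_REG_NOOP;
	}
	if (!ignore_key && res_key != reg_key)
		return PRO_REG_CONFLICT;
	return sa_res_key ? PRO_REG_CHANGE_KEY : PRO_REG_UNREGISTER;
}

int main(void)
{
	/* A registered I_T nexus changing its key from 0x1 to 0x2 */
	printf("action = %d\n", classify_pro_register(1, 0x1, 0x1, 0x2, 0));
	return 0;
}
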
+
+unsigned char *core_scsi3_pr_dump_type(int type)
+{
+ switch (type) {
+ case PR_TYPE_WRITE_EXCLUSIVE:
+ return "Write Exclusive Access";
+ case PR_TYPE_EXCLUSIVE_ACCESS:
+ return "Exclusive Access";
+ case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+ return "Write Exclusive Access, Registrants Only";
+ case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+ return "Exclusive Access, Registrants Only";
+ case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+ return "Write Exclusive Access, All Registrants";
+ case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+ return "Exclusive Access, All Registrants";
+ default:
+ break;
+ }
+
+ return "Unknown SPC-3 PR Type";
+}
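
[Editorial aside, not part of the patch] For readers matching these strings against a PERSISTENT RESERVE OUT CDB, the PR_TYPE_* macros correspond to the standard SPC-3 TYPE codes. A self-contained restatement with the values spelled out locally (the enum names here are not the kernel's macros):

#include <stdio.h>

/* SPC-3 persistent reservation TYPE codes matching the strings returned
 * by core_scsi3_pr_dump_type(); names are local to this sketch. */
enum spc3_pr_type {
	SPC3_PR_WRITE_EXCLUSIVE		= 0x01,
	SPC3_PR_EXCLUSIVE_ACCESS	= 0x03,
	SPC3_PR_WRITE_EXCL_REG_ONLY	= 0x05,
	SPC3_PR_EXCL_ACCESS_REG_ONLY	= 0x06,
	SPC3_PR_WRITE_EXCL_ALL_REG	= 0x07,
	SPC3_PR_EXCL_ACCESS_ALL_REG	= 0x08,
};

int main(void)
{
	printf("Write Exclusive Access TYPE code: 0x%02x\n",
	       SPC3_PR_WRITE_EXCLUSIVE);
	return 0;
}
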
+
+static int core_scsi3_pro_reserve(
+ struct se_cmd *cmd,
+ struct se_device *dev,
+ int type,
+ int scope,
+ u64 res_key)
+{
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct se_dev_entry *se_deve;
+ struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_portal_group *se_tpg;
+ struct t10_pr_registration *pr_reg, *pr_res_holder;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int ret, prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+ if (!(se_sess) || !(se_lun)) {
+ printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ se_tpg = se_sess->se_tpg;
+ se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ /*
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ se_sess);
+ if (!(pr_reg)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ " PR_REGISTERED *pr_reg for RESERVE\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * From spc4r17 Section 5.7.9: Reserving:
+ *
+ * An application client creates a persistent reservation by issuing
+ * a PERSISTENT RESERVE OUT command with RESERVE service action through
+ * a registered I_T nexus with the following parameters:
+ * a) RESERVATION KEY set to the value of the reservation key that is
+ * registered with the logical unit for the I_T nexus; and
+ */
+ if (res_key != pr_reg->pr_res_key) {
+ printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+ " does not match existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * From spc4r17 Section 5.7.9: Reserving:
+ *
+ * From above:
+ * b) TYPE field and SCOPE field set to the persistent reservation
+ * being created.
+ *
+ * Only one persistent reservation is allowed at a time per logical unit
+ * and that persistent reservation has a scope of LU_SCOPE.
+ */
+ if (scope != PR_SCOPE_LU_SCOPE) {
+ printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * See if we have an existing PR reservation holder pointer at
+ * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration
+ * *pr_res_holder.
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if ((pr_res_holder)) {
+ /*
+ * From spc4r17 Section 5.7.9: Reserving:
+ *
+ * If the device server receives a PERSISTENT RESERVE OUT
+ * command from an I_T nexus other than a persistent reservation
+ * holder (see 5.7.10) that attempts to create a persistent
+ * reservation when a persistent reservation already exists for
+ * the logical unit, then the command shall be completed with
+ * RESERVATION CONFLICT status.
+ */
+ if (pr_res_holder != pr_reg) {
+ struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+ " [%s]: %s while reservation already held by"
+ " [%s]: %s, returning RESERVATION_CONFLICT\n",
+ CMD_TFO(cmd)->get_fabric_name(),
+ se_sess->se_node_acl->initiatorname,
+ TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_holder->pr_reg_nacl->initiatorname);
+
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * From spc4r17 Section 5.7.9: Reserving:
+ *
+ * If a persistent reservation holder attempts to modify the
+ * type or scope of an existing persistent reservation, the
+ * command shall be completed with RESERVATION CONFLICT status.
+ */
+ if ((pr_res_holder->pr_res_type != type) ||
+ (pr_res_holder->pr_res_scope != scope)) {
+ struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+ " [%s]: %s trying to change TYPE and/or SCOPE,"
+ " while reservation already held by [%s]: %s,"
+ " returning RESERVATION_CONFLICT\n",
+ CMD_TFO(cmd)->get_fabric_name(),
+ se_sess->se_node_acl->initiatorname,
+ TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_holder->pr_reg_nacl->initiatorname);
+
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * From spc4r17 Section 5.7.9: Reserving:
+ *
+ * If the device server receives a PERSISTENT RESERVE OUT
+ * command with RESERVE service action where the TYPE field and
+ * the SCOPE field contain the same values as the existing type
+ * and scope from a persistent reservation holder, it shall not
+ * make any change to the existing persistent reservation and
+ * shall complete the command with GOOD status.
+ */
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ }
+ /*
+ * Otherwise, our *pr_reg becomes the PR reservation holder for said
+ * TYPE/SCOPE. Also set the received scope and type in *pr_reg.
+ */
+ pr_reg->pr_res_scope = scope;
+ pr_reg->pr_res_type = type;
+ pr_reg->pr_res_holder = 1;
+ dev->dev_pr_res_holder = pr_reg;
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
+ " reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+ CMD_TFO(cmd)->get_fabric_name(),
+ se_sess->se_node_acl->initiatorname,
+ (prf_isid) ? &i_buf[0] : "");
+ spin_unlock(&dev->dev_reservation_lock);
+
+ if (pr_tmpl->pr_aptpl_active) {
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &pr_reg->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret))
+ printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+ " for RESERVE\n");
+ }
+
+ core_scsi3_put_pr_reg(pr_reg);
+ return 0;
+}
+
+static int core_scsi3_emulate_pro_reserve(
+ struct se_cmd *cmd,
+ int type,
+ int scope,
+ u64 res_key)
+{
+ struct se_device *dev = cmd->se_dev;
+ int ret = 0;
+
+ switch (type) {
+ case PR_TYPE_WRITE_EXCLUSIVE:
+ case PR_TYPE_EXCLUSIVE_ACCESS:
+ case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+ case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+ case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+ case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+ ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
+ break;
+ default:
+ printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
+ " 0x%02x\n", type);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ return ret;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_release(
+ struct se_device *dev,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int explict)
+{
+ struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+ /*
+ * Go ahead and release the current PR reservation holder.
+ */
+ dev->dev_pr_res_holder = NULL;
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+ " reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ tfo->get_fabric_name(), (explict) ? "explicit" : "implicit",
+ core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
+ tfo->get_fabric_name(), se_nacl->initiatorname,
+ (prf_isid) ? &i_buf[0] : "");
+ /*
+ * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
+ */
+ pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
+}
+
+static int core_scsi3_emulate_pro_release(
+ struct se_cmd *cmd,
+ int type,
+ int scope,
+ u64 res_key)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct se_lun *se_lun = SE_LUN(cmd);
+ struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ int ret, all_reg = 0;
+
+ if (!(se_sess) || !(se_lun)) {
+ printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+ if (!(pr_reg)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ " PR_REGISTERED *pr_reg for RELEASE\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * From spc4r17 Section 5.7.11.2 Releasing:
+ *
+ * If there is no persistent reservation or in response to a persistent
+ * reservation release request from a registered I_T nexus that is not a
+ * persistent reservation holder (see 5.7.10), the device server shall
+ * do the following:
+ *
+ * a) Not release the persistent reservation, if any;
+ * b) Not remove any registrations; and
+ * c) Complete the command with GOOD status.
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (!(pr_res_holder)) {
+ /*
+ * No persistent reservation, return GOOD status.
+ */
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ }
+ if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+ all_reg = 1;
+
+ if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
+ /*
+ * Non 'All Registrants' PR Type cases:
+ * Release request from a registered I_T nexus that is not a
+ * persistent reservation holder. Return GOOD status.
+ */
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ }
+ /*
+ * From spc4r17 Section 5.7.11.2 Releasing:
+ *
+ * Only the persistent reservation holder (see 5.7.10) is allowed to
+ * release a persistent reservation.
+ *
+ * An application client releases the persistent reservation by issuing
+ * a PERSISTENT RESERVE OUT command with RELEASE service action through
+ * an I_T nexus that is a persistent reservation holder with the
+ * following parameters:
+ *
+ * a) RESERVATION KEY field set to the value of the reservation key
+ * that is registered with the logical unit for the I_T nexus;
+ */
+ if (res_key != pr_reg->pr_res_key) {
+ printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+ " does not match existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * From spc4r17 Section 5.7.11.2 Releasing and above:
+ *
+ * b) TYPE field and SCOPE field set to match the persistent
+ * reservation being released.
+ */
+ if ((pr_res_holder->pr_res_type != type) ||
+ (pr_res_holder->pr_res_scope != scope)) {
+ struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
+ " reservation from [%s]: %s with different TYPE "
+ "and/or SCOPE while reservation already held by"
+ " [%s]: %s, returning RESERVATION_CONFLICT\n",
+ CMD_TFO(cmd)->get_fabric_name(),
+ se_sess->se_node_acl->initiatorname,
+ TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_holder->pr_reg_nacl->initiatorname);
+
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * In response to a persistent reservation release request from the
+ * persistent reservation holder the device server shall perform a
+ * release by doing the following as an uninterrupted series of actions:
+ * a) Release the persistent reservation;
+ * b) Not remove any registration(s);
+ * c) If the released persistent reservation is a registrants only type
+ * or all registrants type persistent reservation,
+ * the device server shall establish a unit attention condition for
+ * the initiator port associated with every registered
+ * I_T nexus other than the I_T nexus on which the PERSISTENT
+ * RESERVE OUT command with RELEASE service action was received,
+ * with the additional sense code set to RESERVATIONS RELEASED; and
+ * d) If the persistent reservation is of any other type, the device
+ * server shall not establish a unit attention condition.
+ */
+ __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
+ pr_reg, 1);
+
+ spin_unlock(&dev->dev_reservation_lock);
+
+ if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
+ (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
+ (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+ (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ /*
+ * No UNIT ATTENTION conditions will be established for
+ * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS,
+ * so go ahead and check for the APTPL=1 update+write below.
+ */
+ goto write_aptpl;
+ }
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
+ pr_reg_list) {
+ /*
+ * Do not establish a UNIT ATTENTION condition
+ * for the calling I_T Nexus
+ */
+ if (pr_reg_p == pr_reg)
+ continue;
+
+ core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+ pr_reg_p->pr_res_mapped_lun,
+ 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+
+write_aptpl:
+ if (pr_tmpl->pr_aptpl_active) {
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &pr_reg->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret))
+ printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+ }
+
+ core_scsi3_put_pr_reg(pr_reg);
+ return 0;
+}
+
+static int core_scsi3_emulate_pro_clear(
+ struct se_cmd *cmd,
+ u64 res_key)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_node_acl *pr_reg_nacl;
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+ u32 pr_res_mapped_lun = 0;
+ int calling_it_nexus = 0;
+ /*
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+ se_sess->se_node_acl, se_sess);
+ if (!(pr_reg_n)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ " PR_REGISTERED *pr_reg for CLEAR\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * From spc4r17 section 5.7.11.6, Clearing:
+ *
+ * Any application client may release the persistent reservation and
+ * remove all registrations from a device server by issuing a
+ * PERSISTENT RESERVE OUT command with CLEAR service action through a
+ * registered I_T nexus with the following parameter:
+ *
+ * a) RESERVATION KEY field set to the value of the reservation key
+ * that is registered with the logical unit for the I_T nexus.
+ */
+ if (res_key != pr_reg_n->pr_res_key) {
+ printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+ " res_key: 0x%016Lx does not match"
+ " existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * a) Release the persistent reservation, if any;
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (pr_res_holder) {
+ struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ __core_scsi3_complete_pro_release(dev, pr_res_nacl,
+ pr_res_holder, 0);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+ /*
+ * b) Remove all registration(s) (see spc4r17 5.7.7);
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg, NULL,
+ calling_it_nexus);
+ /*
+ * e) Establish a unit attention condition for the initiator
+ * port associated with every registered I_T nexus other
+ * than the I_T nexus on which the PERSISTENT RESERVE OUT
+ * command with CLEAR service action was received, with the
+ * additional sense code set to RESERVATIONS PREEMPTED.
+ */
+ if (!(calling_it_nexus))
+ core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+ 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
+ CMD_TFO(cmd)->get_fabric_name());
+
+ if (pr_tmpl->pr_aptpl_active) {
+ core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+ printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+ " for CLEAR\n");
+ }
+
+ core_scsi3_pr_generation(dev);
+ return 0;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_preempt(
+ struct se_device *dev,
+ struct t10_pr_registration *pr_reg,
+ struct list_head *preempt_and_abort_list,
+ int type,
+ int scope,
+ int abort)
+{
+ struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+ struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+ /*
+ * Do an implicit RELEASE of the existing reservation.
+ */
+ if (dev->dev_pr_res_holder)
+ __core_scsi3_complete_pro_release(dev, nacl,
+ dev->dev_pr_res_holder, 0);
+
+ dev->dev_pr_res_holder = pr_reg;
+ pr_reg->pr_res_holder = 1;
+ pr_reg->pr_res_type = type;
+ pr_reg->pr_res_scope = scope;
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+ " reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+ core_scsi3_pr_dump_type(type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+ tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+ nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+ /*
+ * For PREEMPT_AND_ABORT, add the preempting reservation's
+ * struct t10_pr_registration to the list that will be compared
+ * against received CDBs.
+ */
+ if (preempt_and_abort_list)
+ list_add_tail(&pr_reg->pr_reg_abort_list,
+ preempt_and_abort_list);
+}
+
+static void core_scsi3_release_preempt_and_abort(
+ struct list_head *preempt_and_abort_list,
+ struct t10_pr_registration *pr_reg_holder)
+{
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+ pr_reg_abort_list) {
+
+ list_del(&pr_reg->pr_reg_abort_list);
+ if (pr_reg_holder == pr_reg)
+ continue;
+ if (pr_reg->pr_res_holder) {
+ printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
+ continue;
+ }
+
+ pr_reg->pr_reg_deve = NULL;
+ pr_reg->pr_reg_nacl = NULL;
+ kfree(pr_reg->pr_aptpl_buf);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ }
+}
+
+int core_scsi3_check_cdb_abort_and_preempt(
+ struct list_head *preempt_and_abort_list,
+ struct se_cmd *cmd)
+{
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+ pr_reg_abort_list) {
+ if (pr_reg->pr_res_key == cmd->pr_res_key)
+ return 0;
+ }
+
+ return 1;
+}
+
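+/*
+ * Worker for the PREEMPT and PREEMPT_AND_ABORT service actions: validate
+ * the calling I_T nexus registration and reservation key, remove the
+ * preempted registrations, establish the new reservation where spc4r17
+ * section 5.7.11.4 calls for it, and for PREEMPT_AND_ABORT collect the
+ * preempted registrations so that outstanding tasks from the preempted
+ * I_T nexuses can be aborted via core_tmr_lun_reset().
+ */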
+static int core_scsi3_pro_preempt(
+ struct se_cmd *cmd,
+ int type,
+ int scope,
+ u64 res_key,
+ u64 sa_res_key,
+ int abort)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_dev_entry *se_deve;
+ struct se_node_acl *pr_reg_nacl;
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct list_head preempt_and_abort_list;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ u32 pr_res_mapped_lun = 0;
+ int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
+ int prh_type = 0, prh_scope = 0, ret;
+
+ if (!(se_sess))
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ se_sess);
+ if (!(pr_reg_n)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ " PR_REGISTERED *pr_reg for PREEMPT%s\n",
+ (abort) ? "_AND_ABORT" : "");
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ if (pr_reg_n->pr_res_key != res_key) {
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ if (scope != PR_SCOPE_LU_SCOPE) {
+ printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ INIT_LIST_HEAD(&preempt_and_abort_list);
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (pr_res_holder &&
+ ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
+ all_reg = 1;
+
+ if (!(all_reg) && !(sa_res_key)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * From spc4r17, section 5.7.11.4.4 Removing Registrations:
+ *
+ * If the SERVICE ACTION RESERVATION KEY field does not identify a
+ * persistent reservation holder or there is no persistent reservation
+ * holder (i.e., there is no persistent reservation), then the device
+ * server shall perform a preempt by doing the following in an
+ * uninterrupted series of actions. (See below..)
+ */
+ if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
+ /*
+ * No existing or SA Reservation Key matching reservations..
+ *
+ * PROUT SA PREEMPT with All Registrant type reservations are
+ * allowed to be processed without a matching SA Reservation Key
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+ /*
+ * Removing of registrations in non all registrants
+ * type reservations without a matching SA reservation
+ * key.
+ *
+ * a) Remove the registrations for all I_T nexuses
+ * specified by the SERVICE ACTION RESERVATION KEY
+ * field;
+ * b) Ignore the contents of the SCOPE and TYPE fields;
+ * c) Process tasks as defined in 5.7.1; and
+ * d) Establish a unit attention condition for the
+ * initiator port associated with every I_T nexus
+ * that lost its registration other than the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command
+ * was received, with the additional sense code set
+ * to REGISTRATIONS PREEMPTED.
+ */
+ if (!(all_reg)) {
+ if (pr_reg->pr_res_key != sa_res_key)
+ continue;
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg,
+ (abort) ? &preempt_and_abort_list :
+ NULL, calling_it_nexus);
+ released_regs++;
+ } else {
+ /*
+ * Case for any existing all registrants type
+ * reservation, follow logic in spc4r17 section
+ * 5.7.11.4 Preempting, Table 52 and Figure 7.
+ *
+ * For a ZERO SA Reservation key, release
+ * all other registrations and do an implicit
+ * release of active persistent reservation.
+ *
+ * For a non-ZERO SA Reservation key, only
+ * release the matching reservation key from
+ * registrations.
+ */
+ if ((sa_res_key) &&
+ (pr_reg->pr_res_key != sa_res_key))
+ continue;
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ if (calling_it_nexus)
+ continue;
+
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg,
+ (abort) ? &preempt_and_abort_list :
+ NULL, 0);
+ released_regs++;
+ }
+ if (!(calling_it_nexus))
+ core_scsi3_ua_allocate(pr_reg_nacl,
+ pr_res_mapped_lun, 0x2A,
+ ASCQ_2AH_RESERVATIONS_PREEMPTED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
+ * a PREEMPT AND ABORT service action sets the SERVICE ACTION
+ * RESERVATION KEY field to a value that does not match any
+ * registered reservation key, then the device server shall
+ * complete the command with RESERVATION CONFLICT status.
+ */
+ if (!(released_regs)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * For an existing all registrants type reservation
+ * with a zero SA reservation key, preempt the existing
+ * reservation with the new PR type and scope.
+ */
+ if (pr_res_holder && all_reg && !(sa_res_key)) {
+ __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+ (abort) ? &preempt_and_abort_list : NULL,
+ type, scope, abort);
+
+ if (abort)
+ core_scsi3_release_preempt_and_abort(
+ &preempt_and_abort_list, pr_reg_n);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+
+ if (pr_tmpl->pr_aptpl_active) {
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &pr_reg_n->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret))
+ printk(KERN_INFO "SPC-3 PR: Updated APTPL"
+ " metadata for PREEMPT%s\n", (abort) ?
+ "_AND_ABORT" : "");
+ }
+
+ core_scsi3_put_pr_reg(pr_reg_n);
+ core_scsi3_pr_generation(SE_DEV(cmd));
+ return 0;
+ }
+ /*
+ * The PREEMPTing SA reservation key matches that of the
+ * existing persistent reservation, first, we check if
+ * we are preempting our own reservation.
+ * From spc4r17, section 5.7.11.4.3 Preempting
+ * persistent reservations and registration handling
+ *
+ * If an all registrants persistent reservation is not
+ * present, it is not an error for the persistent
+ * reservation holder to preempt itself (i.e., a
+ * PERSISTENT RESERVE OUT with a PREEMPT service action
+ * or a PREEMPT AND ABORT service action with the
+ * SERVICE ACTION RESERVATION KEY value equal to the
+ * persistent reservation holder's reservation key that
+ * is received from the persistent reservation holder).
+ * In that case, the device server shall establish the
+ * new persistent reservation and maintain the
+ * registration.
+ */
+ prh_type = pr_res_holder->pr_res_type;
+ prh_scope = pr_res_holder->pr_res_scope;
+ /*
+ * If the SERVICE ACTION RESERVATION KEY field identifies a
+ * persistent reservation holder (see 5.7.10), the device
+ * server shall perform a preempt by doing the following as
+ * an uninterrupted series of actions:
+ *
+ * a) Release the persistent reservation for the holder
+ * identified by the SERVICE ACTION RESERVATION KEY field;
+ */
+ if (pr_reg_n != pr_res_holder)
+ __core_scsi3_complete_pro_release(dev,
+ pr_res_holder->pr_reg_nacl,
+ dev->dev_pr_res_holder, 0);
+ /*
+ * b) Remove the registrations for all I_T nexuses identified
+ * by the SERVICE ACTION RESERVATION KEY field, except the
+ * I_T nexus that is being used for the PERSISTENT RESERVE
+ * OUT command. If an all registrants persistent reservation
+ * is present and the SERVICE ACTION RESERVATION KEY field
+ * is set to zero, then all registrations shall be removed
+ * except for that of the I_T nexus that is being used for
+ * the PERSISTENT RESERVE OUT command;
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ if (calling_it_nexus)
+ continue;
+
+ if (pr_reg->pr_res_key != sa_res_key)
+ continue;
+
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg,
+ (abort) ? &preempt_and_abort_list : NULL,
+ calling_it_nexus);
+ /*
+ * e) Establish a unit attention condition for the initiator
+ * port associated with every I_T nexus that lost its
+ * persistent reservation and/or registration, with the
+ * additional sense code set to REGISTRATIONS PREEMPTED;
+ */
+ core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+ ASCQ_2AH_RESERVATIONS_PREEMPTED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * c) Establish a persistent reservation for the preempting
+ * I_T nexus using the contents of the SCOPE and TYPE fields;
+ */
+ __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+ (abort) ? &preempt_and_abort_list : NULL,
+ type, scope, abort);
+ /*
+ * d) Process tasks as defined in 5.7.1;
+ * e) See above..
+ * f) If the type or scope has changed, then for every I_T nexus
+ * whose reservation key was not removed, except for the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command was
+ * received, the device server shall establish a unit
+ * attention condition for the initiator port associated with
+ * that I_T nexus, with the additional sense code set to
+ * RESERVATIONS RELEASED. If the type or scope have not
+ * changed, then no unit attention condition(s) shall be
+ * established for this reason.
+ */
+ if ((prh_type != type) || (prh_scope != scope)) {
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ if (calling_it_nexus)
+ continue;
+
+ core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+ pr_reg->pr_res_mapped_lun, 0x2A,
+ ASCQ_2AH_RESERVATIONS_RELEASED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+ /*
+ * Call LUN_RESET logic upon list of struct t10_pr_registration,
+ * All received CDBs for the matching existing reservation and
+ * registrations undergo ABORT_TASK logic.
+ *
+ * From there, core_scsi3_release_preempt_and_abort() will
+ * release every registration in the list (which have already
+ * been removed from the primary pr_reg list), except the
+ * new persistent reservation holder, the calling Initiator Port.
+ */
+ if (abort) {
+ core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
+ core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
+ pr_reg_n);
+ }
+
+ if (pr_tmpl->pr_aptpl_active) {
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &pr_reg_n->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret))
+ printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
+ "%s\n", (abort) ? "_AND_ABORT" : "");
+ }
+
+ core_scsi3_put_pr_reg(pr_reg_n);
+ core_scsi3_pr_generation(SE_DEV(cmd));
+ return 0;
+}
+
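+/*
+ * Validate the requested persistent reservation TYPE before handing the
+ * PREEMPT or PREEMPT_AND_ABORT service action off to
+ * core_scsi3_pro_preempt().
+ */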
+static int core_scsi3_emulate_pro_preempt(
+ struct se_cmd *cmd,
+ int type,
+ int scope,
+ u64 res_key,
+ u64 sa_res_key,
+ int abort)
+{
+ int ret = 0;
+
+ switch (type) {
+ case PR_TYPE_WRITE_EXCLUSIVE:
+ case PR_TYPE_EXCLUSIVE_ACCESS:
+ case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+ case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+ case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+ case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+ ret = core_scsi3_pro_preempt(cmd, type, scope,
+ res_key, sa_res_key, abort);
+ break;
+ default:
+ printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
+ " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ return ret;
+}
+
+
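+/*
+ * Handle the REGISTER AND MOVE service action (spc4r17 section 5.7.8):
+ * locate the destination I_T nexus from the Relative Target Port
+ * Identifier and TransportID in the parameter list, register it if
+ * necessary, and move the existing persistent reservation to it.
+ */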
+static int core_scsi3_emulate_pro_register_and_move(
+ struct se_cmd *cmd,
+ u64 res_key,
+ u64 sa_res_key,
+ int aptpl,
+ int unreg)
+{
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_dev_entry *se_deve, *dest_se_deve = NULL;
+ struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
+ struct se_port *se_port;
+ struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
+ struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
+ struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *initiator_str;
+ char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ u32 tid_len, tmp_tid_len;
+ int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
+ unsigned short rtpi;
+ unsigned char proto_ident;
+
+ if (!(se_sess) || !(se_lun)) {
+ printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ memset(dest_iport, 0, 64);
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ se_tpg = se_sess->se_tpg;
+ tf_ops = TPG_TFO(se_tpg);
+ se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ /*
+ * Follow logic from spc4r17 Section 5.7.8, Table 50 --
+ * Register behaviors for a REGISTER AND MOVE service action
+ *
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ se_sess);
+ if (!(pr_reg)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
+ " *pr_reg for REGISTER_AND_MOVE\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * The provided reservation key must match the existing reservation key
+ * provided during this initiator's I_T nexus registration.
+ */
+ if (res_key != pr_reg->pr_res_key) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
+ " res_key: 0x%016Lx does not match existing SA REGISTER"
+ " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * The service action reservation key needs to be non-zero
+ */
+ if (!(sa_res_key)) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
+ " sa_res_key\n");
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * Determine the Relative Target Port Identifier where the reservation
+ * will be moved to for the TransportID containing SCSI initiator WWN
+ * information.
+ */
+ rtpi = (buf[18] & 0xff) << 8;
+ rtpi |= buf[19] & 0xff;
+ tid_len = (buf[20] & 0xff) << 24;
+ tid_len |= (buf[21] & 0xff) << 16;
+ tid_len |= (buf[22] & 0xff) << 8;
+ tid_len |= buf[23] & 0xff;
+
+ if ((tid_len + 24) != cmd->data_length) {
+ printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+ " does not equal CDB data_length: %u\n", tid_len,
+ cmd->data_length);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
+ if (se_port->sep_rtpi != rtpi)
+ continue;
+ dest_se_tpg = se_port->sep_tpg;
+ if (!(dest_se_tpg))
+ continue;
+ dest_tf_ops = TPG_TFO(dest_se_tpg);
+ if (!(dest_tf_ops))
+ continue;
+
+ atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&dev->se_port_lock);
+
+ ret = core_scsi3_tpg_depend_item(dest_se_tpg);
+ if (ret != 0) {
+ printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
+ " for dest_se_tpg\n");
+ atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+
+ spin_lock(&dev->se_port_lock);
+ break;
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ if (!(dest_se_tpg) || (!dest_tf_ops)) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ " fabric ops from Relative Target Port Identifier:"
+ " %hu\n", rtpi);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ proto_ident = (buf[24] & 0x0f);
+#if 0
+ printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+ " 0x%02x\n", proto_ident);
+#endif
+ if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
+ " proto_ident: 0x%02x does not match ident: 0x%02x"
+ " from fabric: %s\n", proto_ident,
+ dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
+ dest_tf_ops->get_fabric_name());
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
+ " containg a valid tpg_parse_pr_out_transport_id"
+ " function pointer\n");
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+ initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
+ (const char *)&buf[24], &tmp_tid_len, &iport_ptr);
+ if (!(initiator_str)) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ " initiator_str from Transport ID\n");
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+ " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
+ "port" : "device", initiator_str, (iport_ptr != NULL) ?
+ iport_ptr : "");
+ /*
+ * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+ * action specifies a TransportID that is the same as the initiator port
+ * of the I_T nexus for the command received, then the command shall
+ * be terminated with CHECK CONDITION status, with the sense key set to
+ * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD
+ * IN PARAMETER LIST.
+ */
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ matching_iname = (!strcmp(initiator_str,
+ pr_reg_nacl->initiatorname)) ? 1 : 0;
+ if (!(matching_iname))
+ goto after_iport_check;
+
+ if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+ " matches: %s on received I_T Nexus\n", initiator_str,
+ pr_reg_nacl->initiatorname);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+ " matches: %s %s on received I_T Nexus\n",
+ initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
+ pr_reg->pr_reg_isid);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+after_iport_check:
+ /*
+ * Locate the destination struct se_node_acl from the received Transport ID
+ */
+ spin_lock_bh(&dest_se_tpg->acl_node_lock);
+ dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
+ initiator_str);
+ if (dest_node_acl) {
+ atomic_inc(&dest_node_acl->acl_pr_ref_count);
+ smp_mb__after_atomic_inc();
+ }
+ spin_unlock_bh(&dest_se_tpg->acl_node_lock);
+
+ if (!(dest_node_acl)) {
+ printk(KERN_ERR "Unable to locate %s dest_node_acl for"
+ " TransportID%s\n", dest_tf_ops->get_fabric_name(),
+ initiator_str);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+ if (ret != 0) {
+ printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for"
+ " dest_node_acl\n");
+ atomic_dec(&dest_node_acl->acl_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ dest_node_acl = NULL;
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+#if 0
+ printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+ " %s from TransportID\n", dest_tf_ops->get_fabric_name(),
+ dest_node_acl->initiatorname);
+#endif
+ /*
+ * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
+ * PORT IDENTIFIER.
+ */
+ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
+ if (!(dest_se_deve)) {
+ printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
+ " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+ if (ret < 0) {
+ printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
+ atomic_dec(&dest_se_deve->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ dest_se_deve = NULL;
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+#if 0
+ printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+ " ACL for dest_se_deve->mapped_lun: %u\n",
+ dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
+ dest_se_deve->mapped_lun);
+#endif
+ /*
+ * A persistent reservation needs to already exist in order to
+ * successfully complete the REGISTER_AND_MOVE service action..
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (!(pr_res_holder)) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
+ " currently held\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ goto out;
+ }
+ /*
+ * The I_T nexus on which the command was received must be the reservation holder.
+ *
+ * From spc4r17 section 5.7.8 Table 50 --
+ * Register behaviors for a REGISTER AND MOVE service action
+ */
+ if (pr_res_holder != pr_reg) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+ " Nexus is not reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ goto out;
+ }
+ /*
+ * From spc4r17 section 5.7.8: registering and moving reservation
+ *
+ * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+ * action is received and the established persistent reservation is a
+ * Write Exclusive - All Registrants type or Exclusive Access -
+ * All Registrants type reservation, then the command shall be completed
+ * with RESERVATION CONFLICT status.
+ */
+ if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+ " reservation for type: %s\n",
+ core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ goto out;
+ }
+ pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ /*
+ * b) Ignore the contents of the (received) SCOPE and TYPE fields;
+ */
+ type = pr_res_holder->pr_res_type;
+ scope = pr_res_holder->pr_res_scope;
+ /*
+ * c) Associate the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field with the I_T nexus specified as the
+ * destination of the register and move, where:
+ * A) The I_T nexus is specified by the TransportID and the
+ * RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
+ * B) Regardless of the TransportID format used, the association for
+ * the initiator port is based on either the initiator port name
+ * (see 3.1.71) on SCSI transport protocols where port names are
+ * required or the initiator port identifier (see 3.1.70) on SCSI
+ * transport protocols where port names are not required;
+ * d) Register the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field;
+ * e) Retain the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field and associated information;
+ *
+ * Also, it is not an error for a REGISTER AND MOVE service action to
+ * register an I_T nexus that is already registered with the same
+ * reservation key or a different reservation key.
+ */
+ dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ if (!(dest_pr_reg)) {
+ ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ dest_node_acl, dest_se_deve, iport_ptr,
+ sa_res_key, 0, aptpl, 2, 1);
+ if (ret != 0) {
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ new_reg = 1;
+ }
+ /*
+ * f) Release the persistent reservation for the persistent reservation
+ * holder (i.e., the I_T nexus on which the PERSISTENT RESERVE OUT
+ * command was received);
+ */
+ __core_scsi3_complete_pro_release(dev, pr_res_nacl,
+ dev->dev_pr_res_holder, 0);
+ /*
+ * g) Move the persistent reservation to the specified I_T nexus using
+ * the same scope and type as the persistent reservation released in
+ * item f); and
+ */
+ dev->dev_pr_res_holder = dest_pr_reg;
+ dest_pr_reg->pr_res_holder = 1;
+ dest_pr_reg->pr_res_type = type;
+ dest_pr_reg->pr_res_scope = scope;
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+ /*
+ * Increment PRGeneration for existing registrations..
+ */
+ if (!(new_reg))
+ dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+ " created new reservation holder TYPE: %s on object RTPI:"
+ " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
+ core_scsi3_pr_dump_type(type), rtpi,
+ dest_pr_reg->pr_res_generation);
+ printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
+ " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
+ tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
+ (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
+ dest_node_acl->initiatorname, (iport_ptr != NULL) ?
+ iport_ptr : "");
+ /*
+ * It is now safe to release configfs group dependencies for destination
+ * of Transport ID Initiator Device/Port Identifier
+ */
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_se_tpg);
+ /*
+ * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
+ * nexus on which PERSISTENT RESERVE OUT command was received.
+ */
+ if (unreg) {
+ spin_lock(&pr_tmpl->registration_lock);
+ __core_scsi3_free_registration(dev, pr_reg, NULL, 1);
+ spin_unlock(&pr_tmpl->registration_lock);
+ } else
+ core_scsi3_put_pr_reg(pr_reg);
+
+ /*
+ * Clear the APTPL metadata if APTPL has been disabled, otherwise
+ * write out the updated metadata to struct file for this SCSI device.
+ */
+ if (!(aptpl)) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+ printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+ " REGISTER_AND_MOVE\n");
+ } else {
+ pr_tmpl->pr_aptpl_active = 1;
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &dest_pr_reg->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret))
+ printk("SPC-3 PR: Set APTPL Bit Activated for"
+ " REGISTER_AND_MOVE\n");
+ }
+
+ core_scsi3_put_pr_reg(dest_pr_reg);
+ return 0;
+out:
+ if (dest_se_deve)
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ if (dest_node_acl)
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_se_tpg);
+ core_scsi3_put_pr_reg(pr_reg);
+ return ret;
+}
+
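+/*
+ * Assemble a 64-bit reservation key from the first eight bytes of the
+ * passed buffer, treating them as big-endian.  For example, the bytes
+ * 00 01 02 03 04 05 06 07 yield the key 0x0001020304050607.
+ */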
+static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
+ __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * See spc4r17 section 6.14 Table 170
+ */
+static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
+{
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u64 res_key, sa_res_key;
+ int sa, scope, type, aptpl;
+ int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
+ /*
+ * FIXME: A NULL struct se_session pointer means this is not coming from
+ * a $FABRIC_MOD's nexus, but from internal passthrough ops.
+ */
+ if (!(SE_SESS(cmd)))
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ if (cmd->data_length < 24) {
+ printk(KERN_WARNING "SPC-PR: Recieved PR OUT parameter list"
+ " length too small: %u\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
+ */
+ sa = (cdb[1] & 0x1f);
+ scope = (cdb[2] & 0xf0);
+ type = (cdb[2] & 0x0f);
+ /*
+ * From PERSISTENT_RESERVE_OUT parameter list (payload)
+ */
+ res_key = core_scsi3_extract_reservation_key(&buf[0]);
+ sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
+ /*
+ * REGISTER_AND_MOVE uses a different SA parameter list containing
+ * SCSI TransportIDs.
+ */
+ if (sa != PRO_REGISTER_AND_MOVE) {
+ spec_i_pt = (buf[20] & 0x08);
+ all_tg_pt = (buf[20] & 0x04);
+ aptpl = (buf[20] & 0x01);
+ } else {
+ aptpl = (buf[17] & 0x01);
+ unreg = (buf[17] & 0x02);
+ }
+ /*
+ * SPEC_I_PT=1 is only valid for Service action: REGISTER
+ */
+ if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ /*
+ * From spc4r17 section 6.14:
+ *
+ * If the SPEC_I_PT bit is set to zero, the service action is not
+ * REGISTER AND MOVE, and the parameter list length is not 24, then
+ * the command shall be terminated with CHECK CONDITION status, with
+ * the sense key set to ILLEGAL REQUEST, and the additional sense
+ * code set to PARAMETER LIST LENGTH ERROR.
+ */
+ if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+ (cmd->data_length != 24)) {
+ printk(KERN_WARNING "SPC-PR: Recieved PR OUT illegal parameter"
+ " list length: %u\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * core_scsi3_emulate_pro_* function parameters
+ * are defined by spc4r17 Table 174:
+ * PERSISTENT_RESERVE_OUT service actions and valid parameters.
+ */
+ switch (sa) {
+ case PRO_REGISTER:
+ return core_scsi3_emulate_pro_register(cmd,
+ res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
+ case PRO_RESERVE:
+ return core_scsi3_emulate_pro_reserve(cmd,
+ type, scope, res_key);
+ case PRO_RELEASE:
+ return core_scsi3_emulate_pro_release(cmd,
+ type, scope, res_key);
+ case PRO_CLEAR:
+ return core_scsi3_emulate_pro_clear(cmd, res_key);
+ case PRO_PREEMPT:
+ return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+ res_key, sa_res_key, 0);
+ case PRO_PREEMPT_AND_ABORT:
+ return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+ res_key, sa_res_key, 1);
+ case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+ return core_scsi3_emulate_pro_register(cmd,
+ 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
+ case PRO_REGISTER_AND_MOVE:
+ return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
+ sa_res_key, aptpl, unreg);
+ default:
+ printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+ " action: 0x%02x\n", cdb[1] & 0x1f);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_KEYS
+ *
+ * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
+ */
+static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+{
+ struct se_device *se_dev = SE_DEV(cmd);
+ struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct t10_pr_registration *pr_reg;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u32 add_len = 0, off = 8;
+
+ if (cmd->data_length < 8) {
+ printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
+ " too small\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+ buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+ buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+ buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+ spin_lock(&T10_RES(su_dev)->registration_lock);
+ list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ pr_reg_list) {
+ /*
+ * Check for overflow of the 8 byte PRI READ_KEYS payload header and the
+ * next reservation key list descriptor.
+ */
+ if ((add_len + 8) > (cmd->data_length - 8))
+ break;
+
+ buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+ buf[off++] = (pr_reg->pr_res_key & 0xff);
+
+ add_len += 8;
+ }
+ spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+ buf[4] = ((add_len >> 24) & 0xff);
+ buf[5] = ((add_len >> 16) & 0xff);
+ buf[6] = ((add_len >> 8) & 0xff);
+ buf[7] = (add_len & 0xff);
+
+ return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
+ *
+ * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
+ */
+static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+{
+ struct se_device *se_dev = SE_DEV(cmd);
+ struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct t10_pr_registration *pr_reg;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u64 pr_res_key;
+ u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
+
+ if (cmd->data_length < 8) {
+ printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+ " too small\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+ buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+ buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+ buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+ spin_lock(&se_dev->dev_reservation_lock);
+ pr_reg = se_dev->dev_pr_res_holder;
+ if ((pr_reg)) {
+ /*
+ * Set the hardcoded Additional Length
+ */
+ buf[4] = ((add_len >> 24) & 0xff);
+ buf[5] = ((add_len >> 16) & 0xff);
+ buf[6] = ((add_len >> 8) & 0xff);
+ buf[7] = (add_len & 0xff);
+
+ if (cmd->data_length < 22) {
+ spin_unlock(&se_dev->dev_reservation_lock);
+ return 0;
+ }
+ /*
+ * Set the Reservation key.
+ *
+ * From spc4r17, section 5.7.10:
+ * A persistent reservation holder has its reservation key
+ * returned in the parameter data from a PERSISTENT
+ * RESERVE IN command with READ RESERVATION service action as
+ * follows:
+ * a) For a persistent reservation of the type Write Exclusive
+ * - All Registrants or Exclusive Access - All Registrants,
+ * the reservation key shall be set to zero; or
+ * b) For all other persistent reservation types, the
+ * reservation key shall be set to the registered
+ * reservation key for the I_T nexus that holds the
+ * persistent reservation.
+ */
+ if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+ pr_res_key = 0;
+ else
+ pr_res_key = pr_reg->pr_res_key;
+
+ buf[8] = ((pr_res_key >> 56) & 0xff);
+ buf[9] = ((pr_res_key >> 48) & 0xff);
+ buf[10] = ((pr_res_key >> 40) & 0xff);
+ buf[11] = ((pr_res_key >> 32) & 0xff);
+ buf[12] = ((pr_res_key >> 24) & 0xff);
+ buf[13] = ((pr_res_key >> 16) & 0xff);
+ buf[14] = ((pr_res_key >> 8) & 0xff);
+ buf[15] = (pr_res_key & 0xff);
+ /*
+ * Set the SCOPE and TYPE
+ */
+ buf[21] = (pr_reg->pr_res_scope & 0xf0) |
+ (pr_reg->pr_res_type & 0x0f);
+ }
+ spin_unlock(&se_dev->dev_reservation_lock);
+
+ return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
+ *
+ * See spc4r17 section 6.13.4 Table 165
+ */
+static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u16 add_len = 8; /* Hardcoded to 8. */
+
+ if (cmd->data_length < 6) {
+ printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+ " %u too small\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ buf[0] = ((add_len >> 8) & 0xff);
+ buf[1] = (add_len & 0xff);
+ buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
+ buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
+ buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
+ buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
+ /*
+ * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+ * set the TMV: Task Mask Valid bit.
+ */
+ buf[3] |= 0x80;
+ /*
+ * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+ */
+ buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+ /*
+ * PTPL_A: Persistence across Target Power Loss Active bit
+ */
+ if (pr_tmpl->pr_aptpl_active)
+ buf[3] |= 0x01;
+ /*
+ * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
+ */
+ buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+ buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+ buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+ buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+ buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+ buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+
+ return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
+ *
+ * See spc4r17 section 6.13.5 Table 168 and 169
+ */
+static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+{
+ struct se_device *se_dev = SE_DEV(cmd);
+ struct se_node_acl *se_nacl;
+ struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_portal_group *se_tpg;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+ u32 off = 8; /* off into first Full Status descriptor */
+ int format_code = 0;
+
+ if (cmd->data_length < 8) {
+ printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+ " too small\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+ buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+ buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+ buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ se_nacl = pr_reg->pr_reg_nacl;
+ se_tpg = pr_reg->pr_reg_nacl->se_tpg;
+ add_desc_len = 0;
+
+ atomic_inc(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * Determine expected length of $FABRIC_MOD specific
+ * TransportID full status descriptor..
+ */
+ exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len(
+ se_tpg, se_nacl, pr_reg, &format_code);
+
+ if ((exp_desc_len + add_len) > cmd->data_length) {
+ printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
+ " out of buffer: %d\n", cmd->data_length);
+ spin_lock(&pr_tmpl->registration_lock);
+ atomic_dec(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_dec();
+ break;
+ }
+ /*
+ * Set RESERVATION KEY
+ */
+ buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+ buf[off++] = (pr_reg->pr_res_key & 0xff);
+ off += 4; /* Skip Over Reserved area */
+
+ /*
+ * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
+ */
+ if (pr_reg->pr_reg_all_tg_pt)
+ buf[off] = 0x02;
+ /*
+ * Set the PR_HOLDER bit if this registration is the
+ * reservation holder, and in that case also fill in
+ * SCOPE and TYPE in the next byte.
+ */
+ if (pr_reg->pr_res_holder) {
+ buf[off++] |= 0x01;
+ buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
+ (pr_reg->pr_res_type & 0x0f);
+ } else
+ off += 2;
+
+ off += 4; /* Skip over reserved area */
+ /*
+ * From spc4r17 section 6.13.5:
+ *
+ * If the ALL_TG_PT bit is set to zero, the RELATIVE TARGET PORT
+ * IDENTIFIER field contains the relative port identifier (see
+ * 3.1.120) of the target port that is part of the I_T nexus
+ * described by this full status descriptor. If the ALL_TG_PT
+ * bit is set to one, the contents of the RELATIVE TARGET PORT
+ * IDENTIFIER field are not defined by this standard.
+ */
+ if (!(pr_reg->pr_reg_all_tg_pt)) {
+ struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
+
+ buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+ buf[off++] = (port->sep_rtpi & 0xff);
+ } else
+ off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFER */
+
+ /*
+ * Now, have the $FABRIC_MOD fill in the protocol identifier
+ */
+ desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg,
+ se_nacl, pr_reg, &format_code, &buf[off+4]);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ atomic_dec(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_dec();
+ /*
+ * Set the ADDITIONAL DESCRIPTOR LENGTH
+ */
+ buf[off++] = ((desc_len >> 24) & 0xff);
+ buf[off++] = ((desc_len >> 16) & 0xff);
+ buf[off++] = ((desc_len >> 8) & 0xff);
+ buf[off++] = (desc_len & 0xff);
+ /*
+ * Size of the full status descriptor: the 24 byte header plus
+ * the TransportID containing $FABRIC_MOD specific initiator
+ * device/port WWN information.
+ *
+ * See spc4r17 Section 6.13.5 Table 169
+ */
+ add_desc_len = (24 + desc_len);
+
+ off += desc_len;
+ add_len += add_desc_len;
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * Set ADDITIONAL_LENGTH
+ */
+ buf[4] = ((add_len >> 24) & 0xff);
+ buf[5] = ((add_len >> 16) & 0xff);
+ buf[6] = ((add_len >> 8) & 0xff);
+ buf[7] = (add_len & 0xff);
+
+ return 0;
+}
+
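+/*
+ * Dispatch a PERSISTENT RESERVE IN command based on the service action
+ * encoded in CDB byte 1, bits 4:0.
+ */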
+static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
+{
+ switch (cdb[1] & 0x1f) {
+ case PRI_READ_KEYS:
+ return core_scsi3_pri_read_keys(cmd);
+ case PRI_READ_RESERVATION:
+ return core_scsi3_pri_read_reservation(cmd);
+ case PRI_REPORT_CAPABILITIES:
+ return core_scsi3_pri_report_capabilities(cmd);
+ case PRI_READ_FULL_STATUS:
+ return core_scsi3_pri_read_full_status(cmd);
+ default:
+ printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
+ " action: 0x%02x\n", cdb[1] & 0x1f);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+}
+
+int core_scsi3_emulate_pr(struct se_cmd *cmd)
+{
+ unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+ struct se_device *dev = cmd->se_dev;
+ /*
+ * Following spc2r20 5.5.1 Reservations overview:
+ *
+ * If a logical unit has been reserved by any RESERVE command and is
+ * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+ * PERSISTENT RESERVE OUT commands shall conflict regardless of
+ * initiator or service action and shall terminate with a RESERVATION
+ * CONFLICT status.
+ */
+ if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
+ printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
+ " SPC-2 reservation is held, returning"
+ " RESERVATION_CONFLICT\n");
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+
+ return (cdb[0] == PERSISTENT_RESERVE_OUT) ?
+ core_scsi3_emulate_pr_out(cmd, cdb) :
+ core_scsi3_emulate_pr_in(cmd, cdb);
+}
+
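+/*
+ * No-op reservation hooks installed by core_setup_reservations() for
+ * SPC_PASSTHROUGH mode, where reservation handling is left to the
+ * underlying SCSI device.
+ */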
+static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
+{
+ return 0;
+}
+
+static int core_pt_seq_non_holder(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u32 pr_reg_type)
+{
+ return 0;
+}
+
+int core_setup_reservations(struct se_device *dev, int force_pt)
+{
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ struct t10_reservation_template *rest = &su_dev->t10_reservation;
+ /*
+ * If this device is from Target_Core_Mod/pSCSI, use the reservations
+ * of the underlying SCSI hardware. In Linux/SCSI terms, this can
+ * cause a problem because libata and some SATA RAID HBAs appear
+ * under Linux/SCSI, but emulate reservations themselves.
+ */
+ if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) {
+ rest->res_type = SPC_PASSTHROUGH;
+ rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
+ rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
+ printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
+ " emulation\n", TRANSPORT(dev)->name);
+ return 0;
+ }
+ /*
+ * If SPC-3 or above is reported by real or emulated struct se_device,
+ * use emulated Persistent Reservations.
+ */
+ if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+ rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
+ rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
+ rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
+ printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
+ " emulation\n", TRANSPORT(dev)->name);
+ } else {
+ rest->res_type = SPC2_RESERVATIONS;
+ rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
+ rest->pr_ops.t10_seq_non_holder =
+ &core_scsi2_reservation_seq_non_holder;
+ printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
+ TRANSPORT(dev)->name);
+ }
+
+ return 0;
+}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
new file mode 100644
index 000000000000..5603bcfd86d3
--- /dev/null
+++ b/drivers/target/target_core_pr.h
@@ -0,0 +1,67 @@
+#ifndef TARGET_CORE_PR_H
+#define TARGET_CORE_PR_H
+/*
+ * PERSISTENT_RESERVE_OUT service action codes
+ *
+ * spc4r17 section 6.14.2 Table 171
+ */
+#define PRO_REGISTER 0x00
+#define PRO_RESERVE 0x01
+#define PRO_RELEASE 0x02
+#define PRO_CLEAR 0x03
+#define PRO_PREEMPT 0x04
+#define PRO_PREEMPT_AND_ABORT 0x05
+#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY 0x06
+#define PRO_REGISTER_AND_MOVE 0x07
+/*
+ * PERSISTENT_RESERVE_IN service action codes
+ *
+ * spc4r17 section 6.13.1 Table 159
+ */
+#define PRI_READ_KEYS 0x00
+#define PRI_READ_RESERVATION 0x01
+#define PRI_REPORT_CAPABILITIES 0x02
+#define PRI_READ_FULL_STATUS 0x03
+/*
+ * PERSISTENT_RESERVE_ SCOPE field
+ *
+ * spc4r17 section 6.13.3.3 Table 163
+ */
+#define PR_SCOPE_LU_SCOPE 0x00
+/*
+ * PERSISTENT_RESERVE_* TYPE field
+ *
+ * spc4r17 section 6.13.3.4 Table 164
+ */
+#define PR_TYPE_WRITE_EXCLUSIVE 0x01
+#define PR_TYPE_EXCLUSIVE_ACCESS 0x03
+#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY 0x05
+#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06
+#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG 0x07
+#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG 0x08
+
+#define PR_APTPL_MAX_IPORT_LEN 256
+#define PR_APTPL_MAX_TPORT_LEN 256
+
+extern struct kmem_cache *t10_pr_reg_cache;
+
+extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
+ char *, u32);
+extern int core_scsi2_emulate_crh(struct se_cmd *);
+extern int core_scsi3_alloc_aptpl_registration(
+ struct t10_reservation_template *, u64,
+ unsigned char *, unsigned char *, u32,
+ unsigned char *, u16, u32, int, int, u8);
+extern int core_scsi3_check_aptpl_registration(struct se_device *,
+ struct se_portal_group *, struct se_lun *,
+ struct se_lun_acl *);
+extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+ struct se_node_acl *);
+extern void core_scsi3_free_all_registrations(struct se_device *);
+extern unsigned char *core_scsi3_pr_dump_type(int);
+extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
+ struct se_cmd *);
+extern int core_scsi3_emulate_pr(struct se_cmd *);
+extern int core_setup_reservations(struct se_device *, int);
+
+#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
new file mode 100644
index 000000000000..742d24609a9b
--- /dev/null
+++ b/drivers/target/target_core_pscsi.c
@@ -0,0 +1,1470 @@
+/*******************************************************************************
+ * Filename: target_core_pscsi.c
+ *
+ * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/genhd.h>
+#include <linux/cdrom.h>
+#include <linux/file.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_pscsi.h"
+
+#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
+
+static struct se_subsystem_api pscsi_template;
+
+static void pscsi_req_done(struct request *, int);
+
+/* pscsi_get_sh():
+ *
+ *
+ */
+static struct Scsi_Host *pscsi_get_sh(u32 host_no)
+{
+ struct Scsi_Host *sh = NULL;
+
+ sh = scsi_host_lookup(host_no);
+ if (IS_ERR(sh)) {
+ printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
+ " %u\n", host_no);
+ return NULL;
+ }
+
+ return sh;
+}
+
+/* pscsi_attach_hba():
+ *
+ * pscsi_get_sh() uses scsi_host_lookup() to locate the struct Scsi_Host
+ * from the passed SCSI Host ID.
+ */
+static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
+{
+ int hba_depth;
+ struct pscsi_hba_virt *phv;
+
+ phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
+ if (!(phv)) {
+ printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
+ return -1;
+ }
+ phv->phv_host_id = host_id;
+ phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+ hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
+ atomic_set(&hba->left_queue_depth, hba_depth);
+ atomic_set(&hba->max_queue_depth, hba_depth);
+
+ hba->hba_ptr = (void *)phv;
+
+ printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+ " Generic Target Core Stack %s\n", hba->hba_id,
+ PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
+ printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
+ " Target Core with TCQ Depth: %d\n", hba->hba_id,
+ atomic_read(&hba->max_queue_depth));
+
+ return 0;
+}
+
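+/*
+ * Drop the struct Scsi_Host reference held for pSCSI HBA passthrough
+ * mode (if any), then free the struct pscsi_hba_virt allocated by
+ * pscsi_attach_hba().
+ */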
+static void pscsi_detach_hba(struct se_hba *hba)
+{
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct Scsi_Host *scsi_host = phv->phv_lld_host;
+
+ if (scsi_host) {
+ scsi_host_put(scsi_host);
+
+ printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
+ " Generic Target Core\n", hba->hba_id,
+ (scsi_host->hostt->name) ? (scsi_host->hostt->name) :
+ "Unknown");
+ } else
+ printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
+ " from Generic Target Core\n", hba->hba_id);
+
+ kfree(phv);
+ hba->hba_ptr = NULL;
+}
+
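+/*
+ * Toggle pSCSI HBA passthrough mode: with mode_flag set, look up the
+ * struct Scsi_Host for the stored host ID and adopt the larger of its
+ * template and runtime can_queue values as the queue depth; with
+ * mode_flag cleared, drop the host reference and restore the virtual
+ * HBA defaults.
+ */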
+static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
+{
+ struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+ struct Scsi_Host *sh = phv->phv_lld_host;
+ int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
+ /*
+ * Release the struct Scsi_Host
+ */
+ if (!(mode_flag)) {
+ if (!(sh))
+ return 0;
+
+ phv->phv_lld_host = NULL;
+ phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+ atomic_set(&hba->left_queue_depth, hba_depth);
+ atomic_set(&hba->max_queue_depth, hba_depth);
+
+ printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+ " %s\n", hba->hba_id, (sh->hostt->name) ?
+ (sh->hostt->name) : "Unknown");
+
+ scsi_host_put(sh);
+ return 0;
+ }
+ /*
+ * Otherwise, locate struct Scsi_Host from the original passed
+ * pSCSI Host ID and enable for phba mode
+ */
+ sh = pscsi_get_sh(phv->phv_host_id);
+ if (!(sh)) {
+ printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
+ " phv_host_id: %d\n", phv->phv_host_id);
+ return -1;
+ }
+ /*
+ * Usually the SCSI LLD will use the hostt->can_queue value to define
+ * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set
+ * this at all and set sh->can_queue at runtime.
+ */
+ hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
+ sh->hostt->can_queue : sh->can_queue;
+
+ atomic_set(&hba->left_queue_depth, hba_depth);
+ atomic_set(&hba->max_queue_depth, hba_depth);
+
+ phv->phv_lld_host = sh;
+ phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
+
+ printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+ hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
+
+ return 1;
+}
+
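+/*
+ * Used for TYPE_TAPE devices: issue MODE_SENSE to read the current
+ * block size, falling back to 1024 bytes when the device reports zero.
+ */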
+static void pscsi_tape_read_blocksize(struct se_device *dev,
+ struct scsi_device *sdev)
+{
+ unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+ int ret;
+
+ buf = kzalloc(12, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ memset(cdb, 0, MAX_COMMAND_SIZE);
+ cdb[0] = MODE_SENSE;
+ cdb[4] = 0x0c; /* 12 bytes */
+
+ ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
+ HZ, 1, NULL);
+ if (ret)
+ goto out_free;
+
+ /*
+ * If MODE_SENSE reports a zero block size, fall back to a default of 1024.
+ */
+ sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+ if (!sdev->sector_size)
+ sdev->sector_size = 1024;
+out_free:
+ kfree(buf);
+}
+
+static void
+pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+ unsigned char *buf;
+
+ if (sdev->inquiry_len < INQUIRY_LEN)
+ return;
+
+ buf = sdev->inquiry;
+ if (!buf)
+ return;
+ /*
+ * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
+ */
+ memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
+ memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
+ memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
+}
+
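+/*
+ * Issue INQUIRY for VPD page 0x80 (Unit Serial Number) and copy the
+ * result into the t10_wwn unit_serial field.  Returns 0 on success or
+ * -1 on failure.
+ */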
+static int
+pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+ unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+ int ret;
+
+ buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+ if (!buf)
+ return -1;
+
+ memset(cdb, 0, MAX_COMMAND_SIZE);
+ cdb[0] = INQUIRY;
+ cdb[1] = 0x01; /* Query VPD */
+ cdb[2] = 0x80; /* Unit Serial Number */
+ cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
+ cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
+
+ ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+ INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
+ if (ret)
+ goto out_free;
+
+ snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
+
+ wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+
+ kfree(buf);
+ return 0;
+
+out_free:
+ kfree(buf);
+ return -1;
+}
+
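+/*
+ * Issue INQUIRY for VPD page 0x83 (Device Identification) and walk the
+ * returned identification descriptors, adding each successfully parsed
+ * descriptor to the t10_wwn VPD list.
+ */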
+static void
+pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
+ struct t10_wwn *wwn)
+{
+ unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
+ int ident_len, page_len, off = 4, ret;
+ struct t10_vpd *vpd;
+
+ buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ memset(cdb, 0, MAX_COMMAND_SIZE);
+ cdb[0] = INQUIRY;
+ cdb[1] = 0x01; /* Query VPD */
+ cdb[2] = 0x83; /* Device Identifier */
+ cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
+ cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
+
+ ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+ INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
+ NULL, HZ, 1, NULL);
+ if (ret)
+ goto out;
+
+ page_len = (buf[2] << 8) | buf[3];
+ while (page_len > 0) {
+ /* Grab a pointer to the Identification descriptor */
+ page_83 = &buf[off];
+ ident_len = page_83[3];
+ if (!ident_len) {
+ printk(KERN_ERR "page_83[3]: identifier"
+ " length zero!\n");
+ break;
+ }
+ printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);
+
+ vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
+ if (!vpd) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct t10_vpd\n");
+ goto out;
+ }
+ INIT_LIST_HEAD(&vpd->vpd_list);
+
+ transport_set_vpd_proto_id(vpd, page_83);
+ transport_set_vpd_assoc(vpd, page_83);
+
+ if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
+ off += (ident_len + 4);
+ page_len -= (ident_len + 4);
+ kfree(vpd);
+ continue;
+ }
+ if (transport_set_vpd_ident(vpd, page_83) < 0) {
+ off += (ident_len + 4);
+ page_len -= (ident_len + 4);
+ kfree(vpd);
+ continue;
+ }
+
+ list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
+ off += (ident_len + 4);
+ page_len -= (ident_len + 4);
+ }
+
+out:
+ kfree(buf);
+}
+
+/* pscsi_add_device_to_list():
+ *
+ *
+ */
+static struct se_device *pscsi_add_device_to_list(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ struct pscsi_dev_virt *pdv,
+ struct scsi_device *sd,
+ int dev_flags)
+{
+ struct se_device *dev;
+ struct se_dev_limits dev_limits;
+ struct request_queue *q;
+ struct queue_limits *limits;
+
+ memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+ if (!sd->queue_depth) {
+ sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
+
+ printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
+ " queue_depth to %d\n", sd->channel, sd->id,
+ sd->lun, sd->queue_depth);
+ }
+ /*
+ * Setup the local scope queue_limits from struct request_queue->limits
+ * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+ */
+ q = sd->request_queue;
+ limits = &dev_limits.limits;
+ limits->logical_block_size = sd->sector_size;
+ limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
+ queue_max_hw_sectors(q) : sd->host->max_sectors;
+ limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
+ queue_max_sectors(q) : sd->host->max_sectors;
+ dev_limits.hw_queue_depth = sd->queue_depth;
+ dev_limits.queue_depth = sd->queue_depth;
+ /*
+ * Setup our standard INQUIRY info into se_dev->t10_wwn
+ */
+ pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
+
+ /*
+ * Set the pointer pdv->pdv_sd from the passed struct scsi_device,
+ * which has already been referenced via scsi_device_get() by the
+ * Linux SCSI code in this file's pscsi_create_virtdevice().
+ *
+ * The passthrough operations called by the transport_add_device_*
+ * function below will require this pointer to be set for passthrough
+ * ops.
+ *
+ * For the shutdown case in pscsi_free_device(), this struct
+ * scsi_device reference is released with Linux SCSI code
+ * scsi_device_put() and the pdv->pdv_sd cleared.
+ */
+ pdv->pdv_sd = sd;
+
+ dev = transport_add_device_to_core_hba(hba, &pscsi_template,
+ se_dev, dev_flags, (void *)pdv,
+ &dev_limits, NULL, NULL);
+ if (!(dev)) {
+ pdv->pdv_sd = NULL;
+ return NULL;
+ }
+
+ /*
+ * Locate VPD WWN Information used for various purposes within
+ * the Storage Engine.
+ */
+ if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+ /*
+ * If VPD Unit Serial returned GOOD status, try
+ * VPD Device Identification page (0x83).
+ */
+ pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+ }
+
+ /*
+ * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+ */
+ if (sd->type == TYPE_TAPE)
+ pscsi_tape_read_blocksize(dev, sd);
+ return dev;
+}
+
+static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+ struct pscsi_dev_virt *pdv;
+
+ pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
+ if (!(pdv)) {
+ printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
+ return NULL;
+ }
+ pdv->pdv_se_hba = hba;
+
+ printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+ return (void *)pdv;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_disk(
+ struct scsi_device *sd,
+ struct pscsi_dev_virt *pdv,
+ struct se_subsystem_dev *se_dev,
+ struct se_hba *hba)
+{
+ struct se_device *dev;
+ struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct Scsi_Host *sh = sd->host;
+ struct block_device *bd;
+ u32 dev_flags = 0;
+
+ if (scsi_device_get(sd)) {
+ printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+ sh->host_no, sd->channel, sd->id, sd->lun);
+ spin_unlock_irq(sh->host_lock);
+ return NULL;
+ }
+ spin_unlock_irq(sh->host_lock);
+ /*
+ * Claim exclusive struct block_device access to struct scsi_device
+ * for TYPE_DISK using supplied udev_path
+ */
+ bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+ FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
+ if (IS_ERR(bd)) {
+ printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
+ scsi_device_put(sd);
+ return NULL;
+ }
+ pdv->pdv_bd = bd;
+
+ dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+ if (!(dev)) {
+ blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ scsi_device_put(sd);
+ return NULL;
+ }
+ printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+ phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
+
+ return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_rom(
+ struct scsi_device *sd,
+ struct pscsi_dev_virt *pdv,
+ struct se_subsystem_dev *se_dev,
+ struct se_hba *hba)
+{
+ struct se_device *dev;
+ struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct Scsi_Host *sh = sd->host;
+ u32 dev_flags = 0;
+
+ if (scsi_device_get(sd)) {
+ printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+ sh->host_no, sd->channel, sd->id, sd->lun);
+ spin_unlock_irq(sh->host_lock);
+ return NULL;
+ }
+ spin_unlock_irq(sh->host_lock);
+
+ dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+ if (!(dev)) {
+ scsi_device_put(sd);
+ return NULL;
+ }
+ printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+ sd->channel, sd->id, sd->lun);
+
+ return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_other(
+ struct scsi_device *sd,
+ struct pscsi_dev_virt *pdv,
+ struct se_subsystem_dev *se_dev,
+ struct se_hba *hba)
+{
+ struct se_device *dev;
+ struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct Scsi_Host *sh = sd->host;
+ u32 dev_flags = 0;
+
+ spin_unlock_irq(sh->host_lock);
+ dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+ if (!(dev))
+ return NULL;
+
+ printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+ sd->channel, sd->id, sd->lun);
+
+ return dev;
+}
+
+static struct se_device *pscsi_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p)
+{
+ struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
+ struct se_device *dev;
+ struct scsi_device *sd;
+ struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+ struct Scsi_Host *sh = phv->phv_lld_host;
+ int legacy_mode_enable = 0;
+
+ if (!(pdv)) {
+ printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
+ " parameter\n");
+ return NULL;
+ }
+ /*
+ * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
+ * struct Scsi_Host we will need to bring the TCM/pSCSI object online
+ */
+ if (!(sh)) {
+ if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+ printk(KERN_ERR "pSCSI: Unable to locate struct"
+ " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
+ return NULL;
+ }
+ /*
+ * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
+ * reference, we enforce that udev_path has been set
+ */
+ if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+ printk(KERN_ERR "pSCSI: udev_path attribute has not"
+ " been set before ENABLE=1\n");
+ return NULL;
+ }
+ /*
+ * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
+ * use the original TCM hba ID to reference Linux/SCSI Host No
+ * and enable for PHV_LLD_SCSI_HOST_NO mode.
+ */
+ if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
+ spin_lock(&hba->device_lock);
+ if (!(list_empty(&hba->hba_dev_list))) {
+ printk(KERN_ERR "pSCSI: Unable to set hba_mode"
+ " with active devices\n");
+ spin_unlock(&hba->device_lock);
+ return NULL;
+ }
+ spin_unlock(&hba->device_lock);
+
+ if (pscsi_pmode_enable_hba(hba, 1) != 1)
+ return NULL;
+
+ legacy_mode_enable = 1;
+ hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+ sh = phv->phv_lld_host;
+ } else {
+ sh = pscsi_get_sh(pdv->pdv_host_id);
+ if (!(sh)) {
+ printk(KERN_ERR "pSCSI: Unable to locate"
+ " pdv_host_id: %d\n", pdv->pdv_host_id);
+ return NULL;
+ }
+ }
+ } else {
+ if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
+ printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
+ " struct Scsi_Host exists\n");
+ return NULL;
+ }
+ }
+
+ spin_lock_irq(sh->host_lock);
+ list_for_each_entry(sd, &sh->__devices, siblings) {
+ if ((pdv->pdv_channel_id != sd->channel) ||
+ (pdv->pdv_target_id != sd->id) ||
+ (pdv->pdv_lun_id != sd->lun))
+ continue;
+ /*
+ * Functions will release the held struct scsi_host->host_lock
+ * before calling pscsi_add_device_to_list() to register
+ * struct scsi_device with target_core_mod.
+ */
+ switch (sd->type) {
+ case TYPE_DISK:
+ dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+ break;
+ case TYPE_ROM:
+ dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+ break;
+ default:
+ dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+ break;
+ }
+
+ if (!(dev)) {
+ if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+ scsi_host_put(sh);
+ else if (legacy_mode_enable) {
+ pscsi_pmode_enable_hba(hba, 0);
+ hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+ }
+ pdv->pdv_sd = NULL;
+ return NULL;
+ }
+ return dev;
+ }
+ spin_unlock_irq(sh->host_lock);
+
+ printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+ pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
+
+ if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+ scsi_host_put(sh);
+ else if (legacy_mode_enable) {
+ pscsi_pmode_enable_hba(hba, 0);
+ hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+ }
+
+ return NULL;
+}
+
+/* pscsi_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void pscsi_free_device(void *p)
+{
+ struct pscsi_dev_virt *pdv = p;
+ struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct scsi_device *sd = pdv->pdv_sd;
+
+ if (sd) {
+ /*
+ * Release exclusive pSCSI internal struct block_device claim for
+ * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
+ */
+ if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
+ blkdev_put(pdv->pdv_bd,
+ FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ pdv->pdv_bd = NULL;
+ }
+ /*
+ * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
+ * to struct Scsi_Host now.
+ */
+ if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
+ (phv->phv_lld_host != NULL))
+ scsi_host_put(phv->phv_lld_host);
+
+ if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+ scsi_device_put(sd);
+
+ pdv->pdv_sd = NULL;
+ }
+
+ kfree(pdv);
+}
+
+static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
+{
+ return container_of(task, struct pscsi_plugin_task, pscsi_task);
+}
+
+
+/* pscsi_transport_complete():
+ *
+ *
+ */
+static int pscsi_transport_complete(struct se_task *task)
+{
+ struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+ struct scsi_device *sd = pdv->pdv_sd;
+ int result;
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ unsigned char *cdb = &pt->pscsi_cdb[0];
+
+ result = pt->pscsi_result;
+ /*
+ * Hack to make sure that Write-Protect modepage is set if R/O mode is
+ * forced.
+ */
+ if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
+ (status_byte(result) << 1) == SAM_STAT_GOOD) {
+ if (!TASK_CMD(task)->se_deve)
+ goto after_mode_sense;
+
+ if (TASK_CMD(task)->se_deve->lun_flags &
+ TRANSPORT_LUNFLAGS_READ_ONLY) {
+ unsigned char *buf = (unsigned char *)
+ T_TASK(task->task_se_cmd)->t_task_buf;
+
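+ /*
+ * Bit 7 of the device-specific parameter byte in the mode
+ * parameter header (byte 3 for MODE_SENSE_10, byte 2 for
+ * MODE_SENSE) is the WP bit being forced on here.
+ */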
+ if (cdb[0] == MODE_SENSE_10) {
+ if (!(buf[3] & 0x80))
+ buf[3] |= 0x80;
+ } else {
+ if (!(buf[2] & 0x80))
+ buf[2] |= 0x80;
+ }
+ }
+ }
+after_mode_sense:
+
+ if (sd->type != TYPE_TAPE)
+ goto after_mode_select;
+
+ /*
+ * Hack to correctly obtain the initiator requested blocksize for
+ * TYPE_TAPE. Since this value is dependent upon each tape media,
+ * struct scsi_device->sector_size will not contain the correct value
+ * by default, so we go ahead and set it so
+ * TRANSPORT(dev)->get_blockdev() returns the correct value to the
+ * storage engine.
+ */
+ if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
+ (status_byte(result) << 1) == SAM_STAT_GOOD) {
+ unsigned char *buf;
+ struct scatterlist *sg = task->task_sg;
+ u16 bdl;
+ u32 blocksize;
+
+ buf = sg_virt(&sg[0]);
+ if (!(buf)) {
+ printk(KERN_ERR "Unable to get buf for scatterlist\n");
+ goto after_mode_select;
+ }
+
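+ /*
+ * Read the block descriptor length from the mode parameter
+ * header (byte 3 for the 6-byte form, bytes 6-7 for the
+ * 10-byte form), then pull the block length field out of the
+ * first block descriptor that follows the header.
+ */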
+ if (cdb[0] == MODE_SELECT)
+ bdl = (buf[3]);
+ else
+ bdl = (buf[6] << 8) | (buf[7]);
+
+ if (!bdl)
+ goto after_mode_select;
+
+ if (cdb[0] == MODE_SELECT)
+ blocksize = (buf[9] << 16) | (buf[10] << 8) |
+ (buf[11]);
+ else
+ blocksize = (buf[13] << 16) | (buf[14] << 8) |
+ (buf[15]);
+
+ sd->sector_size = blocksize;
+ }
+after_mode_select:
+
+ if (status_byte(result) & CHECK_CONDITION)
+ return 1;
+
+ return 0;
+}
+
+static struct se_task *
+pscsi_alloc_task(struct se_cmd *cmd)
+{
+ struct pscsi_plugin_task *pt;
+ unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
+
+ pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
+ if (!pt) {
+ printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
+ return NULL;
+ }
+
+ /*
+ * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
+ * allocate the extended CDB buffer for per struct se_task context
+ * pt->pscsi_cdb now.
+ */
+ if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
+
+ pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
+ if (!(pt->pscsi_cdb)) {
+ printk(KERN_ERR "pSCSI: Unable to allocate extended"
+ " pt->pscsi_cdb\n");
+ return NULL;
+ }
+ } else
+ pt->pscsi_cdb = &pt->__pscsi_cdb[0];
+
+ return &pt->pscsi_task;
+}
+
+static inline void pscsi_blk_init_request(
+ struct se_task *task,
+ struct pscsi_plugin_task *pt,
+ struct request *req,
+ int bidi_read)
+{
+ /*
+ * Defined as "scsi command" in include/linux/blkdev.h.
+ */
+ req->cmd_type = REQ_TYPE_BLOCK_PC;
+ /*
+ * For the extra BIDI-COMMAND READ struct request we do not
+ * need to setup the remaining structure members
+ */
+ if (bidi_read)
+ return;
+ /*
+ * Setup the done function pointer for struct request,
+ * and set the end_io_data pointer to struct se_task.
+ */
+ req->end_io = pscsi_req_done;
+ req->end_io_data = (void *)task;
+ /*
+ * Load the referenced struct se_task's SCSI CDB into
+ * include/linux/blkdev.h:struct request->cmd
+ */
+ req->cmd_len = scsi_command_size(pt->pscsi_cdb);
+ req->cmd = &pt->pscsi_cdb[0];
+ /*
+ * Setup pointer for outgoing sense data.
+ */
+ req->sense = (void *)&pt->pscsi_sense[0];
+ req->sense_len = 0;
+}
+
+/*
+ * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
+ */
+static int pscsi_blk_get_request(struct se_task *task)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+
+ pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
+ (task->task_data_direction == DMA_TO_DEVICE),
+ GFP_KERNEL);
+ if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
+ printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
+ IS_ERR(pt->pscsi_req));
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
+ * and setup rq callback, CDB and sense.
+ */
+ pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
+ return 0;
+}
+
+/* pscsi_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int pscsi_do_task(struct se_task *task)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+ /*
+ * Set the struct request->timeout value based on peripheral
+ * device type from SCSI.
+ */
+ if (pdv->pdv_sd->type == TYPE_DISK)
+ pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
+ else
+ pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;
+
+ pt->pscsi_req->retries = PS_RETRY;
+ /*
+ * Queue the struct request into the struct scsi_device->request_queue.
+ * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
+ * descriptor
+ */
+ blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
+ (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
+ pscsi_req_done);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+static void pscsi_free_task(struct se_task *task)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct se_cmd *cmd = task->task_se_cmd;
+
+ /*
+ * Release the extended CDB allocation from pscsi_alloc_task()
+ * if one exists.
+ */
+ if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
+ kfree(pt->pscsi_cdb);
+ /*
+ * We do not release the bio(s) here associated with this task, as
+ * this is handled by bio_put() and pscsi_bi_endio().
+ */
+ kfree(pt);
+}
+
+enum {
+ Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
+ Opt_scsi_lun_id, Opt_err
+};
+
+static match_table_t tokens = {
+ {Opt_scsi_host_id, "scsi_host_id=%d"},
+ {Opt_scsi_channel_id, "scsi_channel_id=%d"},
+ {Opt_scsi_target_id, "scsi_target_id=%d"},
+ {Opt_scsi_lun_id, "scsi_lun_id=%d"},
+ {Opt_err, NULL}
+};
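+
+/*
+ * Example control string (illustrative values only):
+ * "scsi_host_id=2,scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0"
+ * written to the configfs device control attribute is parsed by
+ * pscsi_set_configfs_dev_params() below using the token table above.
+ */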
+
+static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ const char *page,
+ ssize_t count)
+{
+ struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
+ char *orig, *ptr, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+
+ while ((ptr = strsep(&opts, ",")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_scsi_host_id:
+ if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+ printk(KERN_ERR "PSCSI[%d]: Unable to accept"
+ " scsi_host_id while phv_mode =="
+ " PHV_LLD_SCSI_HOST_NO\n",
+ phv->phv_host_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ match_int(args, &arg);
+ pdv->pdv_host_id = arg;
+ printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
+ " %d\n", phv->phv_host_id, pdv->pdv_host_id);
+ pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
+ break;
+ case Opt_scsi_channel_id:
+ match_int(args, &arg);
+ pdv->pdv_channel_id = arg;
+ printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
+ " ID: %d\n", phv->phv_host_id,
+ pdv->pdv_channel_id);
+ pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
+ break;
+ case Opt_scsi_target_id:
+ match_int(args, &arg);
+ pdv->pdv_target_id = arg;
+ printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
+ " ID: %d\n", phv->phv_host_id,
+ pdv->pdv_target_id);
+ pdv->pdv_flags |= PDF_HAS_TARGET_ID;
+ break;
+ case Opt_scsi_lun_id:
+ match_int(args, &arg);
+ pdv->pdv_lun_id = arg;
+ printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
+ " %d\n", phv->phv_host_id, pdv->pdv_lun_id);
+ pdv->pdv_flags |= PDF_HAS_LUN_ID;
+ break;
+ default:
+ break;
+ }
+ }
+
+out:
+ kfree(orig);
+ return (!ret) ? count : ret;
+}
+
+static ssize_t pscsi_check_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev)
+{
+ struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+
+ if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+ !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+ !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+ printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
+ " scsi_lun_id= parameters\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ char *b)
+{
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+ struct scsi_device *sd = pdv->pdv_sd;
+ unsigned char host_id[16];
+ ssize_t bl;
+ int i;
+
+ if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+ snprintf(host_id, 16, "%d", pdv->pdv_host_id);
+ else
+ snprintf(host_id, 16, "PHBA Mode");
+
+ bl = sprintf(b, "SCSI Device Bus Location:"
+ " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
+ pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
+ host_id);
+
+ if (sd) {
+ bl += sprintf(b + bl, " ");
+ bl += sprintf(b + bl, "Vendor: ");
+ for (i = 0; i < 8; i++) {
+ if (ISPRINT(sd->vendor[i])) /* printable character? */
+ bl += sprintf(b + bl, "%c", sd->vendor[i]);
+ else
+ bl += sprintf(b + bl, " ");
+ }
+ bl += sprintf(b + bl, " Model: ");
+ for (i = 0; i < 16; i++) {
+ if (ISPRINT(sd->model[i])) /* printable character ? */
+ bl += sprintf(b + bl, "%c", sd->model[i]);
+ else
+ bl += sprintf(b + bl, " ");
+ }
+ bl += sprintf(b + bl, " Rev: ");
+ for (i = 0; i < 4; i++) {
+ if (ISPRINT(sd->rev[i])) /* printable character ? */
+ bl += sprintf(b + bl, "%c", sd->rev[i]);
+ else
+ bl += sprintf(b + bl, " ");
+ }
+ bl += sprintf(b + bl, "\n");
+ }
+ return bl;
+}
+
+static void pscsi_bi_endio(struct bio *bio, int error)
+{
+ bio_put(bio);
+}
+
+static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
+{
+ struct bio *bio;
+ /*
+ * Use bio_kmalloc() following the comment for bio -> struct request
+ * in block/blk-core.c:blk_make_request()
+ */
+ bio = bio_kmalloc(GFP_KERNEL, sg_num);
+ if (!(bio)) {
+ printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
+ return NULL;
+ }
+ bio->bi_end_io = pscsi_bi_endio;
+
+ return bio;
+}
+
+#if 0
+#define DEBUG_PSCSI(x...) printk(x)
+#else
+#define DEBUG_PSCSI(x...)
+#endif
+
+static int __pscsi_map_task_SG(
+ struct se_task *task,
+ struct scatterlist *task_sg,
+ u32 task_sg_num,
+ int bidi_read)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+ struct page *page;
+ struct scatterlist *sg;
+ u32 data_len = task->task_size, i, len, bytes, off;
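+ /*
+ * Upper bound on the number of pages needed for the payload: the
+ * task size plus the offset into the first SG entry, rounded up to
+ * whole pages.
+ */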
+ int nr_pages = (task->task_size + task_sg[0].offset +
+ PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ int rw = (task->task_data_direction == DMA_TO_DEVICE);
+
+ if (!task->task_size)
+ return 0;
+ /*
+ * For SCF_SCSI_DATA_SG_IO_CDB, use fs/bio.c:bio_add_pc_page() to setup
+ * the bio_vec maplist from TCM struct se_mem -> task->task_sg ->
+ * struct scatterlist memory. The struct se_task->task_sg[] currently needs
+ * to be attached to struct bios for submission to Linux/SCSI using
+ * struct request to struct scsi_device->request_queue.
+ *
+ * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
+ * is ported to upstream SCSI passthrough functionality that accepts
+ * struct scatterlist->page_link or struct page as a parameter.
+ */
+ DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
+
+ for_each_sg(task_sg, sg, task_sg_num, i) {
+ page = sg_page(sg);
+ off = sg->offset;
+ len = sg->length;
+
+ DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+ page, len, off);
+
+ while (len > 0 && data_len > 0) {
+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+ bytes = min(bytes, data_len);
+
+ if (!(bio)) {
+ nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+ nr_pages -= nr_vecs;
+ /*
+ * Calls bio_kmalloc() and sets bio->bi_end_io()
+ */
+ bio = pscsi_get_bio(pdv, nr_vecs);
+ if (!(bio))
+ goto fail;
+
+ if (rw)
+ bio->bi_rw |= REQ_WRITE;
+
+ DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
+ " dir: %s nr_vecs: %d\n", bio,
+ (rw) ? "rw" : "r", nr_vecs);
+ /*
+ * Set *hbio pointer to handle the case:
+ * nr_pages > BIO_MAX_PAGES, where additional
+ * bios need to be added to complete a given
+ * struct se_task
+ */
+ if (!hbio)
+ hbio = tbio = bio;
+ else
+ tbio = tbio->bi_next = bio;
+ }
+
+ DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
+ " bio: %p page: %p len: %d off: %d\n", i, bio,
+ page, len, off);
+
+ rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
+ bio, page, bytes, off);
+ if (rc != bytes)
+ goto fail;
+
+ DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+ bio->bi_vcnt, nr_vecs);
+
+ if (bio->bi_vcnt > nr_vecs) {
+ DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
+ " %d i: %d bio: %p, allocating another"
+ " bio\n", bio->bi_vcnt, i, bio);
+ /*
+ * Clear the pointer so that another bio will
+ * be allocated with pscsi_get_bio() above; the
+ * current bio has already been linked via *tbio
+ * and bio->bi_next.
+ */
+ bio = NULL;
+ }
+
+ page++;
+ len -= bytes;
+ data_len -= bytes;
+ off = 0;
+ }
+ }
+ /*
+ * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
+ * primary SCSI WRITE payload mapped for struct se_task->task_sg[]
+ */
+ if (!(bidi_read)) {
+ /*
+ * Starting with v2.6.31, call blk_make_request() passing in *hbio to
+ * allocate the pSCSI task a struct request.
+ */
+ pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
+ hbio, GFP_KERNEL);
+ if (IS_ERR(pt->pscsi_req)) {
+ printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
+ goto fail;
+ }
+ /*
+ * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
+ * and setup rq callback, CDB and sense.
+ */
+ pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
+
+ return task->task_sg_num;
+ }
+ /*
+ * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
+ * SCSI READ payload mapped for struct se_task->task_sg_bidi[]
+ */
+ pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
+ hbio, GFP_KERNEL);
+ if (IS_ERR(pt->pscsi_req->next_rq)) {
+ printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
+ goto fail;
+ }
+ pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
+
+ return task->task_sg_num;
+fail:
+ while (hbio) {
+ bio = hbio;
+ hbio = hbio->bi_next;
+ bio->bi_next = NULL;
+ bio_endio(bio, 0);
+ }
+ return ret;
+}
+
+static int pscsi_map_task_SG(struct se_task *task)
+{
+ int ret;
+
+ /*
+ * Setup the main struct request for the task->task_sg[] payload
+ */
+
+ ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
+ if (ret >= 0 && task->task_sg_bidi) {
+ /*
+ * If present, set up the extra BIDI-COMMAND SCSI READ
+ * struct request and payload.
+ */
+ ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
+ task->task_sg_num, 1);
+ }
+
+ if (ret < 0)
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ return 0;
+}
+
+/* pscsi_map_task_non_SG():
+ *
+ *
+ */
+static int pscsi_map_task_non_SG(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+ int ret = 0;
+
+ if (pscsi_blk_get_request(task) < 0)
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ if (!task->task_size)
+ return 0;
+
+ ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
+ pt->pscsi_req, T_TASK(cmd)->t_task_buf,
+ task->task_size, GFP_KERNEL);
+ if (ret < 0) {
+ printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ return 0;
+}
+
+static int pscsi_CDB_none(struct se_task *task)
+{
+ return pscsi_blk_get_request(task);
+}
+
+/* pscsi_get_cdb():
+ *
+ *
+ */
+static unsigned char *pscsi_get_cdb(struct se_task *task)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+ return pt->pscsi_cdb;
+}
+
+/* pscsi_get_sense_buffer():
+ *
+ *
+ */
+static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+ return (unsigned char *)&pt->pscsi_sense[0];
+}
+
+/* pscsi_get_device_rev():
+ *
+ *
+ */
+static u32 pscsi_get_device_rev(struct se_device *dev)
+{
+ struct pscsi_dev_virt *pdv = dev->dev_ptr;
+ struct scsi_device *sd = pdv->pdv_sd;
+
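+ /*
+ * struct scsi_device->scsi_level holds the reported INQUIRY ANSI
+ * version plus one, so subtract one here and clamp to a minimum
+ * of 1 for very old devices.
+ */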
+ return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+}
+
+/* pscsi_get_device_type():
+ *
+ *
+ */
+static u32 pscsi_get_device_type(struct se_device *dev)
+{
+ struct pscsi_dev_virt *pdv = dev->dev_ptr;
+ struct scsi_device *sd = pdv->pdv_sd;
+
+ return sd->type;
+}
+
+static sector_t pscsi_get_blocks(struct se_device *dev)
+{
+ struct pscsi_dev_virt *pdv = dev->dev_ptr;
+
+ if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+ return pdv->pdv_bd->bd_part->nr_sects;
+
+ dump_stack();
+ return 0;
+}
+
+/* pscsi_process_SAM_status():
+ *
+ *
+ */
+static inline void pscsi_process_SAM_status(
+ struct se_task *task,
+ struct pscsi_plugin_task *pt)
+{
+ task->task_scsi_status = status_byte(pt->pscsi_result);
+ if ((task->task_scsi_status)) {
+ task->task_scsi_status <<= 1;
+ printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
+ " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+ pt->pscsi_result);
+ }
+
+ switch (host_byte(pt->pscsi_result)) {
+ case DID_OK:
+ transport_complete_task(task, (!task->task_scsi_status));
+ break;
+ default:
+ printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
+ " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+ pt->pscsi_result);
+ task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+ task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ TASK_CMD(task)->transport_error_status =
+ PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ transport_complete_task(task, 0);
+ break;
+ }
+
+ return;
+}
+
+static void pscsi_req_done(struct request *req, int uptodate)
+{
+ struct se_task *task = req->end_io_data;
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+ pt->pscsi_result = req->errors;
+ pt->pscsi_resid = req->resid_len;
+
+ pscsi_process_SAM_status(task, pt);
+ /*
+ * Release BIDI-READ if present
+ */
+ if (req->next_rq != NULL)
+ __blk_put_request(req->q, req->next_rq);
+
+ __blk_put_request(req->q, req);
+ pt->pscsi_req = NULL;
+}
+
+static struct se_subsystem_api pscsi_template = {
+ .name = "pscsi",
+ .owner = THIS_MODULE,
+ .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
+ .cdb_none = pscsi_CDB_none,
+ .map_task_non_SG = pscsi_map_task_non_SG,
+ .map_task_SG = pscsi_map_task_SG,
+ .attach_hba = pscsi_attach_hba,
+ .detach_hba = pscsi_detach_hba,
+ .pmode_enable_hba = pscsi_pmode_enable_hba,
+ .allocate_virtdevice = pscsi_allocate_virtdevice,
+ .create_virtdevice = pscsi_create_virtdevice,
+ .free_device = pscsi_free_device,
+ .transport_complete = pscsi_transport_complete,
+ .alloc_task = pscsi_alloc_task,
+ .do_task = pscsi_do_task,
+ .free_task = pscsi_free_task,
+ .check_configfs_dev_params = pscsi_check_configfs_dev_params,
+ .set_configfs_dev_params = pscsi_set_configfs_dev_params,
+ .show_configfs_dev_params = pscsi_show_configfs_dev_params,
+ .get_cdb = pscsi_get_cdb,
+ .get_sense_buffer = pscsi_get_sense_buffer,
+ .get_device_rev = pscsi_get_device_rev,
+ .get_device_type = pscsi_get_device_type,
+ .get_blocks = pscsi_get_blocks,
+};
+
+static int __init pscsi_module_init(void)
+{
+ return transport_subsystem_register(&pscsi_template);
+}
+
+static void pscsi_module_exit(void)
+{
+ transport_subsystem_release(&pscsi_template);
+}
+
+MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(pscsi_module_init);
+module_exit(pscsi_module_exit);
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
new file mode 100644
index 000000000000..a4cd5d352c3a
--- /dev/null
+++ b/drivers/target/target_core_pscsi.h
@@ -0,0 +1,65 @@
+#ifndef TARGET_CORE_PSCSI_H
+#define TARGET_CORE_PSCSI_H
+
+#define PSCSI_VERSION "v4.0"
+#define PSCSI_VIRTUAL_HBA_DEPTH 2048
+
+/* used in pscsi_find_alloc_len() */
+#ifndef INQUIRY_DATA_SIZE
+#define INQUIRY_DATA_SIZE 0x24
+#endif
+
+/* used in pscsi_add_device_to_list() */
+#define PSCSI_DEFAULT_QUEUEDEPTH 1
+
+#define PS_RETRY 5
+#define PS_TIMEOUT_DISK (15*HZ)
+#define PS_TIMEOUT_OTHER (500*HZ)
+
+#include <linux/device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_device.h>
+#include <linux/kref.h>
+#include <linux/kobject.h>
+
+struct pscsi_plugin_task {
+ struct se_task pscsi_task;
+ unsigned char *pscsi_cdb;
+ unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
+ unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
+ int pscsi_direction;
+ int pscsi_result;
+ u32 pscsi_resid;
+ struct request *pscsi_req;
+} ____cacheline_aligned;
+
+#define PDF_HAS_CHANNEL_ID 0x01
+#define PDF_HAS_TARGET_ID 0x02
+#define PDF_HAS_LUN_ID 0x04
+#define PDF_HAS_VPD_UNIT_SERIAL 0x08
+#define PDF_HAS_VPD_DEV_IDENT 0x10
+#define PDF_HAS_VIRT_HOST_ID 0x20
+
+struct pscsi_dev_virt {
+ int pdv_flags;
+ int pdv_host_id;
+ int pdv_channel_id;
+ int pdv_target_id;
+ int pdv_lun_id;
+ struct block_device *pdv_bd;
+ struct scsi_device *pdv_sd;
+ struct se_hba *pdv_se_hba;
+} ____cacheline_aligned;
+
+typedef enum phv_modes {
+ PHV_VIRUTAL_HOST_ID,
+ PHV_LLD_SCSI_HOST_NO
+} phv_modes_t;
+
+struct pscsi_hba_virt {
+ int phv_host_id;
+ phv_modes_t phv_mode;
+ struct Scsi_Host *phv_lld_host;
+} ____cacheline_aligned;
+
+#endif /*** TARGET_CORE_PSCSI_H ***/
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
new file mode 100644
index 000000000000..979aebf20019
--- /dev/null
+++ b/drivers/target/target_core_rd.c
@@ -0,0 +1,1091 @@
+/*******************************************************************************
+ * Filename: target_core_rd.c
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_rd.h"
+
+static struct se_subsystem_api rd_dr_template;
+static struct se_subsystem_api rd_mcp_template;
+
+/* #define DEBUG_RAMDISK_MCP */
+/* #define DEBUG_RAMDISK_DR */
+
+/* rd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+ struct rd_host *rd_host;
+
+ rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
+ if (!(rd_host)) {
+ printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
+ return -ENOMEM;
+ }
+
+ rd_host->rd_host_id = host_id;
+
+ atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
+ atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
+ hba->hba_ptr = (void *) rd_host;
+
+ printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+ " Generic Target Core Stack %s\n", hba->hba_id,
+ RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+ printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+ " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
+ rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
+ RD_MAX_SECTORS);
+
+ return 0;
+}
+
+static void rd_detach_hba(struct se_hba *hba)
+{
+ struct rd_host *rd_host = hba->hba_ptr;
+
+ printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+ " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
+
+ kfree(rd_host);
+ hba->hba_ptr = NULL;
+}
+
+/* rd_release_device_space():
+ *
+ *
+ */
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+ u32 i, j, page_count = 0, sg_per_table;
+ struct rd_dev_sg_table *sg_table;
+ struct page *pg;
+ struct scatterlist *sg;
+
+ if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+ return;
+
+ sg_table = rd_dev->sg_table_array;
+
+ for (i = 0; i < rd_dev->sg_table_count; i++) {
+ sg = sg_table[i].sg_table;
+ sg_per_table = sg_table[i].rd_sg_count;
+
+ for (j = 0; j < sg_per_table; j++) {
+ pg = sg_page(&sg[j]);
+ if ((pg)) {
+ __free_page(pg);
+ page_count++;
+ }
+ }
+
+ kfree(sg);
+ }
+
+ printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+ " Device ID: %u, pages %u in %u tables total bytes %lu\n",
+ rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+ rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+ kfree(sg_table);
+ rd_dev->sg_table_array = NULL;
+ rd_dev->sg_table_count = 0;
+}
+
+
+/* rd_build_device_space():
+ *
+ *
+ */
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+ u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
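+ /*
+ * Cap each scatterlist table at RD_MAX_ALLOCATION_SIZE bytes worth
+ * of entries; with a typical 32-byte struct scatterlist on 64-bit
+ * builds that works out to 2048 entries per table, although the
+ * exact count depends on the kernel configuration.
+ */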
+ u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+ struct rd_dev_sg_table *sg_table;
+ struct page *pg;
+ struct scatterlist *sg;
+
+ if (rd_dev->rd_page_count <= 0) {
+ printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+ rd_dev->rd_page_count);
+ return -1;
+ }
+ total_sg_needed = rd_dev->rd_page_count;
+
+ sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+ sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+ if (!(sg_table)) {
+ printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+ " scatterlist tables\n");
+ return -1;
+ }
+
+ rd_dev->sg_table_array = sg_table;
+ rd_dev->sg_table_count = sg_tables;
+
+ while (total_sg_needed) {
+ sg_per_table = (total_sg_needed > max_sg_per_table) ?
+ max_sg_per_table : total_sg_needed;
+
+ sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!(sg)) {
+ printk(KERN_ERR "Unable to allocate scatterlist array"
+ " for struct rd_dev\n");
+ return -1;
+ }
+
+ sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+
+ sg_table[i].sg_table = sg;
+ sg_table[i].rd_sg_count = sg_per_table;
+ sg_table[i].page_start_offset = page_offset;
+ sg_table[i++].page_end_offset = (page_offset + sg_per_table)
+ - 1;
+
+ for (j = 0; j < sg_per_table; j++) {
+ pg = alloc_pages(GFP_KERNEL, 0);
+ if (!(pg)) {
+ printk(KERN_ERR "Unable to allocate scatterlist"
+ " pages for struct rd_dev_sg_table\n");
+ return -1;
+ }
+ sg_assign_page(&sg[j], pg);
+ sg[j].length = PAGE_SIZE;
+ }
+
+ page_offset += sg_per_table;
+ total_sg_needed -= sg_per_table;
+ }
+
+ printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+ " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+ rd_dev->rd_dev_id, rd_dev->rd_page_count,
+ rd_dev->sg_table_count);
+
+ return 0;
+}
+
+static void *rd_allocate_virtdevice(
+ struct se_hba *hba,
+ const char *name,
+ int rd_direct)
+{
+ struct rd_dev *rd_dev;
+ struct rd_host *rd_host = hba->hba_ptr;
+
+ rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
+ if (!(rd_dev)) {
+ printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
+ return NULL;
+ }
+
+ rd_dev->rd_host = rd_host;
+ rd_dev->rd_direct = rd_direct;
+
+ return rd_dev;
+}
+
+static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+ return rd_allocate_virtdevice(hba, name, 1);
+}
+
+static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+ return rd_allocate_virtdevice(hba, name, 0);
+}
+
+/* rd_create_virtdevice():
+ *
+ *
+ */
+static struct se_device *rd_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p,
+ int rd_direct)
+{
+ struct se_device *dev;
+ struct se_dev_limits dev_limits;
+ struct rd_dev *rd_dev = p;
+ struct rd_host *rd_host = hba->hba_ptr;
+ int dev_flags = 0;
+ char prod[16], rev[4];
+
+ memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+ if (rd_build_device_space(rd_dev) < 0)
+ goto fail;
+
+ snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
+ snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
+ RD_MCP_VERSION);
+
+ dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
+ dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
+ dev_limits.limits.max_sectors = RD_MAX_SECTORS;
+ dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+ dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
+
+ dev = transport_add_device_to_core_hba(hba,
+ (rd_dev->rd_direct) ? &rd_dr_template :
+ &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
+ &dev_limits, prod, rev);
+ if (!(dev))
+ goto fail;
+
+ rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
+ rd_dev->rd_queue_depth = dev->queue_depth;
+
+ printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+ " %u pages in %u tables, %lu total bytes\n",
+ rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
+ "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+ rd_dev->sg_table_count,
+ (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
+
+ return dev;
+
+fail:
+ rd_release_device_space(rd_dev);
+ return NULL;
+}
+
+static struct se_device *rd_DIRECT_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p)
+{
+ return rd_create_virtdevice(hba, se_dev, p, 1);
+}
+
+static struct se_device *rd_MEMCPY_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p)
+{
+ return rd_create_virtdevice(hba, se_dev, p, 0);
+}
+
+/* rd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_device(void *p)
+{
+ struct rd_dev *rd_dev = p;
+
+ rd_release_device_space(rd_dev);
+ kfree(rd_dev);
+}
+
+static inline struct rd_request *RD_REQ(struct se_task *task)
+{
+ return container_of(task, struct rd_request, rd_task);
+}
+
+static struct se_task *
+rd_alloc_task(struct se_cmd *cmd)
+{
+ struct rd_request *rd_req;
+
+ rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
+ if (!rd_req) {
+ printk(KERN_ERR "Unable to allocate struct rd_request\n");
+ return NULL;
+ }
+ rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
+
+ return &rd_req->rd_task;
+}
+
+/* rd_get_sg_table():
+ *
+ *
+ */
+static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
+{
+ u32 i;
+ struct rd_dev_sg_table *sg_table;
+
+ for (i = 0; i < rd_dev->sg_table_count; i++) {
+ sg_table = &rd_dev->sg_table_array[i];
+ if ((sg_table->page_start_offset <= page) &&
+ (sg_table->page_end_offset >= page))
+ return sg_table;
+ }
+
+ printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
+ page);
+
+ return NULL;
+}
+
+/* rd_MEMCPY_read():
+ *
+ *
+ */
+static int rd_MEMCPY_read(struct rd_request *req)
+{
+ struct se_task *task = &req->rd_task;
+ struct rd_dev *dev = req->rd_dev;
+ struct rd_dev_sg_table *table;
+ struct scatterlist *sg_d, *sg_s;
+ void *dst, *src;
+ u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+ u32 length, page_end = 0, table_sg_end;
+ u32 rd_offset = req->rd_offset;
+
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ table_sg_end = (table->page_end_offset - req->rd_page);
+ sg_d = task->task_sg;
+ sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+ " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
+ req->rd_page, req->rd_offset);
+#endif
+ src_offset = rd_offset;
+
+ while (req->rd_size) {
+ if ((sg_d[i].length - dst_offset) <
+ (sg_s[j].length - src_offset)) {
+ length = (sg_d[i].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+ " offset: %u sg_s[%d].length: %u\n", i,
+ &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
+ sg_s[j].length);
+ printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+ " src_offset: %u\n", length, dst_offset,
+ src_offset);
+#endif
+ if (length > req->rd_size)
+ length = req->rd_size;
+
+ dst = sg_virt(&sg_d[i++]) + dst_offset;
+ if (!dst)
+ BUG();
+
+ src = sg_virt(&sg_s[j]) + src_offset;
+ if (!src)
+ BUG();
+
+ dst_offset = 0;
+ src_offset = length;
+ page_end = 0;
+ } else {
+ length = (sg_s[j].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+ " offset: %u sg_s[%d].length: %u\n", i,
+ &sg_d[i], sg_d[i].length, sg_d[i].offset,
+ j, sg_s[j].length);
+ printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+ " src_offset: %u\n", length, dst_offset,
+ src_offset);
+#endif
+ if (length > req->rd_size)
+ length = req->rd_size;
+
+ dst = sg_virt(&sg_d[i]) + dst_offset;
+ if (!dst)
+ BUG();
+
+ if (sg_d[i].length == length) {
+ i++;
+ dst_offset = 0;
+ } else
+ dst_offset = length;
+
+ src = sg_virt(&sg_s[j++]) + src_offset;
+ if (!src)
+ BUG();
+
+ src_offset = 0;
+ page_end = 1;
+ }
+
+ memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+ " i: %u, j: %u\n", req->rd_page,
+ (req->rd_size - length), length, i, j);
+#endif
+ req->rd_size -= length;
+ if (!(req->rd_size))
+ return 0;
+
+ if (!page_end)
+ continue;
+
+ if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "page: %u in same page table\n",
+ req->rd_page);
+#endif
+ continue;
+ }
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "getting new page table for page: %u\n",
+ req->rd_page);
+#endif
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ sg_s = &table->sg_table[j = 0];
+ }
+
+ return 0;
+}
+
+/* rd_MEMCPY_write():
+ *
+ *
+ */
+static int rd_MEMCPY_write(struct rd_request *req)
+{
+ struct se_task *task = &req->rd_task;
+ struct rd_dev *dev = req->rd_dev;
+ struct rd_dev_sg_table *table;
+ struct scatterlist *sg_d, *sg_s;
+ void *dst, *src;
+ u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+ u32 length, page_end = 0, table_sg_end;
+ u32 rd_offset = req->rd_offset;
+
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ table_sg_end = (table->page_end_offset - req->rd_page);
+ sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
+ sg_s = task->task_sg;
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+ " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
+ req->rd_page, req->rd_offset);
+#endif
+ dst_offset = rd_offset;
+
+ while (req->rd_size) {
+ if ((sg_s[i].length - src_offset) <
+ (sg_d[j].length - dst_offset)) {
+ length = (sg_s[i].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+ " offset: %d sg_d[%d].length: %u\n", i,
+ &sg_s[i], sg_s[i].length, sg_s[i].offset,
+ j, sg_d[j].length);
+ printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+ " dst_offset: %u\n", length, src_offset,
+ dst_offset);
+#endif
+ if (length > req->rd_size)
+ length = req->rd_size;
+
+ src = sg_virt(&sg_s[i++]) + src_offset;
+ if (!src)
+ BUG();
+
+ dst = sg_virt(&sg_d[j]) + dst_offset;
+ if (!dst)
+ BUG();
+
+ src_offset = 0;
+ dst_offset = length;
+ page_end = 0;
+ } else {
+ length = (sg_d[j].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+ " offset: %d sg_d[%d].length: %u\n", i,
+ &sg_s[i], sg_s[i].length, sg_s[i].offset,
+ j, sg_d[j].length);
+ printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+ " dst_offset: %u\n", length, src_offset,
+ dst_offset);
+#endif
+ if (length > req->rd_size)
+ length = req->rd_size;
+
+ src = sg_virt(&sg_s[i]) + src_offset;
+ if (!src)
+ BUG();
+
+ if (sg_s[i].length == length) {
+ i++;
+ src_offset = 0;
+ } else
+ src_offset = length;
+
+ dst = sg_virt(&sg_d[j++]) + dst_offset;
+ if (!dst)
+ BUG();
+
+ dst_offset = 0;
+ page_end = 1;
+ }
+
+ memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+ " i: %u, j: %u\n", req->rd_page,
+ (req->rd_size - length), length, i, j);
+#endif
+ req->rd_size -= length;
+ if (!(req->rd_size))
+ return 0;
+
+ if (!page_end)
+ continue;
+
+ if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "page: %u in same page table\n",
+ req->rd_page);
+#endif
+ continue;
+ }
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "getting new page table for page: %u\n",
+ req->rd_page);
+#endif
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ sg_d = &table->sg_table[j = 0];
+ }
+
+ return 0;
+}
+
+/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_MEMCPY_do_task(struct se_task *task)
+{
+ struct se_device *dev = task->se_dev;
+ struct rd_request *req = RD_REQ(task);
+ unsigned long long lba;
+ int ret;
+
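+ /*
+ * Translate the task LBA into a ramdisk page index and byte offset
+ * within that page. For example, assuming 4 KiB pages and a
+ * 512-byte block size, LBA 9 maps to rd_page 1 with rd_offset 512.
+ */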
+ req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+ lba = task->task_lba;
+ req->rd_offset = (do_div(lba,
+ (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
+ DEV_ATTRIB(dev)->block_size;
+ req->rd_size = task->task_size;
+
+ if (task->task_data_direction == DMA_FROM_DEVICE)
+ ret = rd_MEMCPY_read(req);
+ else
+ ret = rd_MEMCPY_write(req);
+
+ if (ret != 0)
+ return ret;
+
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/* rd_DIRECT_with_offset():
+ *
+ *
+ */
+static int rd_DIRECT_with_offset(
+ struct se_task *task,
+ struct list_head *se_mem_list,
+ u32 *se_mem_cnt,
+ u32 *task_offset)
+{
+ struct rd_request *req = RD_REQ(task);
+ struct rd_dev *dev = req->rd_dev;
+ struct rd_dev_sg_table *table;
+ struct se_mem *se_mem;
+ struct scatterlist *sg_s;
+ u32 j = 0, set_offset = 1;
+ u32 get_next_table = 0, offset_length, table_sg_end;
+
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ table_sg_end = (table->page_end_offset - req->rd_page);
+ sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+ (task->task_data_direction == DMA_TO_DEVICE) ?
+ "Write" : "Read",
+ task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
+#endif
+ while (req->rd_size) {
+ se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+ if (!(se_mem)) {
+ printk(KERN_ERR "Unable to allocate struct se_mem\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&se_mem->se_list);
+
+ if (set_offset) {
+ offset_length = sg_s[j].length - req->rd_offset;
+ if (offset_length > req->rd_size)
+ offset_length = req->rd_size;
+
+ se_mem->se_page = sg_page(&sg_s[j++]);
+ se_mem->se_off = req->rd_offset;
+ se_mem->se_len = offset_length;
+
+ set_offset = 0;
+ get_next_table = (j > table_sg_end);
+ goto check_eot;
+ }
+
+ offset_length = (req->rd_size < req->rd_offset) ?
+ req->rd_size : req->rd_offset;
+
+ se_mem->se_page = sg_page(&sg_s[j]);
+ se_mem->se_len = offset_length;
+
+ set_offset = 1;
+
+check_eot:
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
+ " se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
+ req->rd_page, req->rd_size, offset_length, j, se_mem,
+ se_mem->se_page, se_mem->se_off, se_mem->se_len);
+#endif
+ list_add_tail(&se_mem->se_list, se_mem_list);
+ (*se_mem_cnt)++;
+
+ req->rd_size -= offset_length;
+ if (!(req->rd_size))
+ goto out;
+
+ if (!set_offset && !get_next_table)
+ continue;
+
+ if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "page: %u in same page table\n",
+ req->rd_page);
+#endif
+ continue;
+ }
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "getting new page table for page: %u\n",
+ req->rd_page);
+#endif
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ sg_s = &table->sg_table[j = 0];
+ }
+
+out:
+ T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
+ *se_mem_cnt);
+#endif
+ return 0;
+}
+
+/* rd_DIRECT_without_offset():
+ *
+ *
+ */
+static int rd_DIRECT_without_offset(
+ struct se_task *task,
+ struct list_head *se_mem_list,
+ u32 *se_mem_cnt,
+ u32 *task_offset)
+{
+ struct rd_request *req = RD_REQ(task);
+ struct rd_dev *dev = req->rd_dev;
+ struct rd_dev_sg_table *table;
+ struct se_mem *se_mem;
+ struct scatterlist *sg_s;
+ u32 length, j = 0;
+
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
+ (task->task_data_direction == DMA_TO_DEVICE) ?
+ "Write" : "Read",
+ task->task_lba, req->rd_size, req->rd_page);
+#endif
+ while (req->rd_size) {
+ se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+ if (!(se_mem)) {
+ printk(KERN_ERR "Unable to allocate struct se_mem\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&se_mem->se_list);
+
+ length = (req->rd_size < sg_s[j].length) ?
+ req->rd_size : sg_s[j].length;
+
+ se_mem->se_page = sg_page(&sg_s[j++]);
+ se_mem->se_len = length;
+
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
+ " se_page: %p se_off: %u se_len: %u\n", req->rd_page,
+ req->rd_size, j, se_mem, se_mem->se_page,
+ se_mem->se_off, se_mem->se_len);
+#endif
+ list_add_tail(&se_mem->se_list, se_mem_list);
+ (*se_mem_cnt)++;
+
+ req->rd_size -= length;
+ if (!(req->rd_size))
+ goto out;
+
+ if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+ printk("page: %u in same page table\n",
+ req->rd_page);
+#endif
+ continue;
+ }
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "getting new page table for page: %u\n",
+ req->rd_page);
+#endif
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ sg_s = &table->sg_table[j = 0];
+ }
+
+out:
+ T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
+ *se_mem_cnt);
+#endif
+ return 0;
+}
+
+/* rd_DIRECT_do_se_mem_map():
+ *
+ *
+ */
+static int rd_DIRECT_do_se_mem_map(
+ struct se_task *task,
+ struct list_head *se_mem_list,
+ void *in_mem,
+ struct se_mem *in_se_mem,
+ struct se_mem **out_se_mem,
+ u32 *se_mem_cnt,
+ u32 *task_offset_in)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct rd_request *req = RD_REQ(task);
+ u32 task_offset = *task_offset_in;
+ unsigned long long lba;
+ int ret;
+
+ req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
+ PAGE_SIZE);
+ lba = task->task_lba;
+ req->rd_offset = (do_div(lba,
+ (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
+ DEV_ATTRIB(task->se_dev)->block_size;
+ req->rd_size = task->task_size;
+
+ if (req->rd_offset)
+ ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
+ task_offset_in);
+ else
+ ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
+ task_offset_in);
+
+ if (ret < 0)
+ return ret;
+
+ if (CMD_TFO(cmd)->task_sg_chaining == 0)
+ return 0;
+ /*
+ * Currently prevent writers from multiple HW fabrics doing
+ * pci_map_sg() to RD_DR's internal scatterlist memory.
+ */
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ printk(KERN_ERR "DMA_TO_DEVICE not supported for"
+ " RAMDISK_DR with task_sg_chaining=1\n");
+ return -1;
+ }
+ /*
+ * Special case for if task_sg_chaining is enabled, then
+ * we setup struct se_task->task_sg[], as it will be used by
+ * transport_do_task_sg_chain() for creating chained SGLs
+ * across multiple struct se_task->task_sg[].
+ */
+ if (!(transport_calc_sg_num(task,
+ list_entry(T_TASK(cmd)->t_mem_list->next,
+ struct se_mem, se_list),
+ task_offset)))
+ return -1;
+
+ return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
+ list_entry(T_TASK(cmd)->t_mem_list->next,
+ struct se_mem, se_list),
+ out_se_mem, se_mem_cnt, task_offset_in);
+}
+
+/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_DIRECT_do_task(struct se_task *task)
+{
+ /*
+ * At this point the locally allocated RD tables have been mapped
+ * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
+ */
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/* rd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_task(struct se_task *task)
+{
+ kfree(RD_REQ(task));
+}
+
+enum {
+ Opt_rd_pages, Opt_err
+};
+
+static match_table_t tokens = {
+ {Opt_rd_pages, "rd_pages=%d"},
+ {Opt_err, NULL}
+};
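+
+/*
+ * Example (illustrative value only): writing "rd_pages=32768" to the
+ * configfs device control attribute asks rd_build_device_space() for
+ * 32768 backing pages, i.e. 128 MiB with 4 KiB pages.
+ */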
+
+static ssize_t rd_set_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ const char *page,
+ ssize_t count)
+{
+ struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+
+ while ((ptr = strsep(&opts, ",")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_rd_pages:
+ match_int(args, &arg);
+ rd_dev->rd_page_count = arg;
+ printk(KERN_INFO "RAMDISK: Referencing Page"
+ " Count: %u\n", rd_dev->rd_page_count);
+ rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ kfree(orig);
+ return (!ret) ? count : ret;
+}
+
+static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+ struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+
+ if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+ printk(KERN_INFO "Missing rd_pages= parameter\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t rd_show_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ char *b)
+{
+ struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+ ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
+ rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
+ "rd_direct" : "rd_mcp");
+ bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
+ " SG_table_count: %u\n", rd_dev->rd_page_count,
+ PAGE_SIZE, rd_dev->sg_table_count);
+ return bl;
+}
+
+/* rd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static unsigned char *rd_get_cdb(struct se_task *task)
+{
+ struct rd_request *req = RD_REQ(task);
+
+ return req->rd_scsi_cdb;
+}
+
+static u32 rd_get_device_rev(struct se_device *dev)
+{
+ return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 rd_get_device_type(struct se_device *dev)
+{
+ return TYPE_DISK;
+}
+
+static sector_t rd_get_blocks(struct se_device *dev)
+{
+ struct rd_dev *rd_dev = dev->dev_ptr;
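+ /*
+ * Report the last addressable LBA. For example, assuming 4 KiB
+ * pages and a 512-byte block size, 4096 pages expose 32768 blocks,
+ * so 32767 is returned here.
+ */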
+ unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
+ DEV_ATTRIB(dev)->block_size) - 1;
+
+ return blocks_long;
+}
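+
+/*
+ * Worked example for rd_get_blocks(), assuming a 4 KiB PAGE_SIZE and the
+ * default 512 byte block_size: rd_page_count = 8192 gives
+ * (8192 * 4096) / 512 - 1 = 65535, i.e. the last LBA of a 32 MiB ramdisk.
+ */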
+
+static struct se_subsystem_api rd_dr_template = {
+ .name = "rd_dr",
+ .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
+ .attach_hba = rd_attach_hba,
+ .detach_hba = rd_detach_hba,
+ .allocate_virtdevice = rd_DIRECT_allocate_virtdevice,
+ .create_virtdevice = rd_DIRECT_create_virtdevice,
+ .free_device = rd_free_device,
+ .alloc_task = rd_alloc_task,
+ .do_task = rd_DIRECT_do_task,
+ .free_task = rd_free_task,
+ .check_configfs_dev_params = rd_check_configfs_dev_params,
+ .set_configfs_dev_params = rd_set_configfs_dev_params,
+ .show_configfs_dev_params = rd_show_configfs_dev_params,
+ .get_cdb = rd_get_cdb,
+ .get_device_rev = rd_get_device_rev,
+ .get_device_type = rd_get_device_type,
+ .get_blocks = rd_get_blocks,
+ .do_se_mem_map = rd_DIRECT_do_se_mem_map,
+};
+
+static struct se_subsystem_api rd_mcp_template = {
+ .name = "rd_mcp",
+ .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
+ .attach_hba = rd_attach_hba,
+ .detach_hba = rd_detach_hba,
+ .allocate_virtdevice = rd_MEMCPY_allocate_virtdevice,
+ .create_virtdevice = rd_MEMCPY_create_virtdevice,
+ .free_device = rd_free_device,
+ .alloc_task = rd_alloc_task,
+ .do_task = rd_MEMCPY_do_task,
+ .free_task = rd_free_task,
+ .check_configfs_dev_params = rd_check_configfs_dev_params,
+ .set_configfs_dev_params = rd_set_configfs_dev_params,
+ .show_configfs_dev_params = rd_show_configfs_dev_params,
+ .get_cdb = rd_get_cdb,
+ .get_device_rev = rd_get_device_rev,
+ .get_device_type = rd_get_device_type,
+ .get_blocks = rd_get_blocks,
+};
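+
+/*
+ * The two templates above differ only in their allocate/create/do_task
+ * callbacks: rd_dr maps the ramdisk's internal scatterlists directly into
+ * struct se_mem via .do_se_mem_map, which is why rd_DIRECT_do_task() can
+ * complete immediately, while rd_mcp performs memcpy-based I/O in
+ * rd_MEMCPY_do_task().
+ */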
+
+int __init rd_module_init(void)
+{
+ int ret;
+
+ ret = transport_subsystem_register(&rd_dr_template);
+ if (ret < 0)
+ return ret;
+
+ ret = transport_subsystem_register(&rd_mcp_template);
+ if (ret < 0) {
+ transport_subsystem_release(&rd_dr_template);
+ return ret;
+ }
+
+ return 0;
+}
+
+void rd_module_exit(void)
+{
+ transport_subsystem_release(&rd_dr_template);
+ transport_subsystem_release(&rd_mcp_template);
+}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
new file mode 100644
index 000000000000..13badfbaf9c0
--- /dev/null
+++ b/drivers/target/target_core_rd.h
@@ -0,0 +1,73 @@
+#ifndef TARGET_CORE_RD_H
+#define TARGET_CORE_RD_H
+
+#define RD_HBA_VERSION "v4.0"
+#define RD_DR_VERSION "4.0"
+#define RD_MCP_VERSION "4.0"
+
+/* Largest piece of memory kmalloc can allocate */
+#define RD_MAX_ALLOCATION_SIZE 65536
+/* Maximum queuedepth for the Ramdisk HBA */
+#define RD_HBA_QUEUE_DEPTH 256
+#define RD_DEVICE_QUEUE_DEPTH 32
+#define RD_MAX_DEVICE_QUEUE_DEPTH 128
+#define RD_BLOCKSIZE 512
+#define RD_MAX_SECTORS 1024
+
+extern struct kmem_cache *se_mem_cache;
+
+/* Used in target_core_init_configfs() for virtual LUN 0 access */
+int __init rd_module_init(void);
+void rd_module_exit(void);
+
+#define RRF_EMULATE_CDB 0x01
+#define RRF_GOT_LBA 0x02
+
+struct rd_request {
+ struct se_task rd_task;
+
+ /* SCSI CDB from iSCSI Command PDU */
+ unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+ /* Offset from start of page */
+ u32 rd_offset;
+ /* Starting page in Ramdisk for request */
+ u32 rd_page;
+ /* Total number of pages needed for request */
+ u32 rd_page_count;
+ /* Scatterlist count */
+ u32 rd_size;
+ /* Ramdisk device */
+ struct rd_dev *rd_dev;
+} ____cacheline_aligned;
+
+struct rd_dev_sg_table {
+ u32 page_start_offset;
+ u32 page_end_offset;
+ u32 rd_sg_count;
+ struct scatterlist *sg_table;
+} ____cacheline_aligned;
+
+#define RDF_HAS_PAGE_COUNT 0x01
+
+struct rd_dev {
+ int rd_direct;
+ u32 rd_flags;
+ /* Unique Ramdisk Device ID in Ramdisk HBA */
+ u32 rd_dev_id;
+ /* Total page count for ramdisk device */
+ u32 rd_page_count;
+ /* Number of SG tables in sg_table_array */
+ u32 sg_table_count;
+ u32 rd_queue_depth;
+ /* Array of rd_dev_sg_table_t containing scatterlists */
+ struct rd_dev_sg_table *sg_table_array;
+ /* Ramdisk HBA device is connected to */
+ struct rd_host *rd_host;
+} ____cacheline_aligned;
+
+struct rd_host {
+ u32 rd_host_dev_id_count;
+ u32 rd_host_id; /* Unique Ramdisk Host ID */
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_RD_H */
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
new file mode 100644
index 000000000000..dc6fed037ab3
--- /dev/null
+++ b/drivers/target/target_core_scdb.c
@@ -0,0 +1,105 @@
+/*******************************************************************************
+ * Filename: target_core_scdb.c
+ *
+ * This file contains the generic target engine Split CDB related functions.
+ *
+ * Copyright (c) 2004-2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <scsi/scsi.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_scdb.h"
+
+/* split_cdb_XX_6():
+ *
+ * 21-bit LBA w/ 8-bit SECTORS
+ */
+void split_cdb_XX_6(
+ unsigned long long lba,
+ u32 *sectors,
+ unsigned char *cdb)
+{
+ cdb[1] = (lba >> 16) & 0x1f;
+ cdb[2] = (lba >> 8) & 0xff;
+ cdb[3] = lba & 0xff;
+ cdb[4] = *sectors & 0xff;
+}
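+
+/*
+ * Worked example for the 6-byte CDB split: lba = 0x12345 and *sectors = 8
+ * yield cdb[1] = 0x01, cdb[2] = 0x23, cdb[3] = 0x45, cdb[4] = 0x08, since
+ * only the low 21 bits of the LBA fit in bytes 1-3.
+ */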
+
+/* split_cdb_XX_10():
+ *
+ * 32-bit LBA w/ 16-bit SECTORS
+ */
+void split_cdb_XX_10(
+ unsigned long long lba,
+ u32 *sectors,
+ unsigned char *cdb)
+{
+ put_unaligned_be32(lba, &cdb[2]);
+ put_unaligned_be16(*sectors, &cdb[7]);
+}
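+
+/*
+ * Worked example for the 10-byte CDB split: lba = 0x12345678 and
+ * *sectors = 0x10 store big-endian 12 34 56 78 into cdb[2..5] and
+ * 00 10 into cdb[7..8].
+ */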
+
+/* split_cdb_XX_12():
+ *
+ * 32-bit LBA w/ 32-bit SECTORS
+ */
+void split_cdb_XX_12(
+ unsigned long long lba,
+ u32 *sectors,
+ unsigned char *cdb)
+{
+ put_unaligned_be32(lba, &cdb[2]);
+ put_unaligned_be32(*sectors, &cdb[6]);
+}
+
+/* split_cdb_XX_16():
+ *
+ * 64-bit LBA w/ 32-bit SECTORS
+ */
+void split_cdb_XX_16(
+ unsigned long long lba,
+ u32 *sectors,
+ unsigned char *cdb)
+{
+ put_unaligned_be64(lba, &cdb[2]);
+ put_unaligned_be32(*sectors, &cdb[10]);
+}
+
+/*
+ * split_cdb_XX_32():
+ *
+ * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32
+ */
+void split_cdb_XX_32(
+ unsigned long long lba,
+ u32 *sectors,
+ unsigned char *cdb)
+{
+ put_unaligned_be64(lba, &cdb[12]);
+ put_unaligned_be32(*sectors, &cdb[28]);
+}
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
new file mode 100644
index 000000000000..98cd1c01ed83
--- /dev/null
+++ b/drivers/target/target_core_scdb.h
@@ -0,0 +1,10 @@
+#ifndef TARGET_CORE_SCDB_H
+#define TARGET_CORE_SCDB_H
+
+extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
+
+#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
new file mode 100644
index 000000000000..158cecbec718
--- /dev/null
+++ b/drivers/target/target_core_tmr.c
@@ -0,0 +1,404 @@
+/*******************************************************************************
+ * Filename: target_core_tmr.c
+ *
+ * This file contains SPC-3 task management infrastructure
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+
+#define DEBUG_LUN_RESET
+#ifdef DEBUG_LUN_RESET
+#define DEBUG_LR(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_LR(x...)
+#endif
+
+struct se_tmr_req *core_tmr_alloc_req(
+ struct se_cmd *se_cmd,
+ void *fabric_tmr_ptr,
+ u8 function)
+{
+ struct se_tmr_req *tmr;
+
+ tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
+ if (!(tmr)) {
+ printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tmr->task_cmd = se_cmd;
+ tmr->fabric_tmr_ptr = fabric_tmr_ptr;
+ tmr->function = function;
+ INIT_LIST_HEAD(&tmr->tmr_list);
+
+ return tmr;
+}
+EXPORT_SYMBOL(core_tmr_alloc_req);
+
+void core_tmr_release_req(
+ struct se_tmr_req *tmr)
+{
+ struct se_device *dev = tmr->tmr_dev;
+
+ spin_lock(&dev->se_tmr_lock);
+ list_del(&tmr->tmr_list);
+ kmem_cache_free(se_tmr_req_cache, tmr);
+ spin_unlock(&dev->se_tmr_lock);
+}
+
+static void core_tmr_handle_tas_abort(
+ struct se_node_acl *tmr_nacl,
+ struct se_cmd *cmd,
+ int tas,
+ int fe_count)
+{
+ if (!(fe_count)) {
+ transport_cmd_finish_abort(cmd, 1);
+ return;
+ }
+ /*
+ * TASK ABORTED status (TAS) bit support
+ */
+ if (((tmr_nacl != NULL) &&
+ (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+ transport_send_task_abort(cmd);
+
+ transport_cmd_finish_abort(cmd, 0);
+}
+
+int core_tmr_lun_reset(
+ struct se_device *dev,
+ struct se_tmr_req *tmr,
+ struct list_head *preempt_and_abort_list,
+ struct se_cmd *prout_cmd)
+{
+ struct se_cmd *cmd;
+ struct se_queue_req *qr, *qr_tmp;
+ struct se_node_acl *tmr_nacl = NULL;
+ struct se_portal_group *tmr_tpg = NULL;
+ struct se_queue_obj *qobj = dev->dev_queue_obj;
+ struct se_tmr_req *tmr_p, *tmr_pp;
+ struct se_task *task, *task_tmp;
+ unsigned long flags;
+ int fe_count, state, tas;
+ /*
+ * The TASK ABORTED status (TAS) bit is configurable via the ConfigFS
+ * struct se_device attributes; see spc4r17, section 7.4.6 Control mode page.
+ *
+ * A task aborted status (TAS) bit set to zero specifies that aborted
+ * tasks shall be terminated by the device server without any response
+ * to the application client. A TAS bit set to one specifies that tasks
+ * aborted by the actions of an I_T nexus other than the I_T nexus on
+ * which the command was received shall be completed with TASK ABORTED
+ * status (see SAM-4).
+ */
+ tas = DEV_ATTRIB(dev)->emulate_tas;
+ /*
+ * Determine if this se_tmr is coming from a $FABRIC_MOD
+ * or struct se_device passthrough..
+ */
+ if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+ tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+ tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+ if (tmr_nacl && tmr_tpg) {
+ DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
+ " initiator port %s\n",
+ TPG_TFO(tmr_tpg)->get_fabric_name(),
+ tmr_nacl->initiatorname);
+ }
+ }
+ DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
+ (preempt_and_abort_list) ? "Preempt" : "TMR",
+ TRANSPORT(dev)->name, tas);
+ /*
+ * Release all pending and outgoing TMRs aside from the received
+ * LUN_RESET tmr..
+ */
+ spin_lock(&dev->se_tmr_lock);
+ list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
+ /*
+ * Allow the received TMR to return with FUNCTION_COMPLETE.
+ */
+ if (tmr && (tmr_p == tmr))
+ continue;
+
+ cmd = tmr_p->task_cmd;
+ if (!(cmd)) {
+ printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
+ continue;
+ }
+ /*
+ * If this function was called with a valid pr_res_key
+ * parameter (e.g. for the PROUT PREEMPT_AND_ABORT service action),
+ * skip TMRs that do not match the registration key.
+ */
+ if ((preempt_and_abort_list != NULL) &&
+ (core_scsi3_check_cdb_abort_and_preempt(
+ preempt_and_abort_list, cmd) != 0))
+ continue;
+ spin_unlock(&dev->se_tmr_lock);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock(&dev->se_tmr_lock);
+ continue;
+ }
+ if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock(&dev->se_tmr_lock);
+ continue;
+ }
+ DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+ " Response: 0x%02x, t_state: %d\n",
+ (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+ tmr_p->function, tmr_p->response, cmd->t_state);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_cmd_finish_abort_tmr(cmd);
+ spin_lock(&dev->se_tmr_lock);
+ }
+ spin_unlock(&dev->se_tmr_lock);
+ /*
+ * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
+ * This is following sam4r17, section 5.6 Aborting commands, Table 38
+ * for TMR LUN_RESET:
+ *
+ * a) "Yes" indicates that each command that is aborted on an I_T nexus
+ * other than the one that caused the SCSI device condition is
+ * completed with TASK ABORTED status, if the TAS bit is set to one in
+ * the Control mode page (see SPC-4). "No" indicates that no status is
+ * returned for aborted commands.
+ *
+ * d) If the logical unit reset is caused by a particular I_T nexus
+ * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
+ * (TASK_ABORTED status) applies.
+ *
+ * Otherwise (e.g., if triggered by a hard reset), "no"
+ * (no TASK_ABORTED SAM status) applies.
+ *
+ * Note that this seems to be independent of TAS (Task Aborted Status)
+ * in the Control Mode Page.
+ */
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
+ t_state_list) {
+ if (!(TASK_CMD(task))) {
+ printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+ continue;
+ }
+ cmd = TASK_CMD(task);
+
+ if (!T_TASK(cmd)) {
+ printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
+ " %p ITT: 0x%08x\n", task, cmd,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ continue;
+ }
+ /*
+ * For PREEMPT_AND_ABORT usage, only process commands
+ * with a matching reservation key.
+ */
+ if ((preempt_and_abort_list != NULL) &&
+ (core_scsi3_check_cdb_abort_and_preempt(
+ preempt_and_abort_list, cmd) != 0))
+ continue;
+ /*
+ * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+ */
+ if (prout_cmd == cmd)
+ continue;
+
+ list_del(&task->t_state_list);
+ atomic_set(&task->task_state_active, 0);
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
+ " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
+ "def_t_state: %d/%d cdb: 0x%02x\n",
+ (preempt_and_abort_list) ? "Preempt" : "", cmd, task,
+ CMD_TFO(cmd)->get_task_tag(cmd), 0,
+ CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+ cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
+ DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+ " t_task_cdbs: %d t_task_cdbs_left: %d"
+ " t_task_cdbs_sent: %d -- t_transport_active: %d"
+ " t_transport_stop: %d t_transport_sent: %d\n",
+ CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
+ T_TASK(cmd)->t_task_cdbs,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+ atomic_read(&T_TASK(cmd)->t_transport_active),
+ atomic_read(&T_TASK(cmd)->t_transport_stop),
+ atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+ if (atomic_read(&task->task_active)) {
+ atomic_set(&task->task_stop, 1);
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
+ " for dev: %p\n", task, dev);
+ wait_for_completion(&task->task_stop_comp);
+ DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
+ " dev: %p\n", task, dev);
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+ atomic_set(&task->task_active, 0);
+ atomic_set(&task->task_stop, 0);
+ }
+ __transport_stop_task_timer(task, &flags);
+
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+ DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
+ " t_task_cdbs_ex_left: %d\n", task, dev,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ continue;
+ }
+ fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
+
+ if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
+ DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
+ " task: %p, t_fe_count: %d dev: %p\n", task,
+ fe_count, dev);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ continue;
+ }
+ DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+ " t_fe_count: %d dev: %p\n", task, fe_count, dev);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ /*
+ * Release all commands remaining in the struct se_device cmd queue.
+ *
+ * This follows the same logic as above for the struct se_device's
+ * struct se_task state list, where commands are returned with
+ * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
+ * reference, otherwise the struct se_cmd is released.
+ */
+ spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+ list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
+ cmd = (struct se_cmd *)qr->cmd;
+ if (!(cmd)) {
+ /*
+ * Skip these for non PREEMPT_AND_ABORT usage..
+ */
+ if (preempt_and_abort_list != NULL)
+ continue;
+
+ atomic_dec(&qobj->queue_cnt);
+ list_del(&qr->qr_list);
+ kfree(qr);
+ continue;
+ }
+ /*
+ * For PREEMPT_AND_ABORT usage, only process commands
+ * with a matching reservation key.
+ */
+ if ((preempt_and_abort_list != NULL) &&
+ (core_scsi3_check_cdb_abort_and_preempt(
+ preempt_and_abort_list, cmd) != 0))
+ continue;
+ /*
+ * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+ */
+ if (prout_cmd == cmd)
+ continue;
+
+ atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+ atomic_dec(&qobj->queue_cnt);
+ list_del(&qr->qr_list);
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ state = qr->state;
+ kfree(qr);
+
+ DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
+ " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
+ "Preempt" : "", cmd, state,
+ atomic_read(&T_TASK(cmd)->t_fe_count));
+ /*
+ * Signal that the command has failed via cmd->se_cmd_flags,
+ * and call TFO->new_cmd_failure() to wakeup any fabric
+ * dependent code used to wait for unsolicited data out
+ * allocation to complete. The fabric module is expected
+ * to dump any remaining unsolicited data out for the aborted
+ * command at this point.
+ */
+ transport_new_cmd_failure(cmd);
+
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
+ atomic_read(&T_TASK(cmd)->t_fe_count));
+ spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+ }
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+ /*
+ * Clear any legacy SPC-2 reservation when called during
+ * LOGICAL UNIT RESET
+ */
+ if (!(preempt_and_abort_list) &&
+ (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+ spin_lock(&dev->dev_reservation_lock);
+ dev->dev_reserved_node_acl = NULL;
+ dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+ spin_unlock(&dev->dev_reservation_lock);
+ printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
+ }
+
+ spin_lock(&dev->stats_lock);
+ dev->num_resets++;
+ spin_unlock(&dev->stats_lock);
+
+ DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
+ (preempt_and_abort_list) ? "Preempt" : "TMR",
+ TRANSPORT(dev)->name);
+ return 0;
+}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
new file mode 100644
index 000000000000..abfa81a57115
--- /dev/null
+++ b/drivers/target/target_core_tpg.c
@@ -0,0 +1,826 @@
+/*******************************************************************************
+ * Filename: target_core_tpg.c
+ *
+ * This file contains generic Target Portal Group related functions.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_hba.h"
+
+/* core_clear_initiator_node_from_tpg():
+ *
+ *
+ */
+static void core_clear_initiator_node_from_tpg(
+ struct se_node_acl *nacl,
+ struct se_portal_group *tpg)
+{
+ int i;
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+ struct se_lun_acl *acl, *acl_tmp;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &nacl->device_list[i];
+
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+
+ if (!deve->se_lun) {
+ printk(KERN_ERR "%s device entries device pointer is"
+ " NULL, but Initiator has access.\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ continue;
+ }
+
+ lun = deve->se_lun;
+ spin_unlock_irq(&nacl->device_list_lock);
+ core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+ spin_lock(&lun->lun_acl_lock);
+ list_for_each_entry_safe(acl, acl_tmp,
+ &lun->lun_acl_list, lacl_list) {
+ if (!(strcmp(acl->initiatorname,
+ nacl->initiatorname)) &&
+ (acl->mapped_lun == deve->mapped_lun))
+ break;
+ }
+
+ if (!acl) {
+ printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
+ " mapped_lun: %u\n", nacl->initiatorname,
+ deve->mapped_lun);
+ spin_unlock(&lun->lun_acl_lock);
+ spin_lock_irq(&nacl->device_list_lock);
+ continue;
+ }
+
+ list_del(&acl->lacl_list);
+ spin_unlock(&lun->lun_acl_lock);
+
+ spin_lock_irq(&nacl->device_list_lock);
+ kfree(acl);
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+}
+
+/* __core_tpg_get_initiator_node_acl():
+ *
+ * Must be called with spin_lock_bh(&tpg->acl_node_lock) held.
+ */
+struct se_node_acl *__core_tpg_get_initiator_node_acl(
+ struct se_portal_group *tpg,
+ const char *initiatorname)
+{
+ struct se_node_acl *acl;
+
+ list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+ if (!(strcmp(acl->initiatorname, initiatorname)))
+ return acl;
+ }
+
+ return NULL;
+}
+
+/* core_tpg_get_initiator_node_acl():
+ *
+ *
+ */
+struct se_node_acl *core_tpg_get_initiator_node_acl(
+ struct se_portal_group *tpg,
+ unsigned char *initiatorname)
+{
+ struct se_node_acl *acl;
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+ if (!(strcmp(acl->initiatorname, initiatorname)) &&
+ (!(acl->dynamic_node_acl))) {
+ spin_unlock_bh(&tpg->acl_node_lock);
+ return acl;
+ }
+ }
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ return NULL;
+}
+
+/* core_tpg_add_node_to_devs():
+ *
+ *
+ */
+void core_tpg_add_node_to_devs(
+ struct se_node_acl *acl,
+ struct se_portal_group *tpg)
+{
+ int i = 0;
+ u32 lun_access = 0;
+ struct se_lun *lun;
+ struct se_device *dev;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ lun = &tpg->tpg_lun_list[i];
+ if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+ continue;
+
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ dev = lun->lun_se_dev;
+ /*
+ * By default in LIO-Target $FABRIC_MOD,
+ * demo_mode_write_protect is ON (i.e. READ_ONLY);
+ */
+ if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
+ if (dev->dev_flags & DF_READ_ONLY)
+ lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ else
+ lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ } else {
+ /*
+ * Allow only optical drives to issue R/W in default RO
+ * demo mode.
+ */
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
+ lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ else
+ lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ }
+
+ printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+ " access for LUN in Demo Mode\n",
+ TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+ (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
+ "READ-WRITE" : "READ-ONLY");
+
+ core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
+ lun_access, acl, tpg, 1);
+ spin_lock(&tpg->tpg_lun_lock);
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+}
+
+/* core_set_queue_depth_for_node():
+ *
+ *
+ */
+static int core_set_queue_depth_for_node(
+ struct se_portal_group *tpg,
+ struct se_node_acl *acl)
+{
+ if (!acl->queue_depth) {
+ printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
+ "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
+ acl->initiatorname);
+ acl->queue_depth = 1;
+ }
+
+ return 0;
+}
+
+/* core_create_device_list_for_node():
+ *
+ *
+ */
+static int core_create_device_list_for_node(struct se_node_acl *nacl)
+{
+ struct se_dev_entry *deve;
+ int i;
+
+ nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
+ TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
+ if (!(nacl->device_list)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct se_node_acl->device_list\n");
+ return -1;
+ }
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &nacl->device_list[i];
+
+ atomic_set(&deve->ua_count, 0);
+ atomic_set(&deve->pr_ref_count, 0);
+ spin_lock_init(&deve->ua_lock);
+ INIT_LIST_HEAD(&deve->alua_port_list);
+ INIT_LIST_HEAD(&deve->ua_list);
+ }
+
+ return 0;
+}
+
+/* core_tpg_check_initiator_node_acl()
+ *
+ *
+ */
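+/*
+ * Typically called by a fabric module while a new I_T nexus (session) is
+ * being established.  If no explicit ACL exists and the fabric allows demo
+ * mode, a dynamic struct se_node_acl is allocated here and every active
+ * LUN in the TPG is mapped into it via core_tpg_add_node_to_devs().
+ */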
+struct se_node_acl *core_tpg_check_initiator_node_acl(
+ struct se_portal_group *tpg,
+ unsigned char *initiatorname)
+{
+ struct se_node_acl *acl;
+
+ acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ if ((acl))
+ return acl;
+
+ if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
+ return NULL;
+
+ acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
+ if (!(acl))
+ return NULL;
+
+ INIT_LIST_HEAD(&acl->acl_list);
+ INIT_LIST_HEAD(&acl->acl_sess_list);
+ spin_lock_init(&acl->device_list_lock);
+ spin_lock_init(&acl->nacl_sess_lock);
+ atomic_set(&acl->acl_pr_ref_count, 0);
+ atomic_set(&acl->mib_ref_count, 0);
+ acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
+ snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+ acl->se_tpg = tpg;
+ acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+ spin_lock_init(&acl->stats_lock);
+ acl->dynamic_node_acl = 1;
+
+ TPG_TFO(tpg)->set_default_node_attributes(acl);
+
+ if (core_create_device_list_for_node(acl) < 0) {
+ TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ return NULL;
+ }
+
+ if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+ core_free_device_list_for_node(acl, tpg);
+ TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ return NULL;
+ }
+
+ core_tpg_add_node_to_devs(acl, tpg);
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+ tpg->num_node_acls++;
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+ TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+
+ return acl;
+}
+EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
+
+void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
+{
+ while (atomic_read(&nacl->acl_pr_ref_count) != 0)
+ cpu_relax();
+}
+
+void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
+{
+ while (atomic_read(&nacl->mib_ref_count) != 0)
+ cpu_relax();
+}
+
+void core_tpg_clear_object_luns(struct se_portal_group *tpg)
+{
+ int i, ret;
+ struct se_lun *lun;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ lun = &tpg->tpg_lun_list[i];
+
+ if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
+ (lun->lun_se_dev == NULL))
+ continue;
+
+ spin_unlock(&tpg->tpg_lun_lock);
+ ret = core_dev_del_lun(tpg, lun->unpacked_lun);
+ spin_lock(&tpg->tpg_lun_lock);
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+}
+EXPORT_SYMBOL(core_tpg_clear_object_luns);
+
+/* core_tpg_add_initiator_node_acl():
+ *
+ *
+ */
+struct se_node_acl *core_tpg_add_initiator_node_acl(
+ struct se_portal_group *tpg,
+ struct se_node_acl *se_nacl,
+ const char *initiatorname,
+ u32 queue_depth)
+{
+ struct se_node_acl *acl = NULL;
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ if ((acl)) {
+ if (acl->dynamic_node_acl) {
+ acl->dynamic_node_acl = 0;
+ printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
+ " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
+ spin_unlock_bh(&tpg->acl_node_lock);
+ /*
+ * Release the locally allocated struct se_node_acl
+ * because core_tpg_add_initiator_node_acl() returned
+ * a pointer to an existing demo mode node ACL.
+ */
+ if (se_nacl)
+ TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
+ se_nacl);
+ goto done;
+ }
+
+ printk(KERN_ERR "ACL entry for %s Initiator"
+ " Node %s already exists for TPG %u, ignoring"
+ " request.\n", TPG_TFO(tpg)->get_fabric_name(),
+ initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock_bh(&tpg->acl_node_lock);
+ return ERR_PTR(-EEXIST);
+ }
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ if (!(se_nacl)) {
+ printk("struct se_node_acl pointer is NULL\n");
+ return ERR_PTR(-EINVAL);
+ }
+ /*
+ * For v4.x logic the se_node_acl_s is hanging off a fabric
+ * dependent structure allocated via
+ * struct target_core_fabric_ops->fabric_make_nodeacl()
+ */
+ acl = se_nacl;
+
+ INIT_LIST_HEAD(&acl->acl_list);
+ INIT_LIST_HEAD(&acl->acl_sess_list);
+ spin_lock_init(&acl->device_list_lock);
+ spin_lock_init(&acl->nacl_sess_lock);
+ atomic_set(&acl->acl_pr_ref_count, 0);
+ acl->queue_depth = queue_depth;
+ snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+ acl->se_tpg = tpg;
+ acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+ spin_lock_init(&acl->stats_lock);
+
+ TPG_TFO(tpg)->set_default_node_attributes(acl);
+
+ if (core_create_device_list_for_node(acl) < 0) {
+ TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+ core_free_device_list_for_node(acl, tpg);
+ TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+ tpg->num_node_acls++;
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+done:
+ printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+ TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+
+ return acl;
+}
+EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
+
+/* core_tpg_del_initiator_node_acl():
+ *
+ *
+ */
+int core_tpg_del_initiator_node_acl(
+ struct se_portal_group *tpg,
+ struct se_node_acl *acl,
+ int force)
+{
+ struct se_session *sess, *sess_tmp;
+ int dynamic_acl = 0;
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ if (acl->dynamic_node_acl) {
+ acl->dynamic_node_acl = 0;
+ dynamic_acl = 1;
+ }
+ list_del(&acl->acl_list);
+ tpg->num_node_acls--;
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ spin_lock_bh(&tpg->session_lock);
+ list_for_each_entry_safe(sess, sess_tmp,
+ &tpg->tpg_sess_list, sess_list) {
+ if (sess->se_node_acl != acl)
+ continue;
+ /*
+ * Determine if the session needs to be closed by our context.
+ */
+ if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+ continue;
+
+ spin_unlock_bh(&tpg->session_lock);
+ /*
+ * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+ * forcefully shutdown the $FABRIC_MOD session/nexus.
+ */
+ TPG_TFO(tpg)->close_session(sess);
+
+ spin_lock_bh(&tpg->session_lock);
+ }
+ spin_unlock_bh(&tpg->session_lock);
+
+ core_tpg_wait_for_nacl_pr_ref(acl);
+ core_tpg_wait_for_mib_ref(acl);
+ core_clear_initiator_node_from_tpg(acl, tpg);
+ core_free_device_list_for_node(acl, tpg);
+
+ printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+ TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
+
+ return 0;
+}
+EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
+
+/* core_tpg_set_initiator_node_queue_depth():
+ *
+ *
+ */
+int core_tpg_set_initiator_node_queue_depth(
+ struct se_portal_group *tpg,
+ unsigned char *initiatorname,
+ u32 queue_depth,
+ int force)
+{
+ struct se_session *sess, *init_sess = NULL;
+ struct se_node_acl *acl;
+ int dynamic_acl = 0;
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ if (!(acl)) {
+ printk(KERN_ERR "Access Control List entry for %s Initiator"
+ " Node %s does not exists for TPG %hu, ignoring"
+ " request.\n", TPG_TFO(tpg)->get_fabric_name(),
+ initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock_bh(&tpg->acl_node_lock);
+ return -ENODEV;
+ }
+ if (acl->dynamic_node_acl) {
+ acl->dynamic_node_acl = 0;
+ dynamic_acl = 1;
+ }
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ spin_lock_bh(&tpg->session_lock);
+ list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
+ if (sess->se_node_acl != acl)
+ continue;
+
+ if (!force) {
+ printk(KERN_ERR "Unable to change queue depth for %s"
+ " Initiator Node: %s while session is"
+ " operational. To forcefully change the queue"
+ " depth and force session reinstatement"
+ " use the \"force=1\" parameter.\n",
+ TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ spin_unlock_bh(&tpg->session_lock);
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ if (dynamic_acl)
+ acl->dynamic_node_acl = 1;
+ spin_unlock_bh(&tpg->acl_node_lock);
+ return -EEXIST;
+ }
+ /*
+ * Determine if the session needs to be closed by our context.
+ */
+ if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+ continue;
+
+ init_sess = sess;
+ break;
+ }
+
+ /*
+ * User has requested to change the queue depth for an Initiator Node.
+ * Change the value in the Node's struct se_node_acl, and call
+ * core_set_queue_depth_for_node() to add the requested queue depth.
+ *
+ * Finally call TPG_TFO(tpg)->close_session() to force session
+ * reinstatement to occur if there is an active session for the
+ * $FABRIC_MOD Initiator Node in question.
+ */
+ acl->queue_depth = queue_depth;
+
+ if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+ spin_unlock_bh(&tpg->session_lock);
+ /*
+ * Force session reinstatement if
+ * core_set_queue_depth_for_node() failed, because we assume
+ * the $FABRIC_MOD has already set the session reinstatement
+ * bit from TPG_TFO(tpg)->shutdown_session() called above.
+ */
+ if (init_sess)
+ TPG_TFO(tpg)->close_session(init_sess);
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ if (dynamic_acl)
+ acl->dynamic_node_acl = 1;
+ spin_unlock_bh(&tpg->acl_node_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&tpg->session_lock);
+ /*
+ * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+ * forcefully shutdown the $FABRIC_MOD session/nexus.
+ */
+ if (init_sess)
+ TPG_TFO(tpg)->close_session(init_sess);
+
+ printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
+ " Node: %s on %s Target Portal Group: %u\n", queue_depth,
+ initiatorname, TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ if (dynamic_acl)
+ acl->dynamic_node_acl = 1;
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+
+static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
+{
+ /* Set in core_dev_setup_virtual_lun0() */
+ struct se_device *dev = se_global->g_lun0_dev;
+ struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+ u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ int ret;
+
+ lun->unpacked_lun = 0;
+ lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+ atomic_set(&lun->lun_acl_count, 0);
+ init_completion(&lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&lun->lun_acl_list);
+ INIT_LIST_HEAD(&lun->lun_cmd_list);
+ spin_lock_init(&lun->lun_acl_lock);
+ spin_lock_init(&lun->lun_cmd_lock);
+ spin_lock_init(&lun->lun_sep_lock);
+
+ ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+ if (ret < 0)
+ return -1;
+
+ return 0;
+}
+
+static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
+{
+ struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+
+ core_tpg_post_dellun(se_tpg, lun);
+}
+
+int core_tpg_register(
+ struct target_core_fabric_ops *tfo,
+ struct se_wwn *se_wwn,
+ struct se_portal_group *se_tpg,
+ void *tpg_fabric_ptr,
+ int se_tpg_type)
+{
+ struct se_lun *lun;
+ u32 i;
+
+ se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
+ TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
+ if (!(se_tpg->tpg_lun_list)) {
+ printk(KERN_ERR "Unable to allocate struct se_portal_group->"
+ "tpg_lun_list\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ lun = &se_tpg->tpg_lun_list[i];
+ lun->unpacked_lun = i;
+ lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+ atomic_set(&lun->lun_acl_count, 0);
+ init_completion(&lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&lun->lun_acl_list);
+ INIT_LIST_HEAD(&lun->lun_cmd_list);
+ spin_lock_init(&lun->lun_acl_lock);
+ spin_lock_init(&lun->lun_cmd_lock);
+ spin_lock_init(&lun->lun_sep_lock);
+ }
+
+ se_tpg->se_tpg_type = se_tpg_type;
+ se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
+ se_tpg->se_tpg_tfo = tfo;
+ se_tpg->se_tpg_wwn = se_wwn;
+ atomic_set(&se_tpg->tpg_pr_ref_count, 0);
+ INIT_LIST_HEAD(&se_tpg->acl_node_list);
+ INIT_LIST_HEAD(&se_tpg->se_tpg_list);
+ INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
+ spin_lock_init(&se_tpg->acl_node_lock);
+ spin_lock_init(&se_tpg->session_lock);
+ spin_lock_init(&se_tpg->tpg_lun_lock);
+
+ if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
+ if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
+ kfree(se_tpg);
+ return -ENOMEM;
+ }
+ }
+
+ spin_lock_bh(&se_global->se_tpg_lock);
+ list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
+ spin_unlock_bh(&se_global->se_tpg_lock);
+
+ printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
+ " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
+ (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+ "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
+ "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
+
+ return 0;
+}
+EXPORT_SYMBOL(core_tpg_register);
+
+int core_tpg_deregister(struct se_portal_group *se_tpg)
+{
+ printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
+ " for endpoint: %s Portal Tag %u\n",
+ TPG_TFO(se_tpg)->get_fabric_name(),
+ (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+ "Normal" : "Discovery",
+ TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
+ TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+
+ spin_lock_bh(&se_global->se_tpg_lock);
+ list_del(&se_tpg->se_tpg_list);
+ spin_unlock_bh(&se_global->se_tpg_lock);
+
+ while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
+ cpu_relax();
+
+ if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
+ core_tpg_release_virtual_lun0(se_tpg);
+
+ se_tpg->se_tpg_fabric_ptr = NULL;
+ kfree(se_tpg->tpg_lun_list);
+ return 0;
+}
+EXPORT_SYMBOL(core_tpg_deregister);
+
+struct se_lun *core_tpg_pre_addlun(
+ struct se_portal_group *tpg,
+ u32 unpacked_lun)
+{
+ struct se_lun *lun;
+
+ if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ "-1: %u for Target Portal Group: %u\n",
+ TPG_TFO(tpg)->get_fabric_name(),
+ unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ return ERR_PTR(-EOVERFLOW);
+ }
+
+ spin_lock(&tpg->tpg_lun_lock);
+ lun = &tpg->tpg_lun_list[unpacked_lun];
+ if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
+ printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
+ " on %s Target Portal Group: %u, ignoring request.\n",
+ unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return ERR_PTR(-EINVAL);
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return lun;
+}
+
+int core_tpg_post_addlun(
+ struct se_portal_group *tpg,
+ struct se_lun *lun,
+ u32 lun_access,
+ void *lun_ptr)
+{
+ if (core_dev_export(lun_ptr, tpg, lun) < 0)
+ return -1;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ lun->lun_access = lun_access;
+ lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return 0;
+}
+
+static void core_tpg_shutdown_lun(
+ struct se_portal_group *tpg,
+ struct se_lun *lun)
+{
+ core_clear_lun_from_tpg(lun, tpg);
+ transport_clear_lun_from_sessions(lun);
+}
+
+struct se_lun *core_tpg_pre_dellun(
+ struct se_portal_group *tpg,
+ u32 unpacked_lun,
+ int *ret)
+{
+ struct se_lun *lun;
+
+ if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ "-1: %u for Target Portal Group: %u\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TRANSPORT_MAX_LUNS_PER_TPG-1,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ return ERR_PTR(-EOVERFLOW);
+ }
+
+ spin_lock(&tpg->tpg_lun_lock);
+ lun = &tpg->tpg_lun_list[unpacked_lun];
+ if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+ printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ " Target Portal Group: %u, ignoring request.\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return ERR_PTR(-ENODEV);
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return lun;
+}
+
+int core_tpg_post_dellun(
+ struct se_portal_group *tpg,
+ struct se_lun *lun)
+{
+ core_tpg_shutdown_lun(tpg, lun);
+
+ core_dev_unexport(lun->lun_se_dev, tpg, lun);
+
+ spin_lock(&tpg->tpg_lun_lock);
+ lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return 0;
+}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
new file mode 100644
index 000000000000..28b6292ff298
--- /dev/null
+++ b/drivers/target/target_core_transport.c
@@ -0,0 +1,6134 @@
+/*******************************************************************************
+ * Filename: target_core_transport.c
+ *
+ * This file contains the Generic Target Engine Core.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <linux/cdrom.h>
+#include <asm/unaligned.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_scdb.h"
+#include "target_core_ua.h"
+
+/* #define DEBUG_CDB_HANDLER */
+#ifdef DEBUG_CDB_HANDLER
+#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CDB_H(x...)
+#endif
+
+/* #define DEBUG_CMD_MAP */
+#ifdef DEBUG_CMD_MAP
+#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CMD_M(x...)
+#endif
+
+/* #define DEBUG_MEM_ALLOC */
+#ifdef DEBUG_MEM_ALLOC
+#define DEBUG_MEM(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_MEM(x...)
+#endif
+
+/* #define DEBUG_MEM2_ALLOC */
+#ifdef DEBUG_MEM2_ALLOC
+#define DEBUG_MEM2(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_MEM2(x...)
+#endif
+
+/* #define DEBUG_SG_CALC */
+#ifdef DEBUG_SG_CALC
+#define DEBUG_SC(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_SC(x...)
+#endif
+
+/* #define DEBUG_SE_OBJ */
+#ifdef DEBUG_SE_OBJ
+#define DEBUG_SO(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_SO(x...)
+#endif
+
+/* #define DEBUG_CMD_VOL */
+#ifdef DEBUG_CMD_VOL
+#define DEBUG_VOL(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_VOL(x...)
+#endif
+
+/* #define DEBUG_CMD_STOP */
+#ifdef DEBUG_CMD_STOP
+#define DEBUG_CS(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CS(x...)
+#endif
+
+/* #define DEBUG_PASSTHROUGH */
+#ifdef DEBUG_PASSTHROUGH
+#define DEBUG_PT(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_PT(x...)
+#endif
+
+/* #define DEBUG_TASK_STOP */
+#ifdef DEBUG_TASK_STOP
+#define DEBUG_TS(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TS(x...)
+#endif
+
+/* #define DEBUG_TRANSPORT_STOP */
+#ifdef DEBUG_TRANSPORT_STOP
+#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TRANSPORT_S(x...)
+#endif
+
+/* #define DEBUG_TASK_FAILURE */
+#ifdef DEBUG_TASK_FAILURE
+#define DEBUG_TF(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TF(x...)
+#endif
+
+/* #define DEBUG_DEV_OFFLINE */
+#ifdef DEBUG_DEV_OFFLINE
+#define DEBUG_DO(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_DO(x...)
+#endif
+
+/* #define DEBUG_TASK_STATE */
+#ifdef DEBUG_TASK_STATE
+#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TSTATE(x...)
+#endif
+
+/* #define DEBUG_STATUS_THR */
+#ifdef DEBUG_STATUS_THR
+#define DEBUG_ST(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_ST(x...)
+#endif
+
+/* #define DEBUG_TASK_TIMEOUT */
+#ifdef DEBUG_TASK_TIMEOUT
+#define DEBUG_TT(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TT(x...)
+#endif
+
+/* #define DEBUG_GENERIC_REQUEST_FAILURE */
+#ifdef DEBUG_GENERIC_REQUEST_FAILURE
+#define DEBUG_GRF(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_GRF(x...)
+#endif
+
+/* #define DEBUG_SAM_TASK_ATTRS */
+#ifdef DEBUG_SAM_TASK_ATTRS
+#define DEBUG_STA(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_STA(x...)
+#endif
+
+struct se_global *se_global;
+
+static struct kmem_cache *se_cmd_cache;
+static struct kmem_cache *se_sess_cache;
+struct kmem_cache *se_tmr_req_cache;
+struct kmem_cache *se_ua_cache;
+struct kmem_cache *se_mem_cache;
+struct kmem_cache *t10_pr_reg_cache;
+struct kmem_cache *t10_alua_lu_gp_cache;
+struct kmem_cache *t10_alua_lu_gp_mem_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+/* Used for transport_dev_get_map_*() */
+typedef int (*map_func_t)(struct se_task *, u32);
+
+static int transport_generic_write_pending(struct se_cmd *);
+static int transport_processing_thread(void *);
+static int __transport_execute_tasks(struct se_device *dev);
+static void transport_complete_task_attr(struct se_cmd *cmd);
+static void transport_direct_request_timeout(struct se_cmd *cmd);
+static void transport_free_dev_tasks(struct se_cmd *cmd);
+static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
+ unsigned long long starting_lba, u32 sectors,
+ enum dma_data_direction data_direction,
+ struct list_head *mem_list, int set_counts);
+static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
+ u32 dma_size);
+static int transport_generic_remove(struct se_cmd *cmd,
+ int release_to_pool, int session_reinstatement);
+static int transport_get_sectors(struct se_cmd *cmd);
+static struct list_head *transport_init_se_mem_list(void);
+static int transport_map_sg_to_mem(struct se_cmd *cmd,
+ struct list_head *se_mem_list, void *in_mem,
+ u32 *se_mem_cnt);
+static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
+ unsigned char *dst, struct list_head *se_mem_list);
+static void transport_release_fe_cmd(struct se_cmd *cmd);
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
+ struct se_queue_obj *qobj);
+static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
+static void transport_stop_all_task_timers(struct se_cmd *cmd);
+
+int transport_emulate_control_cdb(struct se_task *task);
+
+int init_se_global(void)
+{
+ struct se_global *global;
+
+ global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
+ if (!(global)) {
+ printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
+ return -1;
+ }
+
+ INIT_LIST_HEAD(&global->g_lu_gps_list);
+ INIT_LIST_HEAD(&global->g_se_tpg_list);
+ INIT_LIST_HEAD(&global->g_hba_list);
+ INIT_LIST_HEAD(&global->g_se_dev_list);
+ spin_lock_init(&global->g_device_lock);
+ spin_lock_init(&global->hba_lock);
+ spin_lock_init(&global->se_tpg_lock);
+ spin_lock_init(&global->lu_gps_lock);
+ spin_lock_init(&global->plugin_class_lock);
+
+ se_cmd_cache = kmem_cache_create("se_cmd_cache",
+ sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
+ if (!(se_cmd_cache)) {
+ printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+ goto out;
+ }
+ se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
+ sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
+ 0, NULL);
+ if (!(se_tmr_req_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+ " failed\n");
+ goto out;
+ }
+ se_sess_cache = kmem_cache_create("se_sess_cache",
+ sizeof(struct se_session), __alignof__(struct se_session),
+ 0, NULL);
+ if (!(se_sess_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for struct se_session"
+ " failed\n");
+ goto out;
+ }
+ se_ua_cache = kmem_cache_create("se_ua_cache",
+ sizeof(struct se_ua), __alignof__(struct se_ua),
+ 0, NULL);
+ if (!(se_ua_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
+ goto out;
+ }
+ se_mem_cache = kmem_cache_create("se_mem_cache",
+ sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
+ if (!(se_mem_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
+ goto out;
+ }
+ t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
+ sizeof(struct t10_pr_registration),
+ __alignof__(struct t10_pr_registration), 0, NULL);
+ if (!(t10_pr_reg_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+ " failed\n");
+ goto out;
+ }
+ t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
+ sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
+ 0, NULL);
+ if (!(t10_alua_lu_gp_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+ " failed\n");
+ goto out;
+ }
+ t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
+ sizeof(struct t10_alua_lu_gp_member),
+ __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
+ if (!(t10_alua_lu_gp_mem_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+ "cache failed\n");
+ goto out;
+ }
+ t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
+ sizeof(struct t10_alua_tg_pt_gp),
+ __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
+ if (!(t10_alua_tg_pt_gp_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+ "cache failed\n");
+ goto out;
+ }
+ t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
+ "t10_alua_tg_pt_gp_mem_cache",
+ sizeof(struct t10_alua_tg_pt_gp_member),
+ __alignof__(struct t10_alua_tg_pt_gp_member),
+ 0, NULL);
+ if (!(t10_alua_tg_pt_gp_mem_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+ "mem_t failed\n");
+ goto out;
+ }
+
+ se_global = global;
+
+ return 0;
+out:
+ if (se_cmd_cache)
+ kmem_cache_destroy(se_cmd_cache);
+ if (se_tmr_req_cache)
+ kmem_cache_destroy(se_tmr_req_cache);
+ if (se_sess_cache)
+ kmem_cache_destroy(se_sess_cache);
+ if (se_ua_cache)
+ kmem_cache_destroy(se_ua_cache);
+ if (se_mem_cache)
+ kmem_cache_destroy(se_mem_cache);
+ if (t10_pr_reg_cache)
+ kmem_cache_destroy(t10_pr_reg_cache);
+ if (t10_alua_lu_gp_cache)
+ kmem_cache_destroy(t10_alua_lu_gp_cache);
+ if (t10_alua_lu_gp_mem_cache)
+ kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+ if (t10_alua_tg_pt_gp_cache)
+ kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+ if (t10_alua_tg_pt_gp_mem_cache)
+ kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+ kfree(global);
+ return -1;
+}
+
+void release_se_global(void)
+{
+ struct se_global *global;
+
+ global = se_global;
+ if (!(global))
+ return;
+
+ kmem_cache_destroy(se_cmd_cache);
+ kmem_cache_destroy(se_tmr_req_cache);
+ kmem_cache_destroy(se_sess_cache);
+ kmem_cache_destroy(se_ua_cache);
+ kmem_cache_destroy(se_mem_cache);
+ kmem_cache_destroy(t10_pr_reg_cache);
+ kmem_cache_destroy(t10_alua_lu_gp_cache);
+ kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+ kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+ kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+ kfree(global);
+
+ se_global = NULL;
+}
+
+void transport_init_queue_obj(struct se_queue_obj *qobj)
+{
+ atomic_set(&qobj->queue_cnt, 0);
+ INIT_LIST_HEAD(&qobj->qobj_list);
+ init_waitqueue_head(&qobj->thread_wq);
+ spin_lock_init(&qobj->cmd_queue_lock);
+}
+EXPORT_SYMBOL(transport_init_queue_obj);
+
+static int transport_subsystem_reqmods(void)
+{
+ int ret;
+
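+ /*
+ * Failures below are logged but not treated as fatal: only a subset
+ * of the subsystem plugin modules may be present on a given system,
+ * and this function always returns 0.
+ */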
+ ret = request_module("target_core_iblock");
+ if (ret != 0)
+ printk(KERN_ERR "Unable to load target_core_iblock\n");
+
+ ret = request_module("target_core_file");
+ if (ret != 0)
+ printk(KERN_ERR "Unable to load target_core_file\n");
+
+ ret = request_module("target_core_pscsi");
+ if (ret != 0)
+ printk(KERN_ERR "Unable to load target_core_pscsi\n");
+
+ ret = request_module("target_core_stgt");
+ if (ret != 0)
+ printk(KERN_ERR "Unable to load target_core_stgt\n");
+
+ return 0;
+}
+
+int transport_subsystem_check_init(void)
+{
+ if (se_global->g_sub_api_initialized)
+ return 0;
+ /*
+ * Request the loading of known TCM subsystem plugins..
+ */
+ if (transport_subsystem_reqmods() < 0)
+ return -1;
+
+ se_global->g_sub_api_initialized = 1;
+ return 0;
+}
+
+struct se_session *transport_init_session(void)
+{
+ struct se_session *se_sess;
+
+ se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
+ if (!(se_sess)) {
+ printk(KERN_ERR "Unable to allocate struct se_session from"
+ " se_sess_cache\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&se_sess->sess_list);
+ INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ atomic_set(&se_sess->mib_ref_count, 0);
+
+ return se_sess;
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/*
+ * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
+ */
+void __transport_register_session(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct se_session *se_sess,
+ void *fabric_sess_ptr)
+{
+ unsigned char buf[PR_REG_ISID_LEN];
+
+ se_sess->se_tpg = se_tpg;
+ se_sess->fabric_sess_ptr = fabric_sess_ptr;
+ /*
+ * Used by struct se_node_acl's under ConfigFS to locate active struct se_session.
+ *
+ * Only set for struct se_session's that will actually be moving I/O.
+ * eg: *NOT* discovery sessions.
+ */
+ if (se_nacl) {
+ /*
+ * If the fabric module supports an ISID based TransportID,
+ * save this value in binary from the fabric I_T Nexus now.
+ */
+ if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+ memset(&buf[0], 0, PR_REG_ISID_LEN);
+ TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
+ &buf[0], PR_REG_ISID_LEN);
+ se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
+ }
+ spin_lock_irq(&se_nacl->nacl_sess_lock);
+ /*
+ * The se_nacl->nacl_sess pointer will be set to the
+ * last active I_T Nexus for each struct se_node_acl.
+ */
+ se_nacl->nacl_sess = se_sess;
+
+ list_add_tail(&se_sess->sess_acl_list,
+ &se_nacl->acl_sess_list);
+ spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ }
+ list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
+
+ printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+ TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
+}
+EXPORT_SYMBOL(__transport_register_session);
+
+void transport_register_session(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct se_session *se_sess,
+ void *fabric_sess_ptr)
+{
+ spin_lock_bh(&se_tpg->session_lock);
+ __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
+ spin_unlock_bh(&se_tpg->session_lock);
+}
+EXPORT_SYMBOL(transport_register_session);
+
+void transport_deregister_session_configfs(struct se_session *se_sess)
+{
+ struct se_node_acl *se_nacl;
+
+ /*
+ * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
+ */
+ se_nacl = se_sess->se_node_acl;
+ if ((se_nacl)) {
+ spin_lock_irq(&se_nacl->nacl_sess_lock);
+ list_del(&se_sess->sess_acl_list);
+ /*
+ * If the session list is empty, then clear the pointer.
+ * Otherwise, set the struct se_session pointer from the tail
+ * element of the per struct se_node_acl active session list.
+ */
+ if (list_empty(&se_nacl->acl_sess_list))
+ se_nacl->nacl_sess = NULL;
+ else {
+ se_nacl->nacl_sess = container_of(
+ se_nacl->acl_sess_list.prev,
+ struct se_session, sess_acl_list);
+ }
+ spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ }
+}
+EXPORT_SYMBOL(transport_deregister_session_configfs);
+
+void transport_free_session(struct se_session *se_sess)
+{
+ kmem_cache_free(se_sess_cache, se_sess);
+}
+EXPORT_SYMBOL(transport_free_session);
+
+void transport_deregister_session(struct se_session *se_sess)
+{
+ struct se_portal_group *se_tpg = se_sess->se_tpg;
+ struct se_node_acl *se_nacl;
+
+ if (!(se_tpg)) {
+ transport_free_session(se_sess);
+ return;
+ }
+ /*
+ * Wait for possible reference in drivers/target/target_core_mib.c:
+ * scsi_att_intr_port_seq_show()
+ */
+ while (atomic_read(&se_sess->mib_ref_count) != 0)
+ cpu_relax();
+
+ spin_lock_bh(&se_tpg->session_lock);
+ list_del(&se_sess->sess_list);
+ se_sess->se_tpg = NULL;
+ se_sess->fabric_sess_ptr = NULL;
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ /*
+ * Determine if we need to do extra work for this initiator node's
+ * struct se_node_acl if it had been previously dynamically generated.
+ */
+ se_nacl = se_sess->se_node_acl;
+ if ((se_nacl)) {
+ spin_lock_bh(&se_tpg->acl_node_lock);
+ if (se_nacl->dynamic_node_acl) {
+ if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
+ se_tpg))) {
+ list_del(&se_nacl->acl_list);
+ se_tpg->num_node_acls--;
+ spin_unlock_bh(&se_tpg->acl_node_lock);
+
+ core_tpg_wait_for_nacl_pr_ref(se_nacl);
+ core_tpg_wait_for_mib_ref(se_nacl);
+ core_free_device_list_for_node(se_nacl, se_tpg);
+ TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
+ se_nacl);
+ spin_lock_bh(&se_tpg->acl_node_lock);
+ }
+ }
+ spin_unlock_bh(&se_tpg->acl_node_lock);
+ }
+
+ transport_free_session(se_sess);
+
+ printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
+ TPG_TFO(se_tpg)->get_fabric_name());
+}
+EXPORT_SYMBOL(transport_deregister_session);
+
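
For orientation, here is a minimal, illustrative sketch of how a fabric module front-end might drive the session API exported above; struct my_fabric_conn and my_lookup_acl() are hypothetical placeholders and are not part of this patch.

/* Illustrative sketch only: my_fabric_conn and my_lookup_acl() are hypothetical. */
static int my_fabric_login(struct se_portal_group *se_tpg,
			   struct my_fabric_conn *conn)
{
	struct se_node_acl *se_nacl;

	conn->se_sess = transport_init_session();
	if (IS_ERR(conn->se_sess))
		return PTR_ERR(conn->se_sess);

	se_nacl = my_lookup_acl(se_tpg, conn);		/* fabric specific */
	if (!se_nacl) {
		transport_free_session(conn->se_sess);
		return -ENOENT;
	}
	/* Takes se_tpg->session_lock internally. */
	transport_register_session(se_tpg, se_nacl, conn->se_sess, conn);
	return 0;
}

static void my_fabric_logout(struct my_fabric_conn *conn)
{
	/* Unlinks the session from its TPG and frees the struct se_session. */
	transport_deregister_session(conn->se_sess);
}
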
+/*
+ * Called with T_TASK(cmd)->t_state_lock held.
+ */
+static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
+{
+ struct se_device *dev;
+ struct se_task *task;
+ unsigned long flags;
+
+ if (!T_TASK(cmd))
+ return;
+
+ list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ dev = task->se_dev;
+ if (!(dev))
+ continue;
+
+ if (atomic_read(&task->task_active))
+ continue;
+
+ if (!(atomic_read(&task->task_state_active)))
+ continue;
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ list_del(&task->t_state_list);
+ DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
+ CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task);
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ atomic_set(&task->task_state_active, 0);
+ atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
+ }
+}
+
+/* transport_cmd_check_stop():
+ *
+ * 'transport_off = 1' determines if t_transport_active should be cleared.
+ * 'transport_off = 2' determines if task_dev_state should be removed.
+ *
+ * A non-zero u8 t_state sets cmd->t_state.
+ * Returns 1 when command is stopped, else 0.
+ */
+static int transport_cmd_check_stop(
+ struct se_cmd *cmd,
+ int transport_off,
+ u8 t_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ /*
+ * Determine if IOCTL context caller is requesting the stopping of this
+ * command for LUN shutdown purposes.
+ */
+ if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+ DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
+ " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ cmd->deferred_t_state = cmd->t_state;
+ cmd->t_state = TRANSPORT_DEFERRED_CMD;
+ atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ if (transport_off == 2)
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ complete(&T_TASK(cmd)->transport_lun_stop_comp);
+ return 1;
+ }
+ /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
+ */
+ if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
+ DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
+ " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ cmd->deferred_t_state = cmd->t_state;
+ cmd->t_state = TRANSPORT_DEFERRED_CMD;
+ if (transport_off == 2)
+ transport_all_task_dev_remove_state(cmd);
+
+ /*
+ * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
+ * to FE.
+ */
+ if (transport_off == 2)
+ cmd->se_lun = NULL;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ complete(&T_TASK(cmd)->t_transport_stop_comp);
+ return 1;
+ }
+ if (transport_off) {
+ atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ if (transport_off == 2) {
+ transport_all_task_dev_remove_state(cmd);
+ /*
+ * Clear struct se_cmd->se_lun before the transport_off == 2
+ * handoff to fabric module.
+ */
+ cmd->se_lun = NULL;
+ /*
+ * Some fabric modules like tcm_loop can release their
+ * internally allocated I/O reference and struct se_cmd now.
+ */
+ if (CMD_TFO(cmd)->check_stop_free != NULL) {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ CMD_TFO(cmd)->check_stop_free(cmd);
+ return 1;
+ }
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return 0;
+ } else if (t_state)
+ cmd->t_state = t_state;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return 0;
+}
+
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+{
+ return transport_cmd_check_stop(cmd, 2, 0);
+}
+
+static void transport_lun_remove_cmd(struct se_cmd *cmd)
+{
+ struct se_lun *lun = SE_LUN(cmd);
+ unsigned long flags;
+
+ if (!lun)
+ return;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ goto check_lun;
+ }
+ atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_free_dev_tasks(cmd);
+
+check_lun:
+ spin_lock_irqsave(&lun->lun_cmd_lock, flags);
+ if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
+ list_del(&cmd->se_lun_list);
+ atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+#if 0
+ printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
+ CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
+#endif
+ }
+ spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+}
+
+void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+{
+ transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_lun_remove_cmd(cmd);
+
+ if (transport_cmd_check_stop_to_fabric(cmd))
+ return;
+ if (remove)
+ transport_generic_remove(cmd, 0, 0);
+}
+
+void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
+{
+ transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+
+ if (transport_cmd_check_stop_to_fabric(cmd))
+ return;
+
+ transport_generic_remove(cmd, 0, 0);
+}
+
+static int transport_add_cmd_to_queue(
+ struct se_cmd *cmd,
+ int t_state)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_queue_obj *qobj = dev->dev_queue_obj;
+ struct se_queue_req *qr;
+ unsigned long flags;
+
+ qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
+ if (!(qr)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct se_queue_req\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&qr->qr_list);
+
+ qr->cmd = (void *)cmd;
+ qr->state = t_state;
+
+ if (t_state) {
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ cmd->t_state = t_state;
+ atomic_set(&T_TASK(cmd)->t_transport_active, 1);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ }
+
+ spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+ list_add_tail(&qr->qr_list, &qobj->qobj_list);
+ atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ atomic_inc(&qobj->queue_cnt);
+ wake_up_interruptible(&qobj->thread_wq);
+ return 0;
+}
+
+/*
+ * Called with struct se_queue_obj->cmd_queue_lock held.
+ */
+static struct se_queue_req *
+__transport_get_qr_from_queue(struct se_queue_obj *qobj)
+{
+ struct se_cmd *cmd;
+ struct se_queue_req *qr = NULL;
+
+ if (list_empty(&qobj->qobj_list))
+ return NULL;
+
+ list_for_each_entry(qr, &qobj->qobj_list, qr_list)
+ break;
+
+ if (qr->cmd) {
+ cmd = (struct se_cmd *)qr->cmd;
+ atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+ }
+ list_del(&qr->qr_list);
+ atomic_dec(&qobj->queue_cnt);
+
+ return qr;
+}
+
+static struct se_queue_req *
+transport_get_qr_from_queue(struct se_queue_obj *qobj)
+{
+ struct se_cmd *cmd;
+ struct se_queue_req *qr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+ if (list_empty(&qobj->qobj_list)) {
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+ return NULL;
+ }
+
+ list_for_each_entry(qr, &qobj->qobj_list, qr_list)
+ break;
+
+ if (qr->cmd) {
+ cmd = (struct se_cmd *)qr->cmd;
+ atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+ }
+ list_del(&qr->qr_list);
+ atomic_dec(&qobj->queue_cnt);
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ return qr;
+}
+
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
+ struct se_queue_obj *qobj)
+{
+ struct se_cmd *q_cmd;
+ struct se_queue_req *qr = NULL, *qr_p = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+ return;
+ }
+
+ list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
+ q_cmd = (struct se_cmd *)qr->cmd;
+ if (q_cmd != cmd)
+ continue;
+
+ atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
+ atomic_dec(&qobj->queue_cnt);
+ list_del(&qr->qr_list);
+ kfree(qr);
+ }
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
+ printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ atomic_read(&T_TASK(cmd)->t_transport_queue_active));
+ }
+}
+
+/*
+ * Completion function used by TCM subsystem plugins (such as FILEIO)
+ * for queueing up response from struct se_subsystem_api->do_task()
+ */
+void transport_complete_sync_cache(struct se_cmd *cmd, int good)
+{
+ struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
+ struct se_task, t_list);
+
+ if (good) {
+ cmd->scsi_status = SAM_STAT_GOOD;
+ task->task_scsi_status = GOOD;
+ } else {
+ task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+ task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
+ TASK_CMD(task)->transport_error_status =
+ PYX_TRANSPORT_ILLEGAL_REQUEST;
+ }
+
+ transport_complete_task(task, good);
+}
+EXPORT_SYMBOL(transport_complete_sync_cache);
+
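
A minimal, illustrative sketch of how a backend subsystem plugin might report completion through the two entry points above; my_backend_end_io() and my_sync_cache_done() are hypothetical callback names.

/* Hypothetical async I/O completion callback in a backend plugin. */
static void my_backend_end_io(struct se_task *task, int error)
{
	/* Non-zero "success" lets the normal status path run; zero marks failure. */
	transport_complete_task(task, !error);
}

/* Hypothetical completion of an emulated SYNCHRONIZE_CACHE. */
static void my_sync_cache_done(struct se_cmd *cmd, int error)
{
	/* good != 0 reports SAM_STAT_GOOD; otherwise ILLEGAL REQUEST is set. */
	transport_complete_sync_cache(cmd, !error);
}
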
+/* transport_complete_task():
+ *
+ * Called from interrupt and non interrupt context depending
+ * on the transport plugin.
+ */
+void transport_complete_task(struct se_task *task, int success)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct se_device *dev = task->se_dev;
+ int t_state;
+ unsigned long flags;
+#if 0
+ printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+ T_TASK(cmd)->t_task_cdb[0], dev);
+#endif
+ if (dev) {
+ spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+ atomic_inc(&dev->depth_left);
+ atomic_inc(&SE_HBA(dev)->left_queue_depth);
+ spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ }
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&task->task_active, 0);
+
+ /*
+ * See if any sense data exists; if so, set the TASK_SENSE flag.
+ * Also check for any other post completion work that needs to be
+ * done by the plugins.
+ */
+ if (dev && dev->transport->transport_complete) {
+ if (dev->transport->transport_complete(task) != 0) {
+ cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+ task->task_sense = 1;
+ success = 1;
+ }
+ }
+
+ /*
+ * See if we are waiting for outstanding struct se_task
+ * to complete for an exception condition
+ */
+ if (atomic_read(&task->task_stop)) {
+ /*
+ * Decrement T_TASK(cmd)->t_se_count if this task's timeout
+ * handler had previously fired.
+ */
+ if (atomic_read(&task->task_timeout)) {
+ atomic_dec(&T_TASK(cmd)->t_se_count);
+ atomic_set(&task->task_timeout, 0);
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ complete(&task->task_stop_comp);
+ return;
+ }
+ /*
+ * If the task's timeout handler has fired, use the t_task_cdbs_timeout_left
+ * counter to determine when the struct se_cmd is ready to be queued to
+ * the processing thread.
+ */
+ if (atomic_read(&task->task_timeout)) {
+ if (!(atomic_dec_and_test(
+ &T_TASK(cmd)->t_task_cdbs_timeout_left))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ return;
+ }
+ t_state = TRANSPORT_COMPLETE_TIMEOUT;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_add_cmd_to_queue(cmd, t_state);
+ return;
+ }
+ atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);
+
+ /*
+ * Decrement the outstanding t_task_cdbs_left count. The last
+ * struct se_task of the struct se_cmd queues the command for
+ * completion processing, depending upon int success.
+ */
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+ if (!success)
+ T_TASK(cmd)->t_tasks_failed = 1;
+
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+
+ if (!success || T_TASK(cmd)->t_tasks_failed) {
+ t_state = TRANSPORT_COMPLETE_FAILURE;
+ if (!task->task_error_status) {
+ task->task_error_status =
+ PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->transport_error_status =
+ PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ } else {
+ atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
+ t_state = TRANSPORT_COMPLETE_OK;
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_add_cmd_to_queue(cmd, t_state);
+}
+EXPORT_SYMBOL(transport_complete_task);
+
+/*
+ * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
+ * struct se_task list is ready to be added to the active execution list
+ * for a struct se_device.
+ *
+ * Called with se_dev_t->execute_task_lock held.
+ */
+static inline int transport_add_task_check_sam_attr(
+ struct se_task *task,
+ struct se_task *task_prev,
+ struct se_device *dev)
+{
+ /*
+ * If SAM Task Attribute emulation is not enabled, add to the
+ * tail of the execution queue.
+ */
+ if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
+ list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+ return 0;
+ }
+ /*
+ * A HEAD_OF_QUEUE attribute for the received CDB means the first
+ * task associated with a struct se_cmd goes to the head of
+ * struct se_device->execute_task_list, and each subsequent task is
+ * inserted after task_prev.
+ */
+ if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ list_add(&task->t_execute_list,
+ (task_prev != NULL) ?
+ &task_prev->t_execute_list :
+ &dev->execute_task_list);
+
+ DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+ " in execution queue\n",
+ T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+ return 1;
+ }
+ /*
+ * ORDERED, SIMPLE or UNTAGGED attribute tasks that have transitioned
+ * from Dormant -> Active state are added to the end of
+ * struct se_device->execute_task_list.
+ */
+ list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+ return 0;
+}
+
+/* __transport_add_task_to_execute_queue():
+ *
+ * Called with se_dev_t->execute_task_lock held.
+ */
+static void __transport_add_task_to_execute_queue(
+ struct se_task *task,
+ struct se_task *task_prev,
+ struct se_device *dev)
+{
+ int head_of_queue;
+
+ head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
+ atomic_inc(&dev->execute_tasks);
+
+ if (atomic_read(&task->task_state_active))
+ return;
+ /*
+ * Determine if this task needs to go to HEAD_OF_QUEUE for the
+ * state list as well. Running without SAM Task Attribute emulation
+ * will always return head_of_queue == 0 here.
+ */
+ if (head_of_queue)
+ list_add(&task->t_state_list, (task_prev) ?
+ &task_prev->t_state_list :
+ &dev->state_task_list);
+ else
+ list_add_tail(&task->t_state_list, &dev->state_task_list);
+
+ atomic_set(&task->task_state_active, 1);
+
+ DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
+ task, dev);
+}
+
+static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
+{
+ struct se_device *dev;
+ struct se_task *task;
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ dev = task->se_dev;
+
+ if (atomic_read(&task->task_state_active))
+ continue;
+
+ spin_lock(&dev->execute_task_lock);
+ list_add_tail(&task->t_state_list, &dev->state_task_list);
+ atomic_set(&task->task_state_active, 1);
+
+ DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ CMD_TFO(task->task_se_cmd)->get_task_tag(
+ task->task_se_cmd), task, dev);
+
+ spin_unlock(&dev->execute_task_lock);
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_task *task, *task_prev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ if (atomic_read(&task->task_execute_queue))
+ continue;
+ /*
+ * __transport_add_task_to_execute_queue() handles the
+ * SAM Task Attribute emulation if enabled
+ */
+ __transport_add_task_to_execute_queue(task, task_prev, dev);
+ atomic_set(&task->task_execute_queue, 1);
+ task_prev = task;
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ return;
+}
+
+/* transport_get_task_from_execute_queue():
+ *
+ * Called with dev->execute_task_lock held.
+ */
+static struct se_task *
+transport_get_task_from_execute_queue(struct se_device *dev)
+{
+ struct se_task *task;
+
+ if (list_empty(&dev->execute_task_list))
+ return NULL;
+
+ list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
+ break;
+
+ list_del(&task->t_execute_list);
+ atomic_dec(&dev->execute_tasks);
+
+ return task;
+}
+
+/* transport_remove_task_from_execute_queue():
+ *
+ *
+ */
+static void transport_remove_task_from_execute_queue(
+ struct se_task *task,
+ struct se_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ list_del(&task->t_execute_list);
+ atomic_dec(&dev->execute_tasks);
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
+{
+ switch (cmd->data_direction) {
+ case DMA_NONE:
+ return "NONE";
+ case DMA_FROM_DEVICE:
+ return "READ";
+ case DMA_TO_DEVICE:
+ return "WRITE";
+ case DMA_BIDIRECTIONAL:
+ return "BIDI";
+ default:
+ break;
+ }
+
+ return "UNKNOWN";
+}
+
+void transport_dump_dev_state(
+ struct se_device *dev,
+ char *b,
+ int *bl)
+{
+ *bl += sprintf(b + *bl, "Status: ");
+ switch (dev->dev_status) {
+ case TRANSPORT_DEVICE_ACTIVATED:
+ *bl += sprintf(b + *bl, "ACTIVATED");
+ break;
+ case TRANSPORT_DEVICE_DEACTIVATED:
+ *bl += sprintf(b + *bl, "DEACTIVATED");
+ break;
+ case TRANSPORT_DEVICE_SHUTDOWN:
+ *bl += sprintf(b + *bl, "SHUTDOWN");
+ break;
+ case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+ case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+ *bl += sprintf(b + *bl, "OFFLINE");
+ break;
+ default:
+ *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
+ break;
+ }
+
+ *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
+ atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
+ dev->queue_depth);
+ *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
+ DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
+ *bl += sprintf(b + *bl, " ");
+}
+
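
As a small usage sketch for the helper above; the 256-byte buffer size is an arbitrary illustrative choice.

static void my_dump_dev(struct se_device *dev)
{
	char buf[256];
	int bl = 0;

	/* Appends formatted state at buf + bl and advances bl. */
	transport_dump_dev_state(dev, buf, &bl);
	printk(KERN_INFO "%s\n", buf);
}
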
+/* transport_release_all_cmds():
+ *
+ *
+ */
+static void transport_release_all_cmds(struct se_device *dev)
+{
+ struct se_cmd *cmd = NULL;
+ struct se_queue_req *qr = NULL, *qr_p = NULL;
+ int bug_out = 0, t_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
+ qr_list) {
+
+ cmd = (struct se_cmd *)qr->cmd;
+ t_state = qr->state;
+ list_del(&qr->qr_list);
+ kfree(qr);
+ spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
+ flags);
+
+ printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+ " t_state: %u directly\n",
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
+
+ transport_release_fe_cmd(cmd);
+ bug_out = 1;
+
+ spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ }
+ spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+#if 0
+ if (bug_out)
+ BUG();
+#endif
+}
+
+void transport_dump_vpd_proto_id(
+ struct t10_vpd *vpd,
+ unsigned char *p_buf,
+ int p_buf_len)
+{
+ unsigned char buf[VPD_TMP_BUF_SIZE];
+ int len;
+
+ memset(buf, 0, VPD_TMP_BUF_SIZE);
+ len = sprintf(buf, "T10 VPD Protocol Identifier: ");
+
+ switch (vpd->protocol_identifier) {
+ case 0x00:
+ sprintf(buf+len, "Fibre Channel\n");
+ break;
+ case 0x10:
+ sprintf(buf+len, "Parallel SCSI\n");
+ break;
+ case 0x20:
+ sprintf(buf+len, "SSA\n");
+ break;
+ case 0x30:
+ sprintf(buf+len, "IEEE 1394\n");
+ break;
+ case 0x40:
+ sprintf(buf+len, "SCSI Remote Direct Memory Access"
+ " Protocol\n");
+ break;
+ case 0x50:
+ sprintf(buf+len, "Internet SCSI (iSCSI)\n");
+ break;
+ case 0x60:
+ sprintf(buf+len, "SAS Serial SCSI Protocol\n");
+ break;
+ case 0x70:
+ sprintf(buf+len, "Automation/Drive Interface Transport"
+ " Protocol\n");
+ break;
+ case 0x80:
+ sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
+ break;
+ default:
+ sprintf(buf+len, "Unknown 0x%02x\n",
+ vpd->protocol_identifier);
+ break;
+ }
+
+ if (p_buf)
+ strncpy(p_buf, buf, p_buf_len);
+ else
+ printk(KERN_INFO "%s", buf);
+}
+
+void
+transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
+{
+ /*
+ * Check if the Protocol Identifier Valid (PIV) bit is set.
+ *
+ * from spc3r23.pdf section 7.5.1
+ */
+ if (page_83[1] & 0x80) {
+ vpd->protocol_identifier = (page_83[0] & 0xf0);
+ vpd->protocol_identifier_set = 1;
+ transport_dump_vpd_proto_id(vpd, NULL, 0);
+ }
+}
+EXPORT_SYMBOL(transport_set_vpd_proto_id);
+
+int transport_dump_vpd_assoc(
+ struct t10_vpd *vpd,
+ unsigned char *p_buf,
+ int p_buf_len)
+{
+ unsigned char buf[VPD_TMP_BUF_SIZE];
+ int ret = 0, len;
+
+ memset(buf, 0, VPD_TMP_BUF_SIZE);
+ len = sprintf(buf, "T10 VPD Identifier Association: ");
+
+ switch (vpd->association) {
+ case 0x00:
+ sprintf(buf+len, "addressed logical unit\n");
+ break;
+ case 0x10:
+ sprintf(buf+len, "target port\n");
+ break;
+ case 0x20:
+ sprintf(buf+len, "SCSI target device\n");
+ break;
+ default:
+ sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
+ ret = -1;
+ break;
+ }
+
+ if (p_buf)
+ strncpy(p_buf, buf, p_buf_len);
+ else
+ printk("%s", buf);
+
+ return ret;
+}
+
+int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
+{
+ /*
+ * The VPD identification association.
+ *
+ * from spc3r23.pdf Section 7.6.3.1 Table 297
+ */
+ vpd->association = (page_83[1] & 0x30);
+ return transport_dump_vpd_assoc(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_assoc);
+
+int transport_dump_vpd_ident_type(
+ struct t10_vpd *vpd,
+ unsigned char *p_buf,
+ int p_buf_len)
+{
+ unsigned char buf[VPD_TMP_BUF_SIZE];
+ int ret = 0, len;
+
+ memset(buf, 0, VPD_TMP_BUF_SIZE);
+ len = sprintf(buf, "T10 VPD Identifier Type: ");
+
+ switch (vpd->device_identifier_type) {
+ case 0x00:
+ sprintf(buf+len, "Vendor specific\n");
+ break;
+ case 0x01:
+ sprintf(buf+len, "T10 Vendor ID based\n");
+ break;
+ case 0x02:
+ sprintf(buf+len, "EUI-64 based\n");
+ break;
+ case 0x03:
+ sprintf(buf+len, "NAA\n");
+ break;
+ case 0x04:
+ sprintf(buf+len, "Relative target port identifier\n");
+ break;
+ case 0x08:
+ sprintf(buf+len, "SCSI name string\n");
+ break;
+ default:
+ sprintf(buf+len, "Unsupported: 0x%02x\n",
+ vpd->device_identifier_type);
+ ret = -1;
+ break;
+ }
+
+ if (p_buf)
+ strncpy(p_buf, buf, p_buf_len);
+ else
+ printk("%s", buf);
+
+ return ret;
+}
+
+int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
+{
+ /*
+ * The VPD identifier type.
+ *
+ * from spc3r23.pdf Section 7.6.3.1 Table 298
+ */
+ vpd->device_identifier_type = (page_83[1] & 0x0f);
+ return transport_dump_vpd_ident_type(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident_type);
+
+int transport_dump_vpd_ident(
+ struct t10_vpd *vpd,
+ unsigned char *p_buf,
+ int p_buf_len)
+{
+ unsigned char buf[VPD_TMP_BUF_SIZE];
+ int ret = 0;
+
+ memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+ switch (vpd->device_identifier_code_set) {
+ case 0x01: /* Binary */
+ sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+ &vpd->device_identifier[0]);
+ break;
+ case 0x02: /* ASCII */
+ sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+ &vpd->device_identifier[0]);
+ break;
+ case 0x03: /* UTF-8 */
+ sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+ &vpd->device_identifier[0]);
+ break;
+ default:
+ sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
+ " 0x%02x", vpd->device_identifier_code_set);
+ ret = -1;
+ break;
+ }
+
+ if (p_buf)
+ strncpy(p_buf, buf, p_buf_len);
+ else
+ printk("%s", buf);
+
+ return ret;
+}
+
+int
+transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
+{
+ static const char hex_str[] = "0123456789abcdef";
+ int j = 0, i = 4; /* offset to start of the identifier */
+
+ /*
+ * The VPD Code Set (encoding)
+ *
+ * from spc3r23.pdf Section 7.6.3.1 Table 296
+ */
+ vpd->device_identifier_code_set = (page_83[0] & 0x0f);
+ switch (vpd->device_identifier_code_set) {
+ case 0x01: /* Binary */
+ vpd->device_identifier[j++] =
+ hex_str[vpd->device_identifier_type];
+ while (i < (4 + page_83[3])) {
+ vpd->device_identifier[j++] =
+ hex_str[(page_83[i] & 0xf0) >> 4];
+ vpd->device_identifier[j++] =
+ hex_str[page_83[i] & 0x0f];
+ i++;
+ }
+ break;
+ case 0x02: /* ASCII */
+ case 0x03: /* UTF-8 */
+ while (i < (4 + page_83[3]))
+ vpd->device_identifier[j++] = page_83[i++];
+ break;
+ default:
+ break;
+ }
+
+ return transport_dump_vpd_ident(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident);
+
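
As a worked example of the designation-descriptor byte layout the four VPD helpers above parse, here is an illustrative descriptor and the values each helper would extract from it; the sample bytes are made up.

/* Illustrative EVPD 0x83 designation descriptor:
 *   byte 0: protocol id 0x60 (SAS) | code set 0x01 (binary)
 *   byte 1: PIV=1 | association 0x10 (target port) | type 0x03 (NAA)
 *   byte 3: designator length (8), bytes 4..11: NAA designator
 */
static unsigned char sample_desc[] = {
	0x61, 0x93, 0x00, 0x08,
	0x50, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd,
};

static void my_parse_sample(struct t10_vpd *vpd)
{
	transport_set_vpd_proto_id(vpd, sample_desc);	/* "SAS Serial SCSI Protocol" */
	transport_set_vpd_assoc(vpd, sample_desc);	/* "target port" */
	transport_set_vpd_ident_type(vpd, sample_desc);	/* "NAA" */
	transport_set_vpd_ident(vpd, sample_desc);	/* hex-encodes the 8 NAA bytes */
}
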
+static void core_setup_task_attr_emulation(struct se_device *dev)
+{
+ /*
+ * If this device is from Target_Core_Mod/pSCSI, disable the
+ * SAM Task Attribute emulation.
+ *
+ * This is currently not available in upstream Linux/SCSI Target
+ * mode code, and is assumed to be disabled while using TCM/pSCSI.
+ */
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
+ return;
+ }
+
+ dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
+ DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+ " device\n", TRANSPORT(dev)->name,
+ TRANSPORT(dev)->get_device_rev(dev));
+}
+
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+ struct t10_wwn *wwn = DEV_T10_WWN(dev);
+ int i, device_type;
+ /*
+ * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+ */
+ printk(" Vendor: ");
+ for (i = 0; i < 8; i++)
+ if (wwn->vendor[i] >= 0x20)
+ printk("%c", wwn->vendor[i]);
+ else
+ printk(" ");
+
+ printk(" Model: ");
+ for (i = 0; i < 16; i++)
+ if (wwn->model[i] >= 0x20)
+ printk("%c", wwn->model[i]);
+ else
+ printk(" ");
+
+ printk(" Revision: ");
+ for (i = 0; i < 4; i++)
+ if (wwn->revision[i] >= 0x20)
+ printk("%c", wwn->revision[i]);
+ else
+ printk(" ");
+
+ printk("\n");
+
+ device_type = TRANSPORT(dev)->get_device_type(dev);
+ printk(" Type: %s ", scsi_device_type(device_type));
+ printk(" ANSI SCSI revision: %02x\n",
+ TRANSPORT(dev)->get_device_rev(dev));
+}
+
+struct se_device *transport_add_device_to_core_hba(
+ struct se_hba *hba,
+ struct se_subsystem_api *transport,
+ struct se_subsystem_dev *se_dev,
+ u32 device_flags,
+ void *transport_dev,
+ struct se_dev_limits *dev_limits,
+ const char *inquiry_prod,
+ const char *inquiry_rev)
+{
+ int ret = 0, force_pt;
+ struct se_device *dev;
+
+ dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
+ if (!(dev)) {
+ printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
+ return NULL;
+ }
+ dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
+ if (!(dev->dev_queue_obj)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " dev->dev_queue_obj\n");
+ kfree(dev);
+ return NULL;
+ }
+ transport_init_queue_obj(dev->dev_queue_obj);
+
+ dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
+ GFP_KERNEL);
+ if (!(dev->dev_status_queue_obj)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " dev->dev_status_queue_obj\n");
+ kfree(dev->dev_queue_obj);
+ kfree(dev);
+ return NULL;
+ }
+ transport_init_queue_obj(dev->dev_status_queue_obj);
+
+ dev->dev_flags = device_flags;
+ dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
+ dev->dev_ptr = (void *) transport_dev;
+ dev->se_hba = hba;
+ dev->se_sub_dev = se_dev;
+ dev->transport = transport;
+ atomic_set(&dev->active_cmds, 0);
+ INIT_LIST_HEAD(&dev->dev_list);
+ INIT_LIST_HEAD(&dev->dev_sep_list);
+ INIT_LIST_HEAD(&dev->dev_tmr_list);
+ INIT_LIST_HEAD(&dev->execute_task_list);
+ INIT_LIST_HEAD(&dev->delayed_cmd_list);
+ INIT_LIST_HEAD(&dev->ordered_cmd_list);
+ INIT_LIST_HEAD(&dev->state_task_list);
+ spin_lock_init(&dev->execute_task_lock);
+ spin_lock_init(&dev->delayed_cmd_lock);
+ spin_lock_init(&dev->ordered_cmd_lock);
+ spin_lock_init(&dev->state_task_lock);
+ spin_lock_init(&dev->dev_alua_lock);
+ spin_lock_init(&dev->dev_reservation_lock);
+ spin_lock_init(&dev->dev_status_lock);
+ spin_lock_init(&dev->dev_status_thr_lock);
+ spin_lock_init(&dev->se_port_lock);
+ spin_lock_init(&dev->se_tmr_lock);
+
+ dev->queue_depth = dev_limits->queue_depth;
+ atomic_set(&dev->depth_left, dev->queue_depth);
+ atomic_set(&dev->dev_ordered_id, 0);
+
+ se_dev_set_default_attribs(dev, dev_limits);
+
+ dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+ dev->creation_time = get_jiffies_64();
+ spin_lock_init(&dev->stats_lock);
+
+ spin_lock(&hba->device_lock);
+ list_add_tail(&dev->dev_list, &hba->hba_dev_list);
+ hba->dev_count++;
+ spin_unlock(&hba->device_lock);
+ /*
+ * Setup the SAM Task Attribute emulation for struct se_device
+ */
+ core_setup_task_attr_emulation(dev);
+ /*
+ * Force PR and ALUA passthrough emulation with internal object use.
+ */
+ force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
+ /*
+ * Setup the Reservations infrastructure for struct se_device
+ */
+ core_setup_reservations(dev, force_pt);
+ /*
+ * Setup the Asymmetric Logical Unit Assignment for struct se_device
+ */
+ if (core_setup_alua(dev, force_pt) < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Startup the struct se_device processing thread
+ */
+ dev->process_thread = kthread_run(transport_processing_thread, dev,
+ "LIO_%s", TRANSPORT(dev)->name);
+ if (IS_ERR(dev->process_thread)) {
+ printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
+ TRANSPORT(dev)->name);
+ ret = PTR_ERR(dev->process_thread);
+ goto out;
+ }
+
+ /*
+ * Preload the initial INQUIRY const values if we are doing
+ * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+ * passthrough because this is being provided by the backend LLD.
+ * This is required so that transport_get_inquiry() copies these
+ * originals once back into DEV_T10_WWN(dev) for the virtual device
+ * setup.
+ */
+ if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (!(inquiry_prod) || !(inquiry_rev)) {
+ printk(KERN_ERR "All non TCM/pSCSI plugins require"
+ " INQUIRY consts\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
+ strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
+ strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
+ }
+ scsi_dump_inquiry(dev);
+
+out:
+ if (!ret)
+ return dev;
+
+ if (!IS_ERR_OR_NULL(dev->process_thread))
+ kthread_stop(dev->process_thread);
+
+ spin_lock(&hba->device_lock);
+ list_del(&dev->dev_list);
+ hba->dev_count--;
+ spin_unlock(&hba->device_lock);
+
+ se_release_vpd_for_dev(dev);
+
+ kfree(dev->dev_status_queue_obj);
+ kfree(dev->dev_queue_obj);
+ kfree(dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(transport_add_device_to_core_hba);
+
+/* transport_generic_prepare_cdb():
+ *
+ * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
+ * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
+ * Because we are mapping iSCSI LUNs to SCSI Target IDs, a non-zero
+ * LUN in the CDB would throw the devices and HBAs for a loop.
+ */
+static inline void transport_generic_prepare_cdb(
+ unsigned char *cdb)
+{
+ switch (cdb[0]) {
+ case READ_10: /* SBC - RDProtect */
+ case READ_12: /* SBC - RDProtect */
+ case READ_16: /* SBC - RDProtect */
+ case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+ case VERIFY: /* SBC - VRProtect */
+ case VERIFY_16: /* SBC - VRProtect */
+ case WRITE_VERIFY: /* SBC - VRProtect */
+ case WRITE_VERIFY_12: /* SBC - VRProtect */
+ break;
+ default:
+ cdb[1] &= 0x1f; /* clear logical unit number */
+ break;
+ }
+}
+
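
A conceptual example of the masking above, shown as a READ_6 CDB carrying a legacy SAM-2 LUN in byte 1; transport_generic_prepare_cdb() is static to this file, so the wrapper function is illustrative only.

static void my_prepare_cdb_example(void)
{
	/* READ_6 with LUN 2 encoded in bits 7-5 of byte 1 and LBA 0x010000. */
	unsigned char cdb[6] = { READ_6, 0x40 | 0x01, 0x00, 0x00, 0x08, 0x00 };

	transport_generic_prepare_cdb(cdb);
	/* READ_6 is not in the exemption list, so cdb[1] is now 0x01:
	 * the LUN bits are cleared and the high LBA bits are preserved.
	 */
}
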
+static struct se_task *
+transport_generic_get_task(struct se_cmd *cmd,
+ enum dma_data_direction data_direction)
+{
+ struct se_task *task;
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned long flags;
+
+ task = dev->transport->alloc_task(cmd);
+ if (!task) {
+ printk(KERN_ERR "Unable to allocate struct se_task\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&task->t_list);
+ INIT_LIST_HEAD(&task->t_execute_list);
+ INIT_LIST_HEAD(&task->t_state_list);
+ init_completion(&task->task_stop_comp);
+ task->task_no = T_TASK(cmd)->t_tasks_no++;
+ task->task_se_cmd = cmd;
+ task->se_dev = dev;
+ task->task_data_direction = data_direction;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return task;
+}
+
+static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
+
+void transport_device_setup_cmd(struct se_cmd *cmd)
+{
+ cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
+}
+EXPORT_SYMBOL(transport_device_setup_cmd);
+
+/*
+ * Used by fabric modules containing a local struct se_cmd within their
+ * fabric dependent per I/O descriptor.
+ */
+void transport_init_se_cmd(
+ struct se_cmd *cmd,
+ struct target_core_fabric_ops *tfo,
+ struct se_session *se_sess,
+ u32 data_length,
+ int data_direction,
+ int task_attr,
+ unsigned char *sense_buffer)
+{
+ INIT_LIST_HEAD(&cmd->se_lun_list);
+ INIT_LIST_HEAD(&cmd->se_delayed_list);
+ INIT_LIST_HEAD(&cmd->se_ordered_list);
+ /*
+ * Setup t_task pointer to t_task_backstore
+ */
+ cmd->t_task = &cmd->t_task_backstore;
+
+ INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
+ init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+ init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
+ init_completion(&T_TASK(cmd)->t_transport_stop_comp);
+ spin_lock_init(&T_TASK(cmd)->t_state_lock);
+ atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
+
+ cmd->se_tfo = tfo;
+ cmd->se_sess = se_sess;
+ cmd->data_length = data_length;
+ cmd->data_direction = data_direction;
+ cmd->sam_task_attr = task_attr;
+ cmd->sense_buffer = sense_buffer;
+}
+EXPORT_SYMBOL(transport_init_se_cmd);
+
+static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+{
+ /*
+ * Check if SAM Task Attribute emulation is enabled for this
+ * struct se_device storage object
+ */
+ if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ return 0;
+
+ if (cmd->sam_task_attr == TASK_ATTR_ACA) {
+ DEBUG_STA("SAM Task Attribute ACA"
+ " emulation is not supported\n");
+ return -1;
+ }
+ /*
+ * Used to determine when ORDERED commands should go from
+ * Dormant to Active status.
+ */
+ cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
+ smp_mb__after_atomic_inc();
+ DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+ cmd->se_ordered_id, cmd->sam_task_attr,
+ TRANSPORT(cmd->se_dev)->name);
+ return 0;
+}
+
+void transport_free_se_cmd(
+ struct se_cmd *se_cmd)
+{
+ if (se_cmd->se_tmr_req)
+ core_tmr_release_req(se_cmd->se_tmr_req);
+ /*
+ * Check and free any extended CDB buffer that was allocated
+ */
+ if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
+ kfree(T_TASK(se_cmd)->t_task_cdb);
+}
+EXPORT_SYMBOL(transport_free_se_cmd);
+
+static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
+
+/* transport_generic_allocate_tasks():
+ *
+ * Called from fabric RX Thread.
+ */
+int transport_generic_allocate_tasks(
+ struct se_cmd *cmd,
+ unsigned char *cdb)
+{
+ int ret;
+
+ transport_generic_prepare_cdb(cdb);
+
+ /*
+ * This is needed for early exceptions.
+ */
+ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
+
+ transport_device_setup_cmd(cmd);
+ /*
+ * Ensure that the received CDB is less than the max (252 + 8) bytes
+ * for VARIABLE_LENGTH_CMD
+ */
+ if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
+ printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+ " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+ scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+ return -1;
+ }
+ /*
+ * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
+ * allocate the additional extended CDB buffer now. Otherwise
+ * setup the pointer from __t_task_cdb to t_task_cdb.
+ */
+ if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
+ T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
+ GFP_KERNEL);
+ if (!(T_TASK(cmd)->t_task_cdb)) {
+ printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
+ " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
+ scsi_command_size(cdb),
+ (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
+ return -1;
+ }
+ } else
+ T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
+ /*
+ * Copy the original CDB into T_TASK(cmd).
+ */
+ memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
+ /*
+ * Setup the received CDB based on SCSI defined opcodes and
+ * perform unit attention, persistent reservations and ALUA
+ * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb
+ * pointer is expected to be setup before we reach this point.
+ */
+ ret = transport_generic_cmd_sequencer(cmd, cdb);
+ if (ret < 0)
+ return ret;
+ /*
+ * Check for SAM Task Attribute Emulation
+ */
+ if (transport_check_alloc_task_attr(cmd) < 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -2;
+ }
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+ if (cmd->se_lun->lun_sep)
+ cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
+ spin_unlock(&cmd->se_lun->lun_sep_lock);
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_allocate_tasks);
+
+/*
+ * Used by fabric module frontends not defining a TFO->new_cmd_map()
+ * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status
+ */
+int transport_generic_handle_cdb(
+ struct se_cmd *cmd)
+{
+ if (!SE_LUN(cmd)) {
+ dump_stack();
+ printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+ return -1;
+ }
+
+ transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb);
+
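
A minimal, illustrative sketch of a fabric RX path using the calls above; struct my_cmd, my_tfo, my_lookup_lun() and the embedded sense buffer are hypothetical, error handling is reduced to bare return codes, and it assumes the se_cmd is embedded in the fabric's per-I/O descriptor.

/* Illustrative fabric RX path; all my_* names are hypothetical placeholders. */
static int my_fabric_handle_scsi_cmd(struct my_cmd *ioc, unsigned char *cdb,
				     u32 data_length, int data_dir, int task_attr)
{
	struct se_cmd *se_cmd = &ioc->se_cmd;

	transport_init_se_cmd(se_cmd, &my_tfo, ioc->se_sess, data_length,
			data_dir, task_attr, &ioc->sense_buf[0]);

	if (my_lookup_lun(se_cmd, ioc->unpacked_lun) < 0)	/* sets SE_LUN(cmd) */
		return -ENODEV;

	/* Validates the CDB, sets up T_TASK(cmd)->t_task_cdb, runs the sequencer. */
	if (transport_generic_allocate_tasks(se_cmd, cdb) < 0)
		return -EINVAL;

	/* Queue to the device processing thread with TRANSPORT_NEW_CMD state. */
	return transport_generic_handle_cdb(se_cmd);
}
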
+/*
+ * Used by fabric module frontends defining a TFO->new_cmd_map() caller
+ * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
+ * complete setup in TCM process context w/ TFO->new_cmd_map().
+ */
+int transport_generic_handle_cdb_map(
+ struct se_cmd *cmd)
+{
+ if (!SE_LUN(cmd)) {
+ dump_stack();
+ printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+ return -1;
+ }
+
+ transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb_map);
+
+/* transport_generic_handle_data():
+ *
+ *
+ */
+int transport_generic_handle_data(
+ struct se_cmd *cmd)
+{
+ /*
+ * For the software fabric case, we assume the nexus is being
+ * failed/shutdown when signals are pending from the kthread context
+ * caller, so we return a failure. For the HW target mode case running
+ * in interrupt code, the signal_pending() check is skipped.
+ */
+ if (!in_interrupt() && signal_pending(current))
+ return -1;
+ /*
+ * If the received CDB has already been ABORTED by the generic
+ * target engine, we now call transport_check_aborted_status()
+ * to queue any delayed TASK_ABORTED status for the received CDB to the
+ * fabric module as we are expecting no further incoming DATA OUT
+ * sequences at this point.
+ */
+ if (transport_check_aborted_status(cmd, 1) != 0)
+ return 0;
+
+ transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_data);
+
+/* transport_generic_handle_tmr():
+ *
+ *
+ */
+int transport_generic_handle_tmr(
+ struct se_cmd *cmd)
+{
+ /*
+ * This is needed for early exceptions.
+ */
+ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
+ transport_device_setup_cmd(cmd);
+
+ transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_tmr);
+
+static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
+{
+ struct se_task *task, *task_tmp;
+ unsigned long flags;
+ int ret = 0;
+
+ DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ /*
+ * No tasks remain in the execution queue
+ */
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ list_for_each_entry_safe(task, task_tmp,
+ &T_TASK(cmd)->t_task_list, t_list) {
+ DEBUG_TS("task_no[%d] - Processing task %p\n",
+ task->task_no, task);
+ /*
+ * If the struct se_task has not been sent and is not active,
+ * remove the struct se_task from the execution queue.
+ */
+ if (!atomic_read(&task->task_sent) &&
+ !atomic_read(&task->task_active)) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ transport_remove_task_from_execute_queue(task,
+ task->se_dev);
+
+ DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+ task->task_no);
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ continue;
+ }
+
+ /*
+ * If the struct se_task is active, sleep until it is returned
+ * from the plugin.
+ */
+ if (atomic_read(&task->task_active)) {
+ atomic_set(&task->task_stop, 1);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+
+ DEBUG_TS("task_no[%d] - Waiting to complete\n",
+ task->task_no);
+ wait_for_completion(&task->task_stop_comp);
+ DEBUG_TS("task_no[%d] - Stopped successfully\n",
+ task->task_no);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+ atomic_set(&task->task_active, 0);
+ atomic_set(&task->task_stop, 0);
+ } else {
+ DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+ ret++;
+ }
+
+ __transport_stop_task_timer(task, &flags);
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return ret;
+}
+
+static void transport_failure_reset_queue_depth(struct se_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+ atomic_inc(&dev->depth_left);
+ atomic_inc(&SE_HBA(dev)->left_queue_depth);
+ spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+}
+
+/*
+ * Handle SAM-esque emulation for generic transport request failures.
+ */
+static void transport_generic_request_failure(
+ struct se_cmd *cmd,
+ struct se_device *dev,
+ int complete,
+ int sc)
+{
+ DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+ " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
+ T_TASK(cmd)->t_task_cdb[0]);
+ DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+ " %d/%d transport_error_status: %d\n",
+ CMD_TFO(cmd)->get_cmd_state(cmd),
+ cmd->t_state, cmd->deferred_t_state,
+ cmd->transport_error_status);
+ DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+ " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
+ " t_transport_active: %d t_transport_stop: %d"
+ " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
+ atomic_read(&T_TASK(cmd)->t_transport_active),
+ atomic_read(&T_TASK(cmd)->t_transport_stop),
+ atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+ transport_stop_all_task_timers(cmd);
+
+ if (dev)
+ transport_failure_reset_queue_depth(dev);
+ /*
+ * For SAM Task Attribute emulation for failed struct se_cmd
+ */
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ transport_complete_task_attr(cmd);
+
+ if (complete) {
+ transport_direct_request_timeout(cmd);
+ cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+
+ switch (cmd->transport_error_status) {
+ case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ break;
+ case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
+ cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
+ break;
+ case PYX_TRANSPORT_INVALID_CDB_FIELD:
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ break;
+ case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ break;
+ case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
+ if (!sc)
+ transport_new_cmd_failure(cmd);
+ /*
+ * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
+ * we force this session to fall back to session
+ * recovery.
+ */
+ CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
+ CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
+
+ goto check_stop;
+ case PYX_TRANSPORT_LU_COMM_FAILURE:
+ case PYX_TRANSPORT_ILLEGAL_REQUEST:
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ break;
+ case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
+ cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+ break;
+ case PYX_TRANSPORT_WRITE_PROTECTED:
+ cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ break;
+ case PYX_TRANSPORT_RESERVATION_CONFLICT:
+ /*
+ * No SENSE Data payload for this case, set SCSI Status
+ * and queue the response to $FABRIC_MOD.
+ *
+ * Uses linux/include/scsi/scsi.h SAM status codes defs
+ */
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ /*
+ * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+ * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+ * CONFLICT STATUS.
+ *
+ * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+ */
+ if (SE_SESS(cmd) &&
+ DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
+ core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+ cmd->orig_fe_lun, 0x2C,
+ ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+
+ CMD_TFO(cmd)->queue_status(cmd);
+ goto check_stop;
+ case PYX_TRANSPORT_USE_SENSE_REASON:
+ /*
+ * struct se_cmd->scsi_sense_reason already set
+ */
+ break;
+ default:
+ printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
+ T_TASK(cmd)->t_task_cdb[0],
+ cmd->transport_error_status);
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ break;
+ }
+
+ if (!sc)
+ transport_new_cmd_failure(cmd);
+ else
+ transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+check_stop:
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void transport_direct_request_timeout(struct se_cmd *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+ if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+
+ atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
+ &T_TASK(cmd)->t_se_count);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static void transport_generic_request_timeout(struct se_cmd *cmd)
+{
+ unsigned long flags;
+
+ /*
+ * Reset T_TASK(cmd)->t_se_count so that the final call to
+ * transport_generic_remove() can free memory resources.
+ */
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
+ int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
+
+ atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_generic_remove(cmd, 0, 0);
+}
+
+static int
+transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
+{
+ unsigned char *buf;
+
+ buf = kzalloc(data_length, GFP_KERNEL);
+ if (!(buf)) {
+ printk(KERN_ERR "Unable to allocate memory for buffer\n");
+ return -1;
+ }
+
+ T_TASK(cmd)->t_tasks_se_num = 0;
+ T_TASK(cmd)->t_task_buf = buf;
+
+ return 0;
+}
+
+static inline u32 transport_lba_21(unsigned char *cdb)
+{
+ return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+}
+
+static inline u32 transport_lba_32(unsigned char *cdb)
+{
+ return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+}
+
+static inline unsigned long long transport_lba_64(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+ __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
+ */
+static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
+ __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
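
To make the byte positions concrete, here is an illustrative use of the LBA helpers above; the sample CDB contents are made up, and the helpers are static to this file, so the wrapper is conceptual only.

static void my_lba_examples(void)
{
	/* READ_10: LBA in big-endian bytes 2-5. */
	unsigned char r10[10] = { READ_10, 0, 0x00, 0x12, 0x34, 0x56, 0, 0, 0x08, 0 };
	/* READ_16: LBA in big-endian bytes 2-9. */
	unsigned char r16[16] = { READ_16, 0,
				  0x00, 0x00, 0x00, 0x01,	/* upper 32 bits */
				  0x00, 0x00, 0x00, 0x00,	/* lower 32 bits */
				  0, 0, 0, 0x08, 0, 0 };
	u32 lba32 = transport_lba_32(r10);			/* 0x00123456 */
	unsigned long long lba64 = transport_lba_64(r16);	/* 0x100000000ULL */

	(void)lba32;
	(void)lba64;
}
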
+static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+ se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
+ spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+}
+
+/*
+ * Called from interrupt context.
+ */
+static void transport_task_timeout_handler(unsigned long data)
+{
+ struct se_task *task = (struct se_task *)data;
+ struct se_cmd *cmd = TASK_CMD(task);
+ unsigned long flags;
+
+ DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (task->task_flags & TF_STOP) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+ task->task_flags &= ~TF_RUNNING;
+
+ /*
+ * Determine if transport_complete_task() has already been called.
+ */
+ if (!(atomic_read(&task->task_active))) {
+ DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+ " == 0\n", task, cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+
+ atomic_inc(&T_TASK(cmd)->t_se_count);
+ atomic_inc(&T_TASK(cmd)->t_transport_timeout);
+ T_TASK(cmd)->t_tasks_failed = 1;
+
+ atomic_set(&task->task_timeout, 1);
+ task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
+ task->task_scsi_status = 1;
+
+ if (atomic_read(&task->task_stop)) {
+ DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+ " == 1\n", task, cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ complete(&task->task_stop_comp);
+ return;
+ }
+
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+ DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+ " t_task_cdbs_left\n", task, cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+ DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+ task, cmd);
+
+ cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
+}
+
+/*
+ * Called with T_TASK(cmd)->t_state_lock held.
+ */
+static void transport_start_task_timer(struct se_task *task)
+{
+ struct se_device *dev = task->se_dev;
+ int timeout;
+
+ if (task->task_flags & TF_RUNNING)
+ return;
+ /*
+ * If the task_timeout is disabled, exit now.
+ */
+ timeout = DEV_ATTRIB(dev)->task_timeout;
+ if (!(timeout))
+ return;
+
+ init_timer(&task->task_timer);
+ task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
+ task->task_timer.data = (unsigned long) task;
+ task->task_timer.function = transport_task_timeout_handler;
+
+ task->task_flags |= TF_RUNNING;
+ add_timer(&task->task_timer);
+#if 0
+ printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+ " %d\n", task->task_se_cmd, task, timeout);
+#endif
+}
+
+/*
+ * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
+ */
+void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+
+ if (!(task->task_flags & TF_RUNNING))
+ return;
+
+ task->task_flags |= TF_STOP;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
+
+ del_timer_sync(&task->task_timer);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
+ task->task_flags &= ~TF_RUNNING;
+ task->task_flags &= ~TF_STOP;
+}
+
+static void transport_stop_all_task_timers(struct se_cmd *cmd)
+{
+ struct se_task *task = NULL, *task_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ list_for_each_entry_safe(task, task_tmp,
+ &T_TASK(cmd)->t_task_list, t_list)
+ __transport_stop_task_timer(task, &flags);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static inline int transport_tcq_window_closed(struct se_device *dev)
+{
+ if (dev->dev_tcq_window_closed++ <
+ PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
+ msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
+ } else
+ msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
+
+ wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+ return 0;
+}
+
+/*
+ * Called from Fabric Module context from transport_execute_tasks()
+ *
+ * The return of this function determines if the tasks from struct se_cmd
+ * get added to the execution queue in transport_execute_tasks(),
+ * or are added to the delayed or ordered lists here.
+ */
+static inline int transport_execute_task_attr(struct se_cmd *cmd)
+{
+ if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ return 1;
+ /*
+ * Check for the existence of HEAD_OF_QUEUE, and if true return 1 to
+ * allow the tasks for the passed struct se_cmd to be added to the front of the list.
+ */
+ if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
+ smp_mb__after_atomic_inc();
+ DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+ " 0x%02x, se_ordered_id: %u\n",
+ T_TASK(cmd)->t_task_cdb[0],
+ cmd->se_ordered_id);
+ return 1;
+ } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
+ list_add_tail(&cmd->se_ordered_list,
+ &SE_DEV(cmd)->ordered_cmd_list);
+ spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
+
+ atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
+ smp_mb__after_atomic_inc();
+
+ DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+ " list, se_ordered_id: %u\n",
+ T_TASK(cmd)->t_task_cdb[0],
+ cmd->se_ordered_id);
+ /*
+ * Add ORDERED command to tail of execution queue if
+ * no other older commands exist that need to be
+ * completed first.
+ */
+ if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
+ return 1;
+ } else {
+ /*
+ * For SIMPLE and UNTAGGED Task Attribute commands
+ */
+ atomic_inc(&SE_DEV(cmd)->simple_cmds);
+ smp_mb__after_atomic_inc();
+ }
+ /*
+ * Otherwise if one or more outstanding ORDERED task attributes exist,
+ * add the dormant task(s) built for the passed struct se_cmd to the
+ * execution queue so they become Active for this struct se_device.
+ */
+ if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
+ /*
+ * Otherwise, add cmd w/ tasks to delayed cmd queue that
+ * will be drained upon completion of HEAD_OF_QUEUE task.
+ */
+ spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
+ cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
+ list_add_tail(&cmd->se_delayed_list,
+ &SE_DEV(cmd)->delayed_cmd_list);
+ spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
+
+ DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+ " delayed CMD list, se_ordered_id: %u\n",
+ T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
+ cmd->se_ordered_id);
+ /*
+ * Return zero to let transport_execute_tasks() know
+ * not to add the delayed tasks to the execution list.
+ */
+ return 0;
+ }
+ /*
+ * Otherwise, no ORDERED task attributes exist..
+ */
+ return 1;
+}
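+
+#if 0
+/*
+ * Minimal usage sketch (illustrative only, not part of the original patch)
+ * of how a caller interprets the return value of
+ * transport_execute_task_attr(): 1 means the tasks may go straight to the
+ * execution queue, 0 means the command was parked on the delayed list.
+ */
+static void example_execute_task_attr_usage(struct se_cmd *cmd)
+{
+	if (transport_execute_task_attr(cmd))
+		transport_add_tasks_from_cmd(cmd);
+	/* else: command sits on SE_DEV(cmd)->delayed_cmd_list for now */
+}
+#endif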
+
+/*
+ * Called from fabric module context in transport_generic_new_cmd() and
+ * transport_generic_process_write()
+ */
+static int transport_execute_tasks(struct se_cmd *cmd)
+{
+ int add_tasks;
+
+ if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
+ if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
+ cmd->transport_error_status =
+ PYX_TRANSPORT_LU_COMM_FAILURE;
+ transport_generic_request_failure(cmd, NULL, 0, 1);
+ return 0;
+ }
+ }
+ /*
+ * Call transport_cmd_check_stop() to see if a fabric exception
+ * has occurred that prevents execution.
+ */
+ if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+ /*
+ * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
+ * attribute for the tasks of the received struct se_cmd CDB
+ */
+ add_tasks = transport_execute_task_attr(cmd);
+ if (add_tasks == 0)
+ goto execute_tasks;
+ /*
+ * This calls transport_add_tasks_from_cmd() to handle
+ * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
+ * (if enabled) in __transport_add_task_to_execute_queue() and
+ * transport_add_task_check_sam_attr().
+ */
+ transport_add_tasks_from_cmd(cmd);
+ }
+ /*
+ * Kick the execution queue for the cmd associated struct se_device
+ * storage object.
+ */
+execute_tasks:
+ __transport_execute_tasks(SE_DEV(cmd));
+ return 0;
+}
+
+/*
+ * Called to check the struct se_device TCQ depth window, and once open,
+ * pull struct se_task entries from struct se_device->execute_task_list
+ * and hand them to the backend subsystem plugin.
+ *
+ * Called from transport_processing_thread()
+ */
+static int __transport_execute_tasks(struct se_device *dev)
+{
+ int error;
+ struct se_cmd *cmd = NULL;
+ struct se_task *task;
+ unsigned long flags;
+
+ /*
+ * Check if there is enough room in the device and HBA queue to send
+ * struct se_transport_task's to the selected transport.
+ */
+check_depth:
+ spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+ if (!(atomic_read(&dev->depth_left)) ||
+ !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
+ spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ return transport_tcq_window_closed(dev);
+ }
+ dev->dev_tcq_window_closed = 0;
+
+ spin_lock(&dev->execute_task_lock);
+ task = transport_get_task_from_execute_queue(dev);
+ spin_unlock(&dev->execute_task_lock);
+
+ if (!task) {
+ spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ return 0;
+ }
+
+ atomic_dec(&dev->depth_left);
+ atomic_dec(&SE_HBA(dev)->left_queue_depth);
+ spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+
+ cmd = TASK_CMD(task);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&task->task_active, 1);
+ atomic_set(&task->task_sent, 1);
+ atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
+
+ if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
+ T_TASK(cmd)->t_task_cdbs)
+ atomic_set(&cmd->transport_sent, 1);
+
+ transport_start_task_timer(task);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ /*
+ * The struct se_cmd->transport_emulate_cdb() function pointer is used
+ * to grab REPORT_LUNS CDBs before they hit the
+ * struct se_subsystem_api->do_task() caller below.
+ */
+ if (cmd->transport_emulate_cdb) {
+ error = cmd->transport_emulate_cdb(cmd);
+ if (error != 0) {
+ cmd->transport_error_status = error;
+ atomic_set(&task->task_active, 0);
+ atomic_set(&cmd->transport_sent, 0);
+ transport_stop_tasks_for_cmd(cmd);
+ transport_generic_request_failure(cmd, dev, 0, 1);
+ goto check_depth;
+ }
+ /*
+ * Handle the successful completion for transport_emulate_cdb()
+ * here for the synchronous (non SCF_EMULATE_CDB_ASYNC) case.
+ * Otherwise the caller is expected to complete the task with
+ * proper status.
+ */
+ if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
+ cmd->scsi_status = SAM_STAT_GOOD;
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
+ } else {
+ /*
+ * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
+ * RAMDISK we use the internal transport_emulate_control_cdb() logic
+ * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
+ * LUN emulation code.
+ *
+ * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
+ * call ->do_task() directly and let the underlying TCM subsystem plugin
+ * code handle the CDB emulation.
+ */
+ if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+ error = transport_emulate_control_cdb(task);
+ else
+ error = TRANSPORT(dev)->do_task(task);
+
+ if (error != 0) {
+ cmd->transport_error_status = error;
+ atomic_set(&task->task_active, 0);
+ atomic_set(&cmd->transport_sent, 0);
+ transport_stop_tasks_for_cmd(cmd);
+ transport_generic_request_failure(cmd, dev, 0, 1);
+ }
+ }
+
+ goto check_depth;
+
+ return 0;
+}
+
+void transport_new_cmd_failure(struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+ /*
+ * Any unsolicited data will get dumped for failed command inside of
+ * the fabric plugin
+ */
+ spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+ se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+
+ CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
+}
+
+static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
+
+static inline u32 transport_get_sectors_6(
+ unsigned char *cdb,
+ struct se_cmd *cmd,
+ int *ret)
+{
+ struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+ /*
+ * Assume TYPE_DISK for non struct se_device objects.
+ * Use 8-bit sector value.
+ */
+ if (!dev)
+ goto type_disk;
+
+ /*
+ * Use 24-bit allocation length for TYPE_TAPE.
+ */
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+ return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
+
+ /*
+ * Everything else assume TYPE_DISK Sector CDB location.
+ * Use 8-bit sector value.
+ */
+type_disk:
+ return (u32)cdb[4];
+}
+
+static inline u32 transport_get_sectors_10(
+ unsigned char *cdb,
+ struct se_cmd *cmd,
+ int *ret)
+{
+ struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+ /*
+ * Assume TYPE_DISK for non struct se_device objects.
+ * Use 16-bit sector value.
+ */
+ if (!dev)
+ goto type_disk;
+
+ /*
+ * XXX_10 is not defined in SSC, throw an exception
+ */
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+ *ret = -1;
+ return 0;
+ }
+
+ /*
+ * Everything else assume TYPE_DISK Sector CDB location.
+ * Use 16-bit sector value.
+ */
+type_disk:
+ return (u32)(cdb[7] << 8) + cdb[8];
+}
+
+static inline u32 transport_get_sectors_12(
+ unsigned char *cdb,
+ struct se_cmd *cmd,
+ int *ret)
+{
+ struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+ /*
+ * Assume TYPE_DISK for non struct se_device objects.
+ * Use 32-bit sector value.
+ */
+ if (!dev)
+ goto type_disk;
+
+ /*
+ * XXX_12 is not defined in SSC, throw an exception
+ */
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+ *ret = -1;
+ return 0;
+ }
+
+ /*
+ * Everything else assume TYPE_DISK Sector CDB location.
+ * Use 32-bit sector value.
+ */
+type_disk:
+ return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+}
+
+static inline u32 transport_get_sectors_16(
+ unsigned char *cdb,
+ struct se_cmd *cmd,
+ int *ret)
+{
+ struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+ /*
+ * Assume TYPE_DISK for non struct se_device objects.
+ * Use 32-bit sector value.
+ */
+ if (!dev)
+ goto type_disk;
+
+ /*
+ * Use 24-bit allocation length for TYPE_TAPE.
+ */
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+ return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
+
+type_disk:
+ return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
+ (cdb[12] << 8) + cdb[13];
+}
+
+/*
+ * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
+ */
+static inline u32 transport_get_sectors_32(
+ unsigned char *cdb,
+ struct se_cmd *cmd,
+ int *ret)
+{
+ /*
+ * Assume TYPE_DISK for non struct se_device objects.
+ * Use 32-bit sector value.
+ */
+ return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
+ (cdb[30] << 8) + cdb[31];
+
+}
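+
+#if 0
+/*
+ * Illustrative sketch only (not part of the original patch): the
+ * open-coded shift/add extractions in the transport_get_sectors_*()
+ * helpers above are equivalent to the <asm/unaligned.h> accessors
+ * already used elsewhere in this file, e.g. for the 32-bit case:
+ */
+static inline u32 example_get_sectors_32(unsigned char *cdb)
+{
+	/* READ_32/WRITE_32 carry the transfer length in bytes 28..31 */
+	return get_unaligned_be32(&cdb[28]);
+}
+#endif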
+
+static inline u32 transport_get_size(
+ u32 sectors,
+ unsigned char *cdb,
+ struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+ if (cdb[1] & 1) { /* sectors */
+ return DEV_ATTRIB(dev)->block_size * sectors;
+ } else /* bytes */
+ return sectors;
+ }
+#if 0
+ printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
+ " %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
+ DEV_ATTRIB(dev)->block_size * sectors,
+ TRANSPORT(dev)->name);
+#endif
+ return DEV_ATTRIB(dev)->block_size * sectors;
+}
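+
+#if 0
+/*
+ * Worked example (illustrative only, not part of this patch): for a
+ * TYPE_DISK backend with DEV_ATTRIB(dev)->block_size = 512 and
+ * sectors = 8, transport_get_size() returns 512 * 8 = 4096 bytes.
+ * For TYPE_TAPE with the FIXED bit (cdb[1] & 1) clear, "sectors" is
+ * already a byte count and is returned unmodified.
+ */
+static inline u32 example_disk_transfer_size(u32 block_size, u32 sectors)
+{
+	return block_size * sectors;	/* e.g. 512 * 8 == 4096 */
+}
+#endif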
+
+unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
+{
+ unsigned char result = 0;
+ /*
+ * MSB
+ */
+ if ((val[0] >= 'a') && (val[0] <= 'f'))
+ result = ((val[0] - 'a' + 10) & 0xf) << 4;
+ else
+ if ((val[0] >= 'A') && (val[0] <= 'F'))
+ result = ((val[0] - 'A' + 10) & 0xf) << 4;
+ else /* digit */
+ result = ((val[0] - '0') & 0xf) << 4;
+ /*
+ * LSB
+ */
+ if ((val[1] >= 'a') && (val[1] <= 'f'))
+ result |= ((val[1] - 'a' + 10) & 0xf);
+ else
+ if ((val[1] >= 'A') && (val[1] <= 'F'))
+ result |= ((val[1] - 'A' + 10) & 0xf);
+ else /* digit */
+ result |= ((val[1] - '0') & 0xf);
+
+ return result;
+}
+EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
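+
+#if 0
+/*
+ * Illustrative usage only (not part of the original patch): convert
+ * the two ASCII hex characters "3f" into the binary byte 0x3f, as a
+ * caller parsing an ASCII WWN/NAA string into binary form might do.
+ */
+static void example_asciihex_usage(void)
+{
+	unsigned char ascii[2] = { '3', 'f' };
+
+	WARN_ON(transport_asciihex_to_binaryhex(ascii) != 0x3f);
+}
+#endif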
+
+static void transport_xor_callback(struct se_cmd *cmd)
+{
+ unsigned char *buf, *addr;
+ struct se_mem *se_mem;
+ unsigned int offset;
+ int i;
+ /*
+ * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
+ *
+ * 1) read the specified logical block(s);
+ * 2) transfer logical blocks from the data-out buffer;
+ * 3) XOR the logical blocks transferred from the data-out buffer with
+ * the logical blocks read, storing the resulting XOR data in a buffer;
+ * 4) if the DISABLE WRITE bit is set to zero, then write the logical
+ * blocks transferred from the data-out buffer; and
+ * 5) transfer the resulting XOR data to the data-in buffer.
+ */
+ buf = kmalloc(cmd->data_length, GFP_KERNEL);
+ if (!(buf)) {
+ printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+ return;
+ }
+ /*
+ * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
+ * into the locally allocated *buf
+ */
+ transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
+ /*
+ * Now perform the XOR against the BIDI read memory located at
+ * T_TASK(cmd)->t_mem_bidi_list
+ */
+
+ offset = 0;
+ list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
+ addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
+ if (!(addr))
+ goto out;
+
+ for (i = 0; i < se_mem->se_len; i++)
+ *(addr + se_mem->se_off + i) ^= *(buf + offset + i);
+
+ offset += se_mem->se_len;
+ kunmap_atomic(addr, KM_USER0);
+ }
+out:
+ kfree(buf);
+}
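+
+#if 0
+/*
+ * Minimal sketch (illustrative only, not part of this patch) of step 3)
+ * from the sbc3r22 XDWRITEREAD description above: XOR the data-out
+ * buffer into the previously read data, shown here for two plain
+ * contiguous buffers.
+ */
+static void example_xor_buffers(unsigned char *read_buf,
+				const unsigned char *write_buf, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++)
+		read_buf[i] ^= write_buf[i];
+}
+#endif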
+
+/*
+ * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
+ */
+static int transport_get_sense_data(struct se_cmd *cmd)
+{
+ unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
+ struct se_device *dev;
+ struct se_task *task = NULL, *task_tmp;
+ unsigned long flags;
+ u32 offset = 0;
+
+ if (!SE_LUN(cmd)) {
+ printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+ return -1;
+ }
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return 0;
+ }
+
+ list_for_each_entry_safe(task, task_tmp,
+ &T_TASK(cmd)->t_task_list, t_list) {
+
+ if (!task->task_sense)
+ continue;
+
+ dev = task->se_dev;
+ if (!(dev))
+ continue;
+
+ if (!TRANSPORT(dev)->get_sense_buffer) {
+ printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
+ " is NULL\n");
+ continue;
+ }
+
+ sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
+ if (!(sense_buffer)) {
+ printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+ " sense buffer for task with sense\n",
+ CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
+ continue;
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+ TRANSPORT_SENSE_BUFFER);
+
+ memcpy((void *)&buffer[offset], (void *)sense_buffer,
+ TRANSPORT_SENSE_BUFFER);
+ cmd->scsi_status = task->task_scsi_status;
+ /* Automatically padded */
+ cmd->scsi_sense_length =
+ (TRANSPORT_SENSE_BUFFER + offset);
+
+ printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+ " and sense\n",
+ dev->se_hba->hba_id, TRANSPORT(dev)->name,
+ cmd->scsi_status);
+ return 0;
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return -1;
+}
+
+static int transport_allocate_resources(struct se_cmd *cmd)
+{
+ u32 length = cmd->data_length;
+
+ if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+ (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
+ return transport_generic_get_mem(cmd, length, PAGE_SIZE);
+ else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
+ return transport_generic_allocate_buf(cmd, length);
+ else
+ return 0;
+}
+
+static int
+transport_handle_reservation_conflict(struct se_cmd *cmd)
+{
+ cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ /*
+ * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+ * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+ * CONFLICT STATUS.
+ *
+ * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+ */
+ if (SE_SESS(cmd) &&
+ DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
+ core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+ cmd->orig_fe_lun, 0x2C,
+ ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+ return -2;
+}
+
+/* transport_generic_cmd_sequencer():
+ *
+ * Generic Command Sequencer that should work for most DAS transport
+ * drivers.
+ *
+ * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
+ * RX Thread.
+ *
+ * FIXME: Need to support other SCSI OPCODES as well.
+ */
+static int transport_generic_cmd_sequencer(
+ struct se_cmd *cmd,
+ unsigned char *cdb)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ int ret = 0, sector_ret = 0, passthrough;
+ u32 sectors = 0, size = 0, pr_reg_type = 0;
+ u16 service_action;
+ u8 alua_ascq = 0;
+ /*
+ * Check for an existing UNIT ATTENTION condition
+ */
+ if (core_scsi3_ua_check(cmd, cdb) < 0) {
+ cmd->transport_wait_for_tasks =
+ &transport_nop_wait_for_tasks;
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
+ return -2;
+ }
+ /*
+ * Check status of Asymmetric Logical Unit Assignment port
+ */
+ ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
+ if (ret != 0) {
+ cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+ /*
+ * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+ * The ALUA additional sense code qualifier (ASCQ) is determined
+ * by the ALUA primary or secondary access state.
+ */
+ if (ret > 0) {
+#if 0
+ printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+ " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
+ CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
+#endif
+ transport_set_sense_codes(cmd, 0x04, alua_ascq);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
+ return -2;
+ }
+ goto out_invalid_cdb_field;
+ }
+ /*
+ * Check status for SPC-3 Persistent Reservations
+ */
+ if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
+ if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
+ cmd, cdb, pr_reg_type) != 0)
+ return transport_handle_reservation_conflict(cmd);
+ /*
+ * This means the CDB is allowed for the SCSI Initiator port
+ * when said port is *NOT* holding the legacy SPC-2 or
+ * SPC-3 Persistent Reservation.
+ */
+ }
+
+ switch (cdb[0]) {
+ case READ_6:
+ sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_6;
+ T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case READ_10:
+ sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_10;
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case READ_12:
+ sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_12;
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case READ_16:
+ sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_16;
+ T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case WRITE_6:
+ sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_6;
+ T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case WRITE_10:
+ sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_10;
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case WRITE_12:
+ sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_12;
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case WRITE_16:
+ sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_16;
+ T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case XDWRITEREAD_10:
+ if ((cmd->data_direction != DMA_TO_DEVICE) ||
+ !(T_TASK(cmd)->t_tasks_bidi))
+ goto out_invalid_cdb_field;
+ sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_10;
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ passthrough = (TRANSPORT(dev)->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV);
+ /*
+ * Skip the remaining assignments for TCM/PSCSI passthrough
+ */
+ if (passthrough)
+ break;
+ /*
+ * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
+ */
+ cmd->transport_complete_callback = &transport_xor_callback;
+ T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ break;
+ case VARIABLE_LENGTH_CMD:
+ service_action = get_unaligned_be16(&cdb[8]);
+ /*
+ * Determine if this is TCM/PSCSI device and we should disable
+ * internal emulation for this CDB.
+ */
+ passthrough = (TRANSPORT(dev)->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV);
+
+ switch (service_action) {
+ case XDWRITEREAD_32:
+ sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ /*
+ * Use WRITE_32 and READ_32 opcodes for the emulated
+ * XDWRITE_READ_32 logic.
+ */
+ cmd->transport_split_cdb = &split_cdb_XX_32;
+ T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+
+ /*
+ * Skip the remaining assignments for TCM/PSCSI passthrough
+ */
+ if (passthrough)
+ break;
+
+ /*
+ * Setup BIDI XOR callback to be run during
+ * transport_generic_complete_ok()
+ */
+ cmd->transport_complete_callback = &transport_xor_callback;
+ T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
+ break;
+ case WRITE_SAME_32:
+ sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+
+ /*
+ * Skip the remaining assignments for TCM/PSCSI passthrough
+ */
+ if (passthrough)
+ break;
+
+ if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
+ printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+ " bits not supported for Block Discard"
+ " Emulation\n");
+ goto out_invalid_cdb_field;
+ }
+ /*
+ * Currently for the emulated case we only accept
+ * tpws with the UNMAP=1 bit set.
+ */
+ if (!(cdb[10] & 0x08)) {
+ printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+ " supported for Block Discard Emulation\n");
+ goto out_invalid_cdb_field;
+ }
+ break;
+ default:
+ printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+ " 0x%04x not supported\n", service_action);
+ goto out_unsupported_cdb;
+ }
+ break;
+ case 0xa3:
+ if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+ /* MAINTENANCE_IN from SCC-2 */
+ /*
+ * Check for emulated MI_REPORT_TARGET_PGS.
+ */
+ if (cdb[1] == MI_REPORT_TARGET_PGS) {
+ cmd->transport_emulate_cdb =
+ (T10_ALUA(su_dev)->alua_type ==
+ SPC3_ALUA_EMULATED) ?
+ &core_emulate_report_target_port_groups :
+ NULL;
+ }
+ size = (cdb[6] << 24) | (cdb[7] << 16) |
+ (cdb[8] << 8) | cdb[9];
+ } else {
+ /* GPCMD_SEND_KEY from multi media commands */
+ size = (cdb[8] << 8) + cdb[9];
+ }
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case MODE_SELECT:
+ size = cdb[4];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
+ case MODE_SELECT_10:
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
+ case MODE_SENSE:
+ size = cdb[4];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case MODE_SENSE_10:
+ case GPCMD_READ_BUFFER_CAPACITY:
+ case GPCMD_SEND_OPC:
+ case LOG_SELECT:
+ case LOG_SENSE:
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case READ_BLOCK_LIMITS:
+ size = READ_BLOCK_LEN;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case GPCMD_GET_CONFIGURATION:
+ case GPCMD_READ_FORMAT_CAPACITIES:
+ case GPCMD_READ_DISC_INFO:
+ case GPCMD_READ_TRACK_RZONE_INFO:
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
+ case PERSISTENT_RESERVE_IN:
+ case PERSISTENT_RESERVE_OUT:
+ cmd->transport_emulate_cdb =
+ (T10_RES(su_dev)->res_type ==
+ SPC3_PERSISTENT_RESERVATIONS) ?
+ &core_scsi3_emulate_pr : NULL;
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case GPCMD_MECHANISM_STATUS:
+ case GPCMD_READ_DVD_STRUCTURE:
+ size = (cdb[8] << 8) + cdb[9];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
+ case READ_POSITION:
+ size = READ_POSITION_LEN;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case 0xa4:
+ if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+ /* MAINTENANCE_OUT from SCC-2
+ *
+ * Check for emulated MO_SET_TARGET_PGS.
+ */
+ if (cdb[1] == MO_SET_TARGET_PGS) {
+ cmd->transport_emulate_cdb =
+ (T10_ALUA(su_dev)->alua_type ==
+ SPC3_ALUA_EMULATED) ?
+ &core_emulate_set_target_port_groups :
+ NULL;
+ }
+
+ size = (cdb[6] << 24) | (cdb[7] << 16) |
+ (cdb[8] << 8) | cdb[9];
+ } else {
+ /* GPCMD_REPORT_KEY from multi media commands */
+ size = (cdb[8] << 8) + cdb[9];
+ }
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case INQUIRY:
+ size = (cdb[3] << 8) + cdb[4];
+ /*
+ * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
+ * See spc4r17 section 5.3
+ */
+ if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case READ_BUFFER:
+ size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case READ_CAPACITY:
+ size = READ_CAP_LEN;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case READ_MEDIA_SERIAL_NUMBER:
+ case SECURITY_PROTOCOL_IN:
+ case SECURITY_PROTOCOL_OUT:
+ size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case SERVICE_ACTION_IN:
+ case ACCESS_CONTROL_IN:
+ case ACCESS_CONTROL_OUT:
+ case EXTENDED_COPY:
+ case READ_ATTRIBUTE:
+ case RECEIVE_COPY_RESULTS:
+ case WRITE_ATTRIBUTE:
+ size = (cdb[10] << 24) | (cdb[11] << 16) |
+ (cdb[12] << 8) | cdb[13];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case RECEIVE_DIAGNOSTIC:
+ case SEND_DIAGNOSTIC:
+ size = (cdb[3] << 8) | cdb[4];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
+#if 0
+ case GPCMD_READ_CD:
+ sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ size = (2336 * sectors);
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+#endif
+ case READ_TOC:
+ size = cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case REQUEST_SENSE:
+ size = cdb[4];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case READ_ELEMENT_STATUS:
+ size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case WRITE_BUFFER:
+ size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case RESERVE:
+ case RESERVE_10:
+ /*
+ * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
+ * Assume the passthrough or $FABRIC_MOD will tell us about it.
+ */
+ if (cdb[0] == RESERVE_10)
+ size = (cdb[7] << 8) | cdb[8];
+ else
+ size = cmd->data_length;
+
+ /*
+ * Setup the legacy emulated handler for SPC-2 and
+ * >= SPC-3 compatible reservation handling (CRH=1).
+ * Otherwise, we assume the underlying SCSI logic is
+ * running in SPC_PASSTHROUGH, and wants reservations
+ * emulation disabled.
+ */
+ cmd->transport_emulate_cdb =
+ (T10_RES(su_dev)->res_type !=
+ SPC_PASSTHROUGH) ?
+ &core_scsi2_emulate_crh : NULL;
+ cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+ break;
+ case RELEASE:
+ case RELEASE_10:
+ /*
+ * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
+ * Assume the passthrough or $FABRIC_MOD will tell us about it.
+ */
+ if (cdb[0] == RELEASE_10)
+ size = (cdb[7] << 8) | cdb[8];
+ else
+ size = cmd->data_length;
+
+ cmd->transport_emulate_cdb =
+ (T10_RES(su_dev)->res_type !=
+ SPC_PASSTHROUGH) ?
+ &core_scsi2_emulate_crh : NULL;
+ cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+ break;
+ case SYNCHRONIZE_CACHE:
+ case 0x91: /* SYNCHRONIZE_CACHE_16: */
+ /*
+ * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
+ */
+ if (cdb[0] == SYNCHRONIZE_CACHE) {
+ sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+ T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ }
+ if (sector_ret)
+ goto out_unsupported_cdb;
+
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+
+ /*
+ * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
+ */
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ break;
+ /*
+ * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
+ * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
+ */
+ cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
+ /*
+ * Check to ensure that LBA + Range does not exceed past end of
+ * device.
+ */
+ if (transport_get_sectors(cmd) < 0)
+ goto out_invalid_cdb_field;
+ break;
+ case UNMAP:
+ size = get_unaligned_be16(&cdb[7]);
+ passthrough = (TRANSPORT(dev)->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV);
+ /*
+ * Determine if the received UNMAP is used for direct passthrough
+ * into Linux/SCSI with struct request via TCM/pSCSI, or if we are
+ * signaling the use of internal transport_generic_unmap() emulation
+ * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO
+ * subsystem plugin backstores.
+ */
+ if (!(passthrough))
+ cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
+
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case WRITE_SAME_16:
+ sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]);
+ passthrough = (TRANSPORT(dev)->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV);
+ /*
+ * Determine if the received WRITE_SAME_16 is used for direct
+ * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
+ * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
+ * emulation for Linux/BLOCK discard with TCM/IBLOCK and
+ * TCM/FILEIO subsystem plugin backstores.
+ */
+ if (!(passthrough)) {
+ if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
+ printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+ " bits not supported for Block Discard"
+ " Emulation\n");
+ goto out_invalid_cdb_field;
+ }
+ /*
+ * Currently for the emulated case we only accept
+ * tpws with the UNMAP=1 bit set.
+ */
+ if (!(cdb[1] & 0x08)) {
+ printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not "
+ " supported for Block Discard Emulation\n");
+ goto out_invalid_cdb_field;
+ }
+ }
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ case GPCMD_CLOSE_TRACK:
+ case ERASE:
+ case INITIALIZE_ELEMENT_STATUS:
+ case GPCMD_LOAD_UNLOAD:
+ case REZERO_UNIT:
+ case SEEK_10:
+ case GPCMD_SET_SPEED:
+ case SPACE:
+ case START_STOP:
+ case TEST_UNIT_READY:
+ case VERIFY:
+ case WRITE_FILEMARKS:
+ case MOVE_MEDIUM:
+ cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+ break;
+ case REPORT_LUNS:
+ cmd->transport_emulate_cdb =
+ &transport_core_report_lun_response;
+ size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ /*
+ * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
+ * See spc4r17 section 5.3
+ */
+ if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ default:
+ printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+ " 0x%02x, sending CHECK_CONDITION.\n",
+ CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
+ cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+ goto out_unsupported_cdb;
+ }
+
+ if (size != cmd->data_length) {
+ printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+ " %u does not match SCSI CDB Length: %u for SAM Opcode:"
+ " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
+ cmd->data_length, size, cdb[0]);
+
+ cmd->cmd_spdtl = size;
+
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ printk(KERN_ERR "Rejecting underflow/overflow"
+ " WRITE data\n");
+ goto out_invalid_cdb_field;
+ }
+ /*
+ * Reject READ_* or WRITE_* with overflow/underflow for
+ * type SCF_SCSI_DATA_SG_IO_CDB.
+ */
+ if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) {
+ printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+ " CDB on non 512-byte sector setup subsystem"
+ " plugin: %s\n", TRANSPORT(dev)->name);
+ /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+ goto out_invalid_cdb_field;
+ }
+
+ if (size > cmd->data_length) {
+ cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+ cmd->residual_count = (size - cmd->data_length);
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = (cmd->data_length - size);
+ }
+ cmd->data_length = size;
+ }
+
+ transport_set_supported_SAM_opcode(cmd);
+ return ret;
+
+out_unsupported_cdb:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -2;
+out_invalid_cdb_field:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -2;
+}
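+
+#if 0
+/*
+ * Worked example (illustrative only, not part of this patch) of the
+ * residual handling at the end of transport_generic_cmd_sequencer():
+ * if the CDB describes a 4096 byte transfer but the fabric expected
+ * transfer length (cmd->data_length) is 8192 bytes, the command is
+ * flagged SCF_UNDERFLOW_BIT with residual_count = 8192 - 4096 = 4096
+ * and cmd->data_length is reduced to 4096 before task allocation.
+ */
+#endif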
+
+static inline void transport_release_tasks(struct se_cmd *);
+
+/*
+ * This function will copy a contiguous *src buffer into a destination
+ * struct scatterlist array.
+ */
+static void transport_memcpy_write_contig(
+ struct se_cmd *cmd,
+ struct scatterlist *sg_d,
+ unsigned char *src)
+{
+ u32 i = 0, length = 0, total_length = cmd->data_length;
+ void *dst;
+
+ while (total_length) {
+ length = sg_d[i].length;
+
+ if (length > total_length)
+ length = total_length;
+
+ dst = sg_virt(&sg_d[i]);
+
+ memcpy(dst, src, length);
+
+ if (!(total_length -= length))
+ return;
+
+ src += length;
+ i++;
+ }
+}
+
+/*
+ * This function will copy a struct scatterlist array *sg_s into a destination
+ * contiguous *dst buffer.
+ */
+static void transport_memcpy_read_contig(
+ struct se_cmd *cmd,
+ unsigned char *dst,
+ struct scatterlist *sg_s)
+{
+ u32 i = 0, length = 0, total_length = cmd->data_length;
+ void *src;
+
+ while (total_length) {
+ length = sg_s[i].length;
+
+ if (length > total_length)
+ length = total_length;
+
+ src = sg_virt(&sg_s[i]);
+
+ memcpy(dst, src, length);
+
+ if (!(total_length -= length))
+ return;
+
+ dst += length;
+ i++;
+ }
+}
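+
+#if 0
+/*
+ * Illustrative alternative only (not part of the original patch): for
+ * a plain struct scatterlist array the same copies can be expressed
+ * with the generic helpers from lib/scatterlist.c, assuming the SGL
+ * entry count is available to the caller:
+ */
+static void example_sg_copy(struct se_cmd *cmd, struct scatterlist *sgl,
+			    unsigned int nents, unsigned char *buf)
+{
+	/* contiguous buffer -> SGL, cf. transport_memcpy_write_contig() */
+	sg_copy_from_buffer(sgl, nents, buf, cmd->data_length);
+	/* SGL -> contiguous buffer, cf. transport_memcpy_read_contig() */
+	sg_copy_to_buffer(sgl, nents, buf, cmd->data_length);
+}
+#endif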
+
+static void transport_memcpy_se_mem_read_contig(
+ struct se_cmd *cmd,
+ unsigned char *dst,
+ struct list_head *se_mem_list)
+{
+ struct se_mem *se_mem;
+ void *src;
+ u32 length = 0, total_length = cmd->data_length;
+
+ list_for_each_entry(se_mem, se_mem_list, se_list) {
+ length = se_mem->se_len;
+
+ if (length > total_length)
+ length = total_length;
+
+ src = page_address(se_mem->se_page) + se_mem->se_off;
+
+ memcpy(dst, src, length);
+
+ if (!(total_length -= length))
+ return;
+
+ dst += length;
+ }
+}
+
+/*
+ * Called from transport_generic_complete_ok() and
+ * transport_generic_request_failure() to determine which dormant/delayed
+ * and ordered cmds need to have their tasks added to the execution queue.
+ */
+static void transport_complete_task_attr(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_cmd *cmd_p, *cmd_tmp;
+ int new_active_tasks = 0;
+
+ if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
+ atomic_dec(&dev->simple_cmds);
+ smp_mb__after_atomic_dec();
+ dev->dev_cur_ordered_id++;
+ DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+ " SIMPLE: %u\n", dev->dev_cur_ordered_id,
+ cmd->se_ordered_id);
+ } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ atomic_dec(&dev->dev_hoq_count);
+ smp_mb__after_atomic_dec();
+ dev->dev_cur_ordered_id++;
+ DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+ " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
+ cmd->se_ordered_id);
+ } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ spin_lock(&dev->ordered_cmd_lock);
+ list_del(&cmd->se_ordered_list);
+ atomic_dec(&dev->dev_ordered_sync);
+ smp_mb__after_atomic_dec();
+ spin_unlock(&dev->ordered_cmd_lock);
+
+ dev->dev_cur_ordered_id++;
+ DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
+ " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
+ }
+ /*
+ * Process all commands up to the last received
+ * ORDERED task attribute which requires another blocking
+ * boundary
+ */
+ spin_lock(&dev->delayed_cmd_lock);
+ list_for_each_entry_safe(cmd_p, cmd_tmp,
+ &dev->delayed_cmd_list, se_delayed_list) {
+
+ list_del(&cmd_p->se_delayed_list);
+ spin_unlock(&dev->delayed_cmd_lock);
+
+ DEBUG_STA("Calling add_tasks() for"
+ " cmd_p: 0x%02x Task Attr: 0x%02x"
+ " Dormant -> Active, se_ordered_id: %u\n",
+ T_TASK(cmd_p)->t_task_cdb[0],
+ cmd_p->sam_task_attr, cmd_p->se_ordered_id);
+
+ transport_add_tasks_from_cmd(cmd_p);
+ new_active_tasks++;
+
+ spin_lock(&dev->delayed_cmd_lock);
+ if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
+ break;
+ }
+ spin_unlock(&dev->delayed_cmd_lock);
+ /*
+ * If new tasks have become active, wake up the transport thread
+ * to do the processing of the Active tasks.
+ */
+ if (new_active_tasks != 0)
+ wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+}
+
+static void transport_generic_complete_ok(struct se_cmd *cmd)
+{
+ int reason = 0;
+ /*
+ * Check if we need to move delayed/dormant tasks from cmds on the
+ * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
+ * Attribute.
+ */
+ if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ transport_complete_task_attr(cmd);
+ /*
+ * Check if we need to retrieve a sense buffer from
+ * the struct se_cmd in question.
+ */
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ if (transport_get_sense_data(cmd) < 0)
+ reason = TCM_NON_EXISTENT_LUN;
+
+ /*
+ * Only set when an struct se_task->task_scsi_status returned
+ * a non GOOD status.
+ */
+ if (cmd->scsi_status) {
+ transport_send_check_condition_and_sense(
+ cmd, reason, 1);
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
+ }
+ /*
+ * Check for a callback, used amongst other things by
+ * XDWRITE_READ_10 emulation.
+ */
+ if (cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd);
+
+ switch (cmd->data_direction) {
+ case DMA_FROM_DEVICE:
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+ if (SE_LUN(cmd)->lun_sep) {
+ SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+ cmd->data_length;
+ }
+ spin_unlock(&cmd->se_lun->lun_sep_lock);
+ /*
+ * If enabled by the TCM fabric module's pre-registered SGL
+ * memory, perform the memcpy() from the TCM internal
+ * contiguous buffer back to the original SGL.
+ */
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
+ transport_memcpy_write_contig(cmd,
+ T_TASK(cmd)->t_task_pt_sgl,
+ T_TASK(cmd)->t_task_buf);
+
+ CMD_TFO(cmd)->queue_data_in(cmd);
+ break;
+ case DMA_TO_DEVICE:
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+ if (SE_LUN(cmd)->lun_sep) {
+ SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
+ cmd->data_length;
+ }
+ spin_unlock(&cmd->se_lun->lun_sep_lock);
+ /*
+ * Check if we need to send READ payload for BIDI-COMMAND
+ */
+ if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+ if (SE_LUN(cmd)->lun_sep) {
+ SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+ cmd->data_length;
+ }
+ spin_unlock(&cmd->se_lun->lun_sep_lock);
+ CMD_TFO(cmd)->queue_data_in(cmd);
+ break;
+ }
+ /* Fall through for DMA_TO_DEVICE */
+ case DMA_NONE:
+ CMD_TFO(cmd)->queue_status(cmd);
+ break;
+ default:
+ break;
+ }
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void transport_free_dev_tasks(struct se_cmd *cmd)
+{
+ struct se_task *task, *task_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ list_for_each_entry_safe(task, task_tmp,
+ &T_TASK(cmd)->t_task_list, t_list) {
+ if (atomic_read(&task->task_active))
+ continue;
+
+ kfree(task->task_sg_bidi);
+ kfree(task->task_sg);
+
+ list_del(&task->t_list);
+
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ if (task->se_dev)
+ TRANSPORT(task->se_dev)->free_task(task);
+ else
+ printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
+ task->task_no);
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+ struct se_mem *se_mem, *se_mem_tmp;
+ int free_page = 1;
+
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
+ free_page = 0;
+ if (cmd->se_dev->transport->do_se_mem_map)
+ free_page = 0;
+
+ if (T_TASK(cmd)->t_task_buf) {
+ kfree(T_TASK(cmd)->t_task_buf);
+ T_TASK(cmd)->t_task_buf = NULL;
+ return;
+ }
+
+ /*
+ * Caller will handle releasing of struct se_mem.
+ */
+ if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
+ return;
+
+ if (!(T_TASK(cmd)->t_tasks_se_num))
+ return;
+
+ list_for_each_entry_safe(se_mem, se_mem_tmp,
+ T_TASK(cmd)->t_mem_list, se_list) {
+ /*
+ * Only call __free_page(se_mem->se_page) when
+ * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
+ */
+ if (free_page)
+ __free_page(se_mem->se_page);
+
+ list_del(&se_mem->se_list);
+ kmem_cache_free(se_mem_cache, se_mem);
+ }
+
+ if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
+ list_for_each_entry_safe(se_mem, se_mem_tmp,
+ T_TASK(cmd)->t_mem_bidi_list, se_list) {
+ /*
+ * Only call __free_page(se_mem->se_page) when
+ * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
+ */
+ if (free_page)
+ __free_page(se_mem->se_page);
+
+ list_del(&se_mem->se_list);
+ kmem_cache_free(se_mem_cache, se_mem);
+ }
+ }
+
+ kfree(T_TASK(cmd)->t_mem_bidi_list);
+ T_TASK(cmd)->t_mem_bidi_list = NULL;
+ kfree(T_TASK(cmd)->t_mem_list);
+ T_TASK(cmd)->t_mem_list = NULL;
+ T_TASK(cmd)->t_tasks_se_num = 0;
+}
+
+static inline void transport_release_tasks(struct se_cmd *cmd)
+{
+ transport_free_dev_tasks(cmd);
+}
+
+static inline int transport_dec_and_check(struct se_cmd *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ return 1;
+ }
+ }
+
+ if (atomic_read(&T_TASK(cmd)->t_se_count)) {
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ return 1;
+ }
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return 0;
+}
+
+static void transport_release_fe_cmd(struct se_cmd *cmd)
+{
+ unsigned long flags;
+
+ if (transport_dec_and_check(cmd))
+ return;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ goto free_pages;
+ }
+ atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_release_tasks(cmd);
+free_pages:
+ transport_free_pages(cmd);
+ transport_free_se_cmd(cmd);
+ CMD_TFO(cmd)->release_cmd_direct(cmd);
+}
+
+static int transport_generic_remove(
+ struct se_cmd *cmd,
+ int release_to_pool,
+ int session_reinstatement)
+{
+ unsigned long flags;
+
+ if (!(T_TASK(cmd)))
+ goto release_cmd;
+
+ if (transport_dec_and_check(cmd)) {
+ if (session_reinstatement) {
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ }
+ return 1;
+ }
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ goto free_pages;
+ }
+ atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_release_tasks(cmd);
+free_pages:
+ transport_free_pages(cmd);
+
+release_cmd:
+ if (release_to_pool) {
+ transport_release_cmd_to_pool(cmd);
+ } else {
+ transport_free_se_cmd(cmd);
+ CMD_TFO(cmd)->release_cmd_direct(cmd);
+ }
+
+ return 0;
+}
+
+/*
+ * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
+ * @cmd: Associated se_cmd descriptor
+ * @mem: SGL style memory for TCM WRITE / READ
+ * @sg_mem_num: Number of SGL elements
+ * @mem_bidi_in: SGL style memory for TCM BIDI READ
+ * @sg_mem_bidi_num: Number of BIDI READ SGL elements
+ *
+ * Return: nonzero if cmd was rejected due to -ENOMEM or improper usage
+ * of parameters.
+ */
+int transport_generic_map_mem_to_cmd(
+ struct se_cmd *cmd,
+ struct scatterlist *mem,
+ u32 sg_mem_num,
+ struct scatterlist *mem_bidi_in,
+ u32 sg_mem_bidi_num)
+{
+ u32 se_mem_cnt_out = 0;
+ int ret;
+
+ if (!(mem) || !(sg_mem_num))
+ return 0;
+ /*
+ * Passed *mem will contain a list_head containing preformatted
+ * struct se_mem elements...
+ */
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
+ if ((mem_bidi_in) || (sg_mem_bidi_num)) {
+ printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
+ " with BIDI-COMMAND\n");
+ return -ENOSYS;
+ }
+
+ T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
+ T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
+ cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
+ return 0;
+ }
+ /*
+ * Otherwise, assume the caller is passing a struct scatterlist
+ * array from include/linux/scatterlist.h
+ */
+ if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+ (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+ /*
+ * For CDBs using TCM struct se_mem linked list scatterlist memory
+ * processed into a TCM struct se_subsystem_dev, we do the mapping
+ * from the passed physical memory to struct se_mem->se_page here.
+ */
+ T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
+ if (!(T_TASK(cmd)->t_mem_list))
+ return -ENOMEM;
+
+ ret = transport_map_sg_to_mem(cmd,
+ T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
+ if (ret < 0)
+ return -ENOMEM;
+
+ T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
+ /*
+ * Setup BIDI READ list of struct se_mem elements
+ */
+ if ((mem_bidi_in) && (sg_mem_bidi_num)) {
+ T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
+ if (!(T_TASK(cmd)->t_mem_bidi_list)) {
+ kfree(T_TASK(cmd)->t_mem_list);
+ return -ENOMEM;
+ }
+ se_mem_cnt_out = 0;
+
+ ret = transport_map_sg_to_mem(cmd,
+ T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
+ &se_mem_cnt_out);
+ if (ret < 0) {
+ kfree(T_TASK(cmd)->t_mem_list);
+ return -ENOMEM;
+ }
+
+ T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
+ }
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+ } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
+ if (mem_bidi_in || sg_mem_bidi_num) {
+ printk(KERN_ERR "BIDI-Commands not supported using "
+ "SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
+ return -ENOSYS;
+ }
+ /*
+ * For incoming CDBs using a contiguous buffer internally with TCM,
+ * save the passed struct scatterlist memory. After TCM storage object
+ * processing has completed for this struct se_cmd, TCM core will call
+ * transport_memcpy_[write,read]_contig() as necessary from
+ * transport_generic_complete_ok() and transport_write_pending() in order
+ * to copy the TCM buffer to/from the original passed *mem in SGL ->
+ * struct scatterlist format.
+ */
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
+ T_TASK(cmd)->t_task_pt_sgl = mem;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
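+
+#if 0
+/*
+ * Illustrative fabric-side usage only (not part of the original patch);
+ * the wrapper name below is hypothetical. A fabric module that already
+ * owns a scatterlist for the data payload hands it to TCM like this,
+ * with no BIDI READ SGL:
+ */
+static int example_fabric_map_sgl(struct se_cmd *se_cmd,
+				  struct scatterlist *sgl, u32 sgl_count)
+{
+	return transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
+						NULL, 0);
+}
+#endif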
+
+
+static inline long long transport_dev_end_lba(struct se_device *dev)
+{
+ return dev->transport->get_blocks(dev) + 1;
+}
+
+static int transport_get_sectors(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+
+ T_TASK(cmd)->t_tasks_sectors =
+ (cmd->data_length / DEV_ATTRIB(dev)->block_size);
+ if (!(T_TASK(cmd)->t_tasks_sectors))
+ T_TASK(cmd)->t_tasks_sectors = 1;
+
+ if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
+ return 0;
+
+ if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
+ transport_dev_end_lba(dev)) {
+ printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
+ " transport_dev_end_lba(): %llu\n",
+ T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
+ transport_dev_end_lba(dev));
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
+ return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
+ }
+
+ return 0;
+}
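+
+#if 0
+/*
+ * Worked example (illustrative only, not part of this patch) of the
+ * range check above: for a backend reporting get_blocks() = 2047
+ * (valid LBAs 0..2047, so transport_dev_end_lba() = 2048), a request
+ * with t_task_lba = 2040 and t_tasks_sectors = 16 ends at 2056 > 2048
+ * and is rejected with TCM_SECTOR_COUNT_TOO_MANY.
+ */
+#endif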
+
+static int transport_new_cmd_obj(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ u32 task_cdbs = 0, rc;
+
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ task_cdbs++;
+ T_TASK(cmd)->t_task_cdbs++;
+ } else {
+ int set_counts = 1;
+
+ /*
+ * Setup any BIDI READ tasks and memory from
+ * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
+ * are queued first for the non pSCSI passthrough case.
+ */
+ if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
+ (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
+ rc = transport_generic_get_cdb_count(cmd,
+ T_TASK(cmd)->t_task_lba,
+ T_TASK(cmd)->t_tasks_sectors,
+ DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
+ set_counts);
+ if (!(rc)) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ set_counts = 0;
+ }
+ /*
+ * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
+ * Note for BIDI transfers this will contain the WRITE payload
+ */
+ task_cdbs = transport_generic_get_cdb_count(cmd,
+ T_TASK(cmd)->t_task_lba,
+ T_TASK(cmd)->t_tasks_sectors,
+ cmd->data_direction, T_TASK(cmd)->t_mem_list,
+ set_counts);
+ if (!(task_cdbs)) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ T_TASK(cmd)->t_task_cdbs += task_cdbs;
+
+#if 0
+ printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
+ " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
+ T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
+ T_TASK(cmd)->t_task_cdbs);
+#endif
+ }
+
+ atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
+ atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
+ atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
+ return 0;
+}
+
+static struct list_head *transport_init_se_mem_list(void)
+{
+ struct list_head *se_mem_list;
+
+ se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!(se_mem_list)) {
+ printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(se_mem_list);
+
+ return se_mem_list;
+}
+
+static int
+transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
+{
+ unsigned char *buf;
+ struct se_mem *se_mem;
+
+ T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
+ if (!(T_TASK(cmd)->t_mem_list))
+ return -ENOMEM;
+
+ /*
+ * If the device uses memory mapping this is enough.
+ */
+ if (cmd->se_dev->transport->do_se_mem_map)
+ return 0;
+
+ /*
+ * Setup BIDI-COMMAND READ list of struct se_mem elements
+ */
+ if (T_TASK(cmd)->t_tasks_bidi) {
+ T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
+ if (!(T_TASK(cmd)->t_mem_bidi_list)) {
+ kfree(T_TASK(cmd)->t_mem_list);
+ return -ENOMEM;
+ }
+ }
+
+ while (length) {
+ se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+ if (!(se_mem)) {
+ printk(KERN_ERR "Unable to allocate struct se_mem\n");
+ goto out;
+ }
+ INIT_LIST_HEAD(&se_mem->se_list);
+ se_mem->se_len = (length > dma_size) ? dma_size : length;
+
+/* #warning FIXME Allocate contiguous pages for struct se_mem elements */
+ se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0);
+ if (!(se_mem->se_page)) {
+ printk(KERN_ERR "alloc_pages() failed\n");
+ goto out;
+ }
+
+ buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
+ if (!(buf)) {
+ printk(KERN_ERR "kmap_atomic() failed\n");
+ goto out;
+ }
+ memset(buf, 0, se_mem->se_len);
+ kunmap_atomic(buf, KM_IRQ0);
+
+ list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
+ T_TASK(cmd)->t_tasks_se_num++;
+
+ DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
+ " Offset(%u)\n", se_mem->se_page, se_mem->se_len,
+ se_mem->se_off);
+
+ length -= se_mem->se_len;
+ }
+
+ DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
+ T_TASK(cmd)->t_tasks_se_num);
+
+ return 0;
+out:
+ return -1;
+}
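+
+#if 0
+/*
+ * Worked example (illustrative only, not part of this patch) of the
+ * chunking above: with dma_size = PAGE_SIZE (4096 in the common case)
+ * and a cmd->data_length of 10240 bytes, the loop allocates three
+ * struct se_mem elements with se_len = 4096, 4096 and 2048, i.e.:
+ */
+static u32 example_se_mem_count(u32 length, u32 dma_size)
+{
+	u32 count = 0;
+
+	while (length) {
+		length -= min(length, dma_size);
+		count++;		/* 10240 / 4096 -> 3 elements */
+	}
+	return count;
+}
+#endif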
+
+extern u32 transport_calc_sg_num(
+ struct se_task *task,
+ struct se_mem *in_se_mem,
+ u32 task_offset)
+{
+ struct se_cmd *se_cmd = task->task_se_cmd;
+ struct se_device *se_dev = SE_DEV(se_cmd);
+ struct se_mem *se_mem = in_se_mem;
+ struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
+ u32 sg_length, task_size = task->task_size, task_sg_num_padded;
+
+ while (task_size != 0) {
+ DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
+ " se_mem->se_off(%u) task_offset(%u)\n",
+ se_mem->se_page, se_mem->se_len,
+ se_mem->se_off, task_offset);
+
+ if (task_offset == 0) {
+ if (task_size >= se_mem->se_len) {
+ sg_length = se_mem->se_len;
+
+ if (!(list_is_last(&se_mem->se_list,
+ T_TASK(se_cmd)->t_mem_list)))
+ se_mem = list_entry(se_mem->se_list.next,
+ struct se_mem, se_list);
+ } else {
+ sg_length = task_size;
+ task_size -= sg_length;
+ goto next;
+ }
+
+ DEBUG_SC("sg_length(%u) task_size(%u)\n",
+ sg_length, task_size);
+ } else {
+ if ((se_mem->se_len - task_offset) > task_size) {
+ sg_length = task_size;
+ task_size -= sg_length;
+ goto next;
+ } else {
+ sg_length = (se_mem->se_len - task_offset);
+
+ if (!(list_is_last(&se_mem->se_list,
+ T_TASK(se_cmd)->t_mem_list)))
+ se_mem = list_entry(se_mem->se_list.next,
+ struct se_mem, se_list);
+ }
+
+ DEBUG_SC("sg_length(%u) task_size(%u)\n",
+ sg_length, task_size);
+
+ task_offset = 0;
+ }
+ task_size -= sg_length;
+next:
+ DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
+ task->task_no, task_size);
+
+ task->task_sg_num++;
+ }
+ /*
+ * Check if the fabric module driver is requesting that all
+ * struct se_task->task_sg[] be chained together.. If so,
+ * then allocate an extra padding SG entry for linking and
+ * marking the end of the chained SGL.
+ */
+ if (tfo->task_sg_chaining) {
+ task_sg_num_padded = (task->task_sg_num + 1);
+ task->task_padded_sg = 1;
+ } else
+ task_sg_num_padded = task->task_sg_num;
+
+ task->task_sg = kzalloc(task_sg_num_padded *
+ sizeof(struct scatterlist), GFP_KERNEL);
+ if (!(task->task_sg)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " task->task_sg\n");
+ return 0;
+ }
+ sg_init_table(&task->task_sg[0], task_sg_num_padded);
+ /*
+ * Setup task->task_sg_bidi for SCSI READ payload for
+ * TCM/pSCSI passthrough if present for BIDI-COMMAND
+ */
+ if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
+ (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
+ task->task_sg_bidi = kzalloc(task_sg_num_padded *
+ sizeof(struct scatterlist), GFP_KERNEL);
+ if (!(task->task_sg_bidi)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " task->task_sg_bidi\n");
+ return 0;
+ }
+ sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
+ }
+ /*
+ * For the chaining case, setup the proper end of SGL for the
+ * initial submission of the struct se_task into struct se_subsystem_api.
+ * This will be cleared later by transport_do_task_sg_chain()
+ */
+ if (task->task_padded_sg) {
+ sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
+ /*
+ * Only mark the end of the bi-directional scatterlist when it has
+ * actually been created, i.e. for bi-directional (RD + WR) requests.
+ */
+ if (task->task_sg_bidi)
+ sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
+ }
+
+ DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
+ " task_sg_num_padded(%u)\n", task->task_sg_num,
+ task_sg_num_padded);
+
+ return task->task_sg_num;
+}
+
+static inline int transport_set_tasks_sectors_disk(
+ struct se_task *task,
+ struct se_device *dev,
+ unsigned long long lba,
+ u32 sectors,
+ int *max_sectors_set)
+{
+ if ((lba + sectors) > transport_dev_end_lba(dev)) {
+ task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
+
+ if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
+ task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+ *max_sectors_set = 1;
+ }
+ } else {
+ if (sectors > DEV_ATTRIB(dev)->max_sectors) {
+ task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+ *max_sectors_set = 1;
+ } else
+ task->task_sectors = sectors;
+ }
+
+ return 0;
+}
+
+static inline int transport_set_tasks_sectors_non_disk(
+ struct se_task *task,
+ struct se_device *dev,
+ unsigned long long lba,
+ u32 sectors,
+ int *max_sectors_set)
+{
+ if (sectors > DEV_ATTRIB(dev)->max_sectors) {
+ task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+ *max_sectors_set = 1;
+ } else
+ task->task_sectors = sectors;
+
+ return 0;
+}
+
+static inline int transport_set_tasks_sectors(
+ struct se_task *task,
+ struct se_device *dev,
+ unsigned long long lba,
+ u32 sectors,
+ int *max_sectors_set)
+{
+ return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
+ transport_set_tasks_sectors_disk(task, dev, lba, sectors,
+ max_sectors_set) :
+ transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
+ max_sectors_set);
+}
+
+static int transport_map_sg_to_mem(
+ struct se_cmd *cmd,
+ struct list_head *se_mem_list,
+ void *in_mem,
+ u32 *se_mem_cnt)
+{
+ struct se_mem *se_mem;
+ struct scatterlist *sg;
+ u32 sg_count = 1, cmd_size = cmd->data_length;
+
+ if (!in_mem) {
+ printk(KERN_ERR "No source scatterlist\n");
+ return -1;
+ }
+ sg = (struct scatterlist *)in_mem;
+
+ while (cmd_size) {
+ se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+ if (!(se_mem)) {
+ printk(KERN_ERR "Unable to allocate struct se_mem\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&se_mem->se_list);
+ DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
+ " sg_page: %p offset: %d length: %d\n", cmd_size,
+ sg_page(sg), sg->offset, sg->length);
+
+ se_mem->se_page = sg_page(sg);
+ se_mem->se_off = sg->offset;
+
+ if (cmd_size > sg->length) {
+ se_mem->se_len = sg->length;
+ sg = sg_next(sg);
+ sg_count++;
+ } else
+ se_mem->se_len = cmd_size;
+
+ cmd_size -= se_mem->se_len;
+
+ DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
+ *se_mem_cnt, cmd_size);
+ DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
+ se_mem->se_page, se_mem->se_off, se_mem->se_len);
+
+ list_add_tail(&se_mem->se_list, se_mem_list);
+ (*se_mem_cnt)++;
+ }
+
+ DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
+ " struct se_mem\n", sg_count, *se_mem_cnt);
+
+ if (sg_count != *se_mem_cnt)
+ BUG();
+
+ return 0;
+}
+
+/* transport_map_mem_to_sg():
+ *
+ *
+ */
+int transport_map_mem_to_sg(
+ struct se_task *task,
+ struct list_head *se_mem_list,
+ void *in_mem,
+ struct se_mem *in_se_mem,
+ struct se_mem **out_se_mem,
+ u32 *se_mem_cnt,
+ u32 *task_offset)
+{
+ struct se_cmd *se_cmd = task->task_se_cmd;
+ struct se_mem *se_mem = in_se_mem;
+ struct scatterlist *sg = (struct scatterlist *)in_mem;
+ u32 task_size = task->task_size, sg_no = 0;
+
+ if (!sg) {
+ printk(KERN_ERR "Unable to locate valid struct"
+ " scatterlist pointer\n");
+ return -1;
+ }
+
+ while (task_size != 0) {
+ /*
+ * Set up the contiguous array of scatterlists for
+ * this struct se_task.
+ */
+ sg_assign_page(sg, se_mem->se_page);
+
+ if (*task_offset == 0) {
+ sg->offset = se_mem->se_off;
+
+ if (task_size >= se_mem->se_len) {
+ sg->length = se_mem->se_len;
+
+ if (!(list_is_last(&se_mem->se_list,
+ T_TASK(se_cmd)->t_mem_list))) {
+ se_mem = list_entry(se_mem->se_list.next,
+ struct se_mem, se_list);
+ (*se_mem_cnt)++;
+ }
+ } else {
+ sg->length = task_size;
+ /*
+ * Determine if we need to calculate an offset
+ * into the struct se_mem on the next go around..
+ */
+ task_size -= sg->length;
+ if (!(task_size))
+ *task_offset = sg->length;
+
+ goto next;
+ }
+
+ } else {
+ sg->offset = (*task_offset + se_mem->se_off);
+
+ if ((se_mem->se_len - *task_offset) > task_size) {
+ sg->length = task_size;
+ /*
+ * Determine if we need to calculate an offset
+ * into the struct se_mem on the next go around..
+ */
+ task_size -= sg->length;
+ if (!(task_size))
+ *task_offset += sg->length;
+
+ goto next;
+ } else {
+ sg->length = (se_mem->se_len - *task_offset);
+
+ if (!(list_is_last(&se_mem->se_list,
+ T_TASK(se_cmd)->t_mem_list))) {
+ se_mem = list_entry(se_mem->se_list.next,
+ struct se_mem, se_list);
+ (*se_mem_cnt)++;
+ }
+ }
+
+ *task_offset = 0;
+ }
+ task_size -= sg->length;
+next:
+ DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
+ " task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
+ sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
+
+ sg_no++;
+ if (!(task_size))
+ break;
+
+ sg = sg_next(sg);
+
+ if (task_size > se_cmd->data_length)
+ BUG();
+ }
+ *out_se_mem = se_mem;
+
+ DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
+ " SGs\n", task->task_no, *se_mem_cnt, sg_no);
+
+ return 0;
+}
+
+/*
+ * This function can be used by HW target mode drivers to create a linked
+ * scatterlist from all contiguously allocated struct se_task->task_sg[].
+ * This is intended to be called during the completion path by TCM Core
+ * when struct target_core_fabric_ops->task_sg_chaining is enabled.
+ */
+void transport_do_task_sg_chain(struct se_cmd *cmd)
+{
+ struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
+ struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
+ struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
+ struct se_task *task;
+ struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
+ u32 task_sg_num = 0, sg_count = 0;
+ int i;
+
+ if (tfo->task_sg_chaining == 0) {
+ printk(KERN_ERR "task_sg_chaining is diabled for fabric module:"
+ " %s\n", tfo->get_fabric_name());
+ dump_stack();
+ return;
+ }
+ /*
+ * Walk the struct se_task list and set up scatterlist chains
+ * for each contiguously allocated struct se_task->task_sg[].
+ */
+ list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ if (!(task->task_sg) || !(task->task_padded_sg))
+ continue;
+
+ if (sg_head && sg_link) {
+ sg_head_cur = &task->task_sg[0];
+ sg_link_cur = &task->task_sg[task->task_sg_num];
+ /*
+ * Either add chain or mark end of scatterlist
+ */
+ if (!(list_is_last(&task->t_list,
+ &T_TASK(cmd)->t_task_list))) {
+ /*
+ * Clear existing SGL termination bit set in
+ * transport_calc_sg_num(), see sg_mark_end()
+ */
+ sg_end_cur = &task->task_sg[task->task_sg_num - 1];
+ sg_end_cur->page_link &= ~0x02;
+
+ sg_chain(sg_head, task_sg_num, sg_head_cur);
+ sg_count += (task->task_sg_num + 1);
+ } else
+ sg_count += task->task_sg_num;
+
+ sg_head = sg_head_cur;
+ sg_link = sg_link_cur;
+ task_sg_num = task->task_sg_num;
+ continue;
+ }
+ sg_head = sg_first = &task->task_sg[0];
+ sg_link = &task->task_sg[task->task_sg_num];
+ task_sg_num = task->task_sg_num;
+ /*
+ * Check for single task..
+ */
+ if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
+ /*
+ * Clear existing SGL termination bit set in
+ * transport_calc_sg_num(), see sg_mark_end()
+ */
+ sg_end = &task->task_sg[task->task_sg_num - 1];
+ sg_end->page_link &= ~0x02;
+ sg_count += (task->task_sg_num + 1);
+ } else
+ sg_count += task->task_sg_num;
+ }
+ /*
+ * Setup the starting pointer and total t_tasks_sg_linked_no including
+ * padding SGs for linking and to mark the end.
+ */
+ T_TASK(cmd)->t_tasks_sg_chained = sg_first;
+ T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
+
+ DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
+ " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
+ T_TASK(cmd)->t_tasks_sg_chained_no);
+
+ for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
+ T_TASK(cmd)->t_tasks_sg_chained_no, i) {
+
+ DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
+ sg, sg_page(sg), sg->length, sg->offset);
+ if (sg_is_chain(sg))
+ DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+ if (sg_is_last(sg))
+ DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+ }
+
+}
+EXPORT_SYMBOL(transport_do_task_sg_chain);
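For context, a fabric module that sets ->task_sg_chaining would typically call this helper from its completion path and then hand the chained SGL to its hardware. The sketch below is illustrative only: my_fabric_map_segment() is a hypothetical fabric-private helper, and the T_TASK() accessors are the ones used throughout this patch.

/* Hypothetical fabric completion path consuming the chained SGL. */
static void my_fabric_queue_data_in(struct se_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	/* Collapse all task->task_sg[] arrays into one chained SGL. */
	transport_do_task_sg_chain(cmd);

	for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
		    T_TASK(cmd)->t_tasks_sg_chained_no, i)
		my_fabric_map_segment(cmd, sg_page(sg), sg->offset,
				      sg->length);
}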
+
+static int transport_do_se_mem_map(
+ struct se_device *dev,
+ struct se_task *task,
+ struct list_head *se_mem_list,
+ void *in_mem,
+ struct se_mem *in_se_mem,
+ struct se_mem **out_se_mem,
+ u32 *se_mem_cnt,
+ u32 *task_offset_in)
+{
+ u32 task_offset = *task_offset_in;
+ int ret = 0;
+ /*
+ * se_subsystem_api_t->do_se_mem_map is used when internal allocation
+ * has been done by the transport plugin.
+ */
+ if (TRANSPORT(dev)->do_se_mem_map) {
+ ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
+ in_mem, in_se_mem, out_se_mem, se_mem_cnt,
+ task_offset_in);
+ if (ret == 0)
+ T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+
+ return ret;
+ }
+ /*
+ * This is the normal path for all non-BIDI and BIDI-COMMAND
+ * WRITE payloads. If we need to do BIDI READ passthrough for
+ * TCM/pSCSI, the first call to transport_do_se_mem_map() ->
+ * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
+ * allocation for task->task_sg_bidi, and the subsequent call to
+ * transport_do_se_mem_map() from transport_generic_get_cdb_count()
+ * will map the BIDI READ payload into task->task_sg_bidi.
+ */
+ if (!(task->task_sg_bidi)) {
+ /*
+ * Assume default that transport plugin speaks preallocated
+ * scatterlists.
+ */
+ if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
+ return -1;
+ /*
+ * struct se_task->task_sg now contains the struct scatterlist array.
+ */
+ return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
+ in_se_mem, out_se_mem, se_mem_cnt,
+ task_offset_in);
+ }
+ /*
+ * Handle the se_mem_list -> struct task->task_sg_bidi
+ * memory map for the extra BIDI READ payload
+ */
+ return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
+ in_se_mem, out_se_mem, se_mem_cnt,
+ task_offset_in);
+}
+
+static u32 transport_generic_get_cdb_count(
+ struct se_cmd *cmd,
+ unsigned long long lba,
+ u32 sectors,
+ enum dma_data_direction data_direction,
+ struct list_head *mem_list,
+ int set_counts)
+{
+ unsigned char *cdb = NULL;
+ struct se_task *task;
+ struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
+ struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
+ struct se_device *dev = SE_DEV(cmd);
+ int max_sectors_set = 0, ret;
+ u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
+
+ if (!mem_list) {
+ printk(KERN_ERR "mem_list is NULL in transport_generic_get"
+ "_cdb_count()\n");
+ return 0;
+ }
+ /*
+ * Using RAMDISK_DR backstores is the only case where
+ * mem_list will ever be empty at this point.
+ */
+ if (!(list_empty(mem_list)))
+ se_mem = list_entry(mem_list->next, struct se_mem, se_list);
+ /*
+ * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
+ * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
+ */
+ if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
+ !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
+ (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
+ se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
+ struct se_mem, se_list);
+
+ while (sectors) {
+ DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
+ CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
+ transport_dev_end_lba(dev));
+
+ task = transport_generic_get_task(cmd, data_direction);
+ if (!(task))
+ goto out;
+
+ transport_set_tasks_sectors(task, dev, lba, sectors,
+ &max_sectors_set);
+
+ task->task_lba = lba;
+ lba += task->task_sectors;
+ sectors -= task->task_sectors;
+ task->task_size = (task->task_sectors *
+ DEV_ATTRIB(dev)->block_size);
+
+ cdb = TRANSPORT(dev)->get_cdb(task);
+ if ((cdb)) {
+ memcpy(cdb, T_TASK(cmd)->t_task_cdb,
+ scsi_command_size(T_TASK(cmd)->t_task_cdb));
+ cmd->transport_split_cdb(task->task_lba,
+ &task->task_sectors, cdb);
+ }
+
+ /*
+ * Perform the SE OBJ plugin and/or Transport plugin specific
+ * mapping for T_TASK(cmd)->t_mem_list. And setup the
+ * task->task_sg and if necessary task->task_sg_bidi
+ */
+ ret = transport_do_se_mem_map(dev, task, mem_list,
+ NULL, se_mem, &se_mem_lout, &se_mem_cnt,
+ &task_offset_in);
+ if (ret < 0)
+ goto out;
+
+ se_mem = se_mem_lout;
+ /*
+ * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
+ * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
+ *
+ * Note that the first call to transport_do_se_mem_map() above will
+ * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
+ * -> transport_calc_sg_num(), and the second here will do the
+ * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
+ */
+ if (task->task_sg_bidi != NULL) {
+ ret = transport_do_se_mem_map(dev, task,
+ T_TASK(cmd)->t_mem_bidi_list, NULL,
+ se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
+ &task_offset_in);
+ if (ret < 0)
+ goto out;
+
+ se_mem_bidi = se_mem_bidi_lout;
+ }
+ task_cdbs++;
+
+ DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
+ task_cdbs, task->task_sg_num);
+
+ if (max_sectors_set) {
+ max_sectors_set = 0;
+ continue;
+ }
+
+ if (!sectors)
+ break;
+ }
+
+ if (set_counts) {
+ atomic_inc(&T_TASK(cmd)->t_fe_count);
+ atomic_inc(&T_TASK(cmd)->t_se_count);
+ }
+
+ DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
+ CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
+ ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
+
+ return task_cdbs;
+out:
+ return 0;
+}
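As a worked example of the split performed by the loop above (not part of the patch): with DEV_ATTRIB(dev)->max_sectors = 128 and a 512-byte block size, a 2048-sector WRITE is carved into 16 struct se_task descriptors of 64 KiB each. A stand-alone sketch of the same arithmetic:

#include <stdio.h>

/* Stand-alone illustration of the LBA/sector split done above. */
int main(void)
{
	unsigned long long lba = 0;
	unsigned int sectors = 2048, max_sectors = 128, block_size = 512;
	unsigned int task_no = 0;

	while (sectors) {
		unsigned int task_sectors =
			(sectors > max_sectors) ? max_sectors : sectors;

		printf("task[%u] lba=%llu sectors=%u size=%u bytes\n",
		       task_no++, lba, task_sectors,
		       task_sectors * block_size);

		lba += task_sectors;
		sectors -= task_sectors;
	}
	return 0;
}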
+
+static int
+transport_map_control_cmd_to_task(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *cdb;
+ struct se_task *task;
+ int ret;
+
+ task = transport_generic_get_task(cmd, cmd->data_direction);
+ if (!task)
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+ cdb = TRANSPORT(dev)->get_cdb(task);
+ if (cdb)
+ memcpy(cdb, cmd->t_task->t_task_cdb,
+ scsi_command_size(cmd->t_task->t_task_cdb));
+
+ task->task_size = cmd->data_length;
+ task->task_sg_num =
+ (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
+
+ atomic_inc(&cmd->t_task->t_fe_count);
+ atomic_inc(&cmd->t_task->t_se_count);
+
+ if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
+ struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
+ u32 se_mem_cnt = 0, task_offset = 0;
+
+ BUG_ON(list_empty(cmd->t_task->t_mem_list));
+
+ ret = transport_do_se_mem_map(dev, task,
+ cmd->t_task->t_mem_list, NULL, se_mem,
+ &se_mem_lout, &se_mem_cnt, &task_offset);
+ if (ret < 0)
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+ if (dev->transport->map_task_SG)
+ return dev->transport->map_task_SG(task);
+ return 0;
+ } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
+ if (dev->transport->map_task_non_SG)
+ return dev->transport->map_task_non_SG(task);
+ return 0;
+ } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+ if (dev->transport->cdb_none)
+ return dev->transport->cdb_none(task);
+ return 0;
+ } else {
+ BUG();
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ }
+}
+
+/* transport_generic_new_cmd(): Called from transport_processing_thread()
+ *
+ * Allocate storage transport resources from a set of values predefined
+ * by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
+ * Any non-zero return here is treated as an "out of resource" failure.
+ */
+ /*
+ * Generate struct se_task(s) and/or their payloads for this CDB.
+ */
+static int transport_generic_new_cmd(struct se_cmd *cmd)
+{
+ struct se_portal_group *se_tpg;
+ struct se_task *task;
+ struct se_device *dev = SE_DEV(cmd);
+ int ret = 0;
+
+ /*
+ * Determine if the TCM fabric module has already allocated physical
+ * memory, and is directly calling transport_generic_map_mem_to_cmd()
+ * to setup beforehand the linked list of physical memory at
+ * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
+ */
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+ ret = transport_allocate_resources(cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = transport_get_sectors(cmd);
+ if (ret < 0)
+ return ret;
+
+ ret = transport_new_cmd_obj(cmd);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Determine if the calling TCM fabric module is talking to
+ * Linux/NET via kernel sockets and needs to allocate a
+ * struct iovec array to complete the struct se_cmd
+ */
+ se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
+ if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
+ ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
+ if (ret < 0)
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ }
+
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+ list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ if (atomic_read(&task->task_sent))
+ continue;
+ if (!dev->transport->map_task_SG)
+ continue;
+
+ ret = dev->transport->map_task_SG(task);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ ret = transport_map_control_cmd_to_task(cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready..
+ * This WRITE struct se_cmd (and all of its associated struct se_task's)
+ * will be added to the struct se_device execution queue after its WRITE
+ * data has arrived. (ie: It gets handled by the transport processing
+ * thread a second time)
+ */
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ transport_add_tasks_to_state_queue(cmd);
+ return transport_generic_write_pending(cmd);
+ }
+ /*
+ * Everything else but a WRITE, add the struct se_cmd's struct se_task's
+ * to the execution queue.
+ */
+ transport_execute_tasks(cmd);
+ return 0;
+}
+
+/* transport_generic_process_write():
+ *
+ *
+ */
+void transport_generic_process_write(struct se_cmd *cmd)
+{
+#if 0
+ /*
+ * Copy SCSI Presented DTL sector(s) from received buffers allocated to
+ * original EDTL
+ */
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ if (!T_TASK(cmd)->t_tasks_se_num) {
+ unsigned char *dst, *buf =
+ (unsigned char *)T_TASK(cmd)->t_task_buf;
+
+ dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
+ if (!(dst)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " WRITE underflow\n");
+ transport_generic_request_failure(cmd, NULL,
+ PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+ return;
+ }
+ memcpy(dst, buf, cmd->cmd_spdtl);
+
+ kfree(T_TASK(cmd)->t_task_buf);
+ T_TASK(cmd)->t_task_buf = dst;
+ } else {
+ struct scatterlist *sg =
+ (struct scatterlist *)T_TASK(cmd)->t_task_buf;
+ struct scatterlist *orig_sg;
+
+ orig_sg = kzalloc(sizeof(struct scatterlist) *
+ T_TASK(cmd)->t_tasks_se_num,
+ GFP_KERNEL);
+ if (!(orig_sg)) {
+ printk(KERN_ERR "Unable to allocate memory"
+ " for WRITE underflow\n");
+ transport_generic_request_failure(cmd, NULL,
+ PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+ return;
+ }
+
+ memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
+ sizeof(struct scatterlist) *
+ T_TASK(cmd)->t_tasks_se_num);
+
+ cmd->data_length = cmd->cmd_spdtl;
+ /*
+ * FIXME, clear out original struct se_task and state
+ * information.
+ */
+ if (transport_generic_new_cmd(cmd) < 0) {
+ transport_generic_request_failure(cmd, NULL,
+ PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+ kfree(orig_sg);
+ return;
+ }
+
+ transport_memcpy_write_sg(cmd, orig_sg);
+ }
+ }
+#endif
+ transport_execute_tasks(cmd);
+}
+EXPORT_SYMBOL(transport_generic_process_write);
+
+/* transport_generic_write_pending():
+ *
+ *
+ */
+static int transport_generic_write_pending(struct se_cmd *cmd)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ /*
+ * For the TCM control CDBs using a contiguous buffer, do the memcpy
+ * from the passed Linux/SCSI struct scatterlist located at
+ * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at
+ * T_TASK(se_cmd)->t_task_buf.
+ */
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
+ transport_memcpy_read_contig(cmd,
+ T_TASK(cmd)->t_task_buf,
+ T_TASK(cmd)->t_task_pt_sgl);
+ /*
+ * Clear the se_cmd for WRITE_PENDING status in order to set
+ * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
+ * can be called from HW target mode interrupt code. This is safe
+ * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
+ * because the se_cmd->se_lun pointer is not being cleared.
+ */
+ transport_cmd_check_stop(cmd, 1, 0);
+
+ /*
+ * Call the fabric write_pending function here to let the
+ * frontend know that WRITE buffers are ready.
+ */
+ ret = CMD_TFO(cmd)->write_pending(cmd);
+ if (ret < 0)
+ return ret;
+
+ return PYX_TRANSPORT_WRITE_PENDING;
+}
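In practice a fabric's ->write_pending() callback just kicks off the data transfer from the initiator side and returns; once the WRITE payload has arrived, the fabric re-queues the command via transport_generic_handle_data(), as the comment above notes. A hypothetical sketch, with my_fabric_request_data() assumed to be a fabric-specific primitive that is not part of this patch:

/* Illustrative fabric callbacks around the WRITE_PENDING state. */
static int my_fabric_write_pending(struct se_cmd *cmd)
{
	/* Ask the initiator side to transfer the WRITE payload. */
	my_fabric_request_data(cmd);
	return 0;
}

static void my_fabric_data_received(struct se_cmd *cmd)
{
	/* Hand the now-complete WRITE back to the processing thread. */
	transport_generic_handle_data(cmd);
}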
+
+/* transport_release_cmd_to_pool():
+ *
+ *
+ */
+void transport_release_cmd_to_pool(struct se_cmd *cmd)
+{
+ BUG_ON(!T_TASK(cmd));
+ BUG_ON(!CMD_TFO(cmd));
+
+ transport_free_se_cmd(cmd);
+ CMD_TFO(cmd)->release_cmd_to_pool(cmd);
+}
+EXPORT_SYMBOL(transport_release_cmd_to_pool);
+
+/* transport_generic_free_cmd():
+ *
+ * Called from processing frontend to release storage engine resources
+ */
+void transport_generic_free_cmd(
+ struct se_cmd *cmd,
+ int wait_for_tasks,
+ int release_to_pool,
+ int session_reinstatement)
+{
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
+ transport_release_cmd_to_pool(cmd);
+ else {
+ core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
+
+ if (SE_LUN(cmd)) {
+#if 0
+ printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
+ " SE_LUN(cmd)\n", cmd,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+ transport_lun_remove_cmd(cmd);
+ }
+
+ if (wait_for_tasks && cmd->transport_wait_for_tasks)
+ cmd->transport_wait_for_tasks(cmd, 0, 0);
+
+ transport_generic_remove(cmd, release_to_pool,
+ session_reinstatement);
+ }
+}
+EXPORT_SYMBOL(transport_generic_free_cmd);
+
+static void transport_nop_wait_for_tasks(
+ struct se_cmd *cmd,
+ int remove_cmd,
+ int session_reinstatement)
+{
+ return;
+}
+
+/* transport_lun_wait_for_tasks():
+ *
+ * Called from ConfigFS context to stop the passed struct se_cmd to allow
+ * a struct se_lun to be successfully shut down.
+ */
+static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
+{
+ unsigned long flags;
+ int ret;
+ /*
+ * If the frontend has already requested this struct se_cmd to
+ * be stopped, we can safely ignore this struct se_cmd.
+ */
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
+ atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+ DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
+ " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ transport_cmd_check_stop(cmd, 1, 0);
+ return -1;
+ }
+ atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+
+ ret = transport_stop_tasks_for_cmd(cmd);
+
+ DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
+ " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
+ if (!ret) {
+ DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
+ DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ }
+ transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+
+ return 0;
+}
+
+/* #define DEBUG_CLEAR_LUN */
+#ifdef DEBUG_CLEAR_LUN
+#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CLEAR_L(x...)
+#endif
+
+static void __transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+ struct se_cmd *cmd = NULL;
+ unsigned long lun_flags, cmd_flags;
+ /*
+ * Do exception processing and return CHECK_CONDITION status to the
+ * Initiator Port.
+ */
+ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+ while (!list_empty_careful(&lun->lun_cmd_list)) {
+ cmd = list_entry(lun->lun_cmd_list.next,
+ struct se_cmd, se_lun_list);
+ list_del(&cmd->se_lun_list);
+
+ if (!(T_TASK(cmd))) {
+ printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
+ "[i,t]_state: %u/%u\n",
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+ BUG();
+ }
+ atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+ /*
+ * This will notify iscsi_target_transport.c:
+ * transport_cmd_check_stop() that a LUN shutdown is in
+ * progress for the iscsi_cmd_t.
+ */
+ spin_lock(&T_TASK(cmd)->t_state_lock);
+ DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
+ "_lun_stop for ITT: 0x%08x\n",
+ SE_LUN(cmd)->unpacked_lun,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
+ spin_unlock(&T_TASK(cmd)->t_state_lock);
+
+ spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+
+ if (!(SE_LUN(cmd))) {
+ printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+ BUG();
+ }
+ /*
+ * If the Storage engine still owns the iscsi_cmd_t, determine
+ * and/or stop its context.
+ */
+ DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
+ "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
+ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+ continue;
+ }
+
+ DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+ "_wait_for_tasks(): SUCCESS\n",
+ SE_LUN(cmd)->unpacked_lun,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ goto check_cond;
+ }
+ atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+
+ transport_free_dev_tasks(cmd);
+ /*
+ * The Storage engine stopped this struct se_cmd before it was
+ * sent to the fabric frontend for delivery back to the
+ * Initiator Node. Return this SCSI CDB back with a
+ * CHECK_CONDITION status.
+ */
+check_cond:
+ transport_send_check_condition_and_sense(cmd,
+ TCM_NON_EXISTENT_LUN, 0);
+ /*
+ * If the fabric frontend is waiting for this iscsi_cmd_t to
+ * be released, notify the waiting thread now that LU has
+ * finished accessing it.
+ */
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
+ DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+ " struct se_cmd: %p ITT: 0x%08x\n",
+ lun->unpacked_lun,
+ cmd, CMD_TFO(cmd)->get_task_tag(cmd));
+
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ cmd_flags);
+ transport_cmd_check_stop(cmd, 1, 0);
+ complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+ continue;
+ }
+ DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+ lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));
+
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+ }
+ spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+}
+
+static int transport_clear_lun_thread(void *p)
+{
+ struct se_lun *lun = (struct se_lun *)p;
+
+ __transport_clear_lun_from_sessions(lun);
+ complete(&lun->lun_shutdown_comp);
+
+ return 0;
+}
+
+int transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+ struct task_struct *kt;
+
+ kt = kthread_run(transport_clear_lun_thread, (void *)lun,
+ "tcm_cl_%u", lun->unpacked_lun);
+ if (IS_ERR(kt)) {
+ printk(KERN_ERR "Unable to start clear_lun thread\n");
+ return -1;
+ }
+ wait_for_completion(&lun->lun_shutdown_comp);
+
+ return 0;
+}
+
+/* transport_generic_wait_for_tasks():
+ *
+ * Called from frontend or passthrough context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
+ */
+static void transport_generic_wait_for_tasks(
+ struct se_cmd *cmd,
+ int remove_cmd,
+ int session_reinstatement)
+{
+ unsigned long flags;
+
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
+ return;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ /*
+ * If we are already stopped due to an external event (ie: LUN shutdown)
+ * sleep until the connection can have the passed struct se_cmd back.
+ * The T_TASK(cmd)->transport_lun_fe_stop_comp will be completed by
+ * transport_clear_lun_from_sessions() once the ConfigFS context caller
+ * has completed its operation on the struct se_cmd.
+ */
+ if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+
+ DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
+ " wait_for_completion(&T_TASK(cmd)transport_lun_fe"
+ "_stop_comp); for ITT: 0x%08x\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ /*
+ * There is a special case for WRITES where a FE exception +
+ * LUN shutdown means ConfigFS context is still sleeping on
+ * transport_lun_stop_comp in transport_lun_wait_for_tasks().
+ * We go ahead and up transport_lun_stop_comp just to be sure
+ * here.
+ */
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ complete(&T_TASK(cmd)->transport_lun_stop_comp);
+ wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_all_task_dev_remove_state(cmd);
+ /*
+ * At this point, the frontend who was the originator of this
+ * struct se_cmd, now owns the structure and can be released through
+ * normal means below.
+ */
+ DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
+ " wait_for_completion(&T_TASK(cmd)transport_lun_fe_"
+ "stop_comp); for ITT: 0x%08x\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+ }
+ if (!atomic_read(&T_TASK(cmd)->t_transport_active))
+ goto remove;
+
+ atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
+
+ DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+ " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
+ " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
+ CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+ cmd->deferred_t_state);
+
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+
+ wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
+
+ DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
+ "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+remove:
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ if (!remove_cmd)
+ return;
+
+ transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
+}
+
+static int transport_get_sense_codes(
+ struct se_cmd *cmd,
+ u8 *asc,
+ u8 *ascq)
+{
+ *asc = cmd->scsi_asc;
+ *ascq = cmd->scsi_ascq;
+
+ return 0;
+}
+
+static int transport_set_sense_codes(
+ struct se_cmd *cmd,
+ u8 asc,
+ u8 ascq)
+{
+ cmd->scsi_asc = asc;
+ cmd->scsi_ascq = ascq;
+
+ return 0;
+}
+
+int transport_send_check_condition_and_sense(
+ struct se_cmd *cmd,
+ u8 reason,
+ int from_transport)
+{
+ unsigned char *buffer = cmd->sense_buffer;
+ unsigned long flags;
+ int offset;
+ u8 asc = 0, ascq = 0;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return 0;
+ }
+ cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ if (!reason && from_transport)
+ goto after_reason;
+
+ if (!from_transport)
+ cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ /*
+ * Data Segment and SenseLength of the fabric response PDU.
+ *
+ * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
+ * from include/scsi/scsi_cmnd.h
+ */
+ offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+ TRANSPORT_SENSE_BUFFER);
+ /*
+ * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
+ * SENSE KEY values from include/scsi/scsi.h
+ */
+ switch (reason) {
+ case TCM_NON_EXISTENT_LUN:
+ case TCM_UNSUPPORTED_SCSI_OPCODE:
+ case TCM_SECTOR_COUNT_TOO_MANY:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* INVALID COMMAND OPERATION CODE */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
+ break;
+ case TCM_UNKNOWN_MODE_PAGE:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* INVALID FIELD IN CDB */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+ break;
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* BUS DEVICE RESET FUNCTION OCCURRED */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
+ break;
+ case TCM_INCORRECT_AMOUNT_OF_DATA:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* WRITE ERROR */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+ /* NOT ENOUGH UNSOLICITED DATA */
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
+ break;
+ case TCM_INVALID_CDB_FIELD:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* INVALID FIELD IN CDB */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+ break;
+ case TCM_INVALID_PARAMETER_LIST:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* INVALID FIELD IN PARAMETER LIST */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
+ break;
+ case TCM_UNEXPECTED_UNSOLICITED_DATA:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* WRITE ERROR */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+ /* UNEXPECTED_UNSOLICITED_DATA */
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
+ break;
+ case TCM_SERVICE_CRC_ERROR:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* PROTOCOL SERVICE CRC ERROR */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
+ /* N/A */
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
+ break;
+ case TCM_SNACK_REJECTED:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* READ ERROR */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
+ /* FAILED RETRANSMISSION REQUEST */
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
+ break;
+ case TCM_WRITE_PROTECTED:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* DATA PROTECT */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
+ /* WRITE PROTECTED */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
+ break;
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* UNIT ATTENTION */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+ core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+ buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+ break;
+ case TCM_CHECK_CONDITION_NOT_READY:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* Not Ready */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
+ transport_get_sense_codes(cmd, &asc, &ascq);
+ buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+ break;
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ default:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL UNIT COMMUNICATION FAILURE */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
+ break;
+ }
+ /*
+ * This code uses linux/include/scsi/scsi.h SAM status codes!
+ */
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ /*
+ * Automatically padded, this value is encoded in the fabric's
+ * data_length response PDU containing the SCSI defined sense data.
+ */
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
+
+after_reason:
+ CMD_TFO(cmd)->queue_status(cmd);
+ return 0;
+}
+EXPORT_SYMBOL(transport_send_check_condition_and_sense);
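The payload built above is fixed-format sense data (response code 0x70), so the sense key, ASC, and ASCQ land at the standard SPC-3 byte offsets. A hypothetical fabric-side decode, shown only to illustrate the layout and assuming a fabric whose set_fabric_sense_len() returns 0 so the sense data starts at byte 0 of cmd->sense_buffer:

/* Illustrative decode of the fixed-format sense data built above. */
static void my_fabric_log_sense(struct se_cmd *cmd)
{
	unsigned char *sense = cmd->sense_buffer;

	if ((sense[0] & 0x7f) != 0x70)	/* current error, fixed format */
		return;

	printk(KERN_INFO "CHECK_CONDITION: key 0x%02x ASC 0x%02x ASCQ 0x%02x\n",
	       sense[2] & 0x0f, sense[12], sense[13]);
}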
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+ int ret = 0;
+
+ if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
+ if (!(send_status) ||
+ (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+ return 1;
+#if 0
+ printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+ " status for CDB: 0x%02x ITT: 0x%08x\n",
+ T_TASK(cmd)->t_task_cdb[0],
+ CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+ cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+ CMD_TFO(cmd)->queue_status(cmd);
+ ret = 1;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(transport_check_aborted_status);
+
+void transport_send_task_abort(struct se_cmd *cmd)
+{
+ /*
+ * If there are still expected incoming fabric WRITEs, we wait
+ * until they have completed before sending a TASK_ABORTED
+ * response. This response with TASK_ABORTED status will be
+ * queued back to fabric module by transport_check_aborted_status().
+ */
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
+ atomic_inc(&T_TASK(cmd)->t_transport_aborted);
+ smp_mb__after_atomic_inc();
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ transport_new_cmd_failure(cmd);
+ return;
+ }
+ }
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+#if 0
+ printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+ " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
+ CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+ CMD_TFO(cmd)->queue_status(cmd);
+}
+
+/* transport_generic_do_tmr():
+ *
+ *
+ */
+int transport_generic_do_tmr(struct se_cmd *cmd)
+{
+ struct se_cmd *ref_cmd;
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_tmr_req *tmr = cmd->se_tmr_req;
+ int ret;
+
+ switch (tmr->function) {
+ case ABORT_TASK:
+ ref_cmd = tmr->ref_cmd;
+ tmr->response = TMR_FUNCTION_REJECTED;
+ break;
+ case ABORT_TASK_SET:
+ case CLEAR_ACA:
+ case CLEAR_TASK_SET:
+ tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+ break;
+ case LUN_RESET:
+ ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
+ tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
+ TMR_FUNCTION_REJECTED;
+ break;
+#if 0
+ case TARGET_WARM_RESET:
+ transport_generic_host_reset(dev->se_hba);
+ tmr->response = TMR_FUNCTION_REJECTED;
+ break;
+ case TARGET_COLD_RESET:
+ transport_generic_host_reset(dev->se_hba);
+ transport_generic_cold_reset(dev->se_hba);
+ tmr->response = TMR_FUNCTION_REJECTED;
+ break;
+#endif
+ default:
+ printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
+ tmr->function);
+ tmr->response = TMR_FUNCTION_REJECTED;
+ break;
+ }
+
+ cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+ CMD_TFO(cmd)->queue_tm_rsp(cmd);
+
+ transport_cmd_check_stop(cmd, 2, 0);
+ return 0;
+}
+
+/*
+ * Called with spin_lock_irq(&dev->execute_task_lock); held
+ *
+ */
+static struct se_task *
+transport_get_task_from_state_list(struct se_device *dev)
+{
+ struct se_task *task;
+
+ if (list_empty(&dev->state_task_list))
+ return NULL;
+
+ list_for_each_entry(task, &dev->state_task_list, t_state_list)
+ break;
+
+ list_del(&task->t_state_list);
+ atomic_set(&task->task_state_active, 0);
+
+ return task;
+}
+
+static void transport_processing_shutdown(struct se_device *dev)
+{
+ struct se_cmd *cmd;
+ struct se_queue_req *qr;
+ struct se_task *task;
+ u8 state;
+ unsigned long flags;
+ /*
+ * Empty the struct se_device's struct se_task state list.
+ */
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ while ((task = transport_get_task_from_state_list(dev))) {
+ if (!(TASK_CMD(task))) {
+ printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+ continue;
+ }
+ cmd = TASK_CMD(task);
+
+ if (!T_TASK(cmd)) {
+ printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
+ " %p ITT: 0x%08x\n", task, cmd,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ continue;
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+
+ DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
+ " i_state/def_i_state: %d/%d, t_state/def_t_state:"
+ " %d/%d cdb: 0x%02x\n", cmd, task,
+ CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
+ CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
+ cmd->t_state, cmd->deferred_t_state,
+ T_TASK(cmd)->t_task_cdb[0]);
+ DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+ " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
+ " t_transport_stop: %d t_transport_sent: %d\n",
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ T_TASK(cmd)->t_task_cdbs,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+ atomic_read(&T_TASK(cmd)->t_transport_active),
+ atomic_read(&T_TASK(cmd)->t_transport_stop),
+ atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+ if (atomic_read(&task->task_active)) {
+ atomic_set(&task->task_stop, 1);
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+ " %p\n", task, dev);
+ wait_for_completion(&task->task_stop_comp);
+ DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+ task, dev);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+ atomic_set(&task->task_active, 0);
+ atomic_set(&task->task_stop, 0);
+ }
+ __transport_stop_task_timer(task, &flags);
+
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ DEBUG_DO("Skipping task: %p, dev: %p for"
+ " t_task_cdbs_ex_left: %d\n", task, dev,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ continue;
+ }
+
+ if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
+ DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+ " %p\n", task, dev);
+
+ if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+ transport_send_check_condition_and_sense(
+ cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
+ 0);
+ transport_remove_cmd_from_queue(cmd,
+ SE_DEV(cmd)->dev_queue_obj);
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop(cmd, 1, 0);
+ } else {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ transport_remove_cmd_from_queue(cmd,
+ SE_DEV(cmd)->dev_queue_obj);
+
+ transport_lun_remove_cmd(cmd);
+
+ if (transport_cmd_check_stop(cmd, 1, 0))
+ transport_generic_remove(cmd, 0, 0);
+ }
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ continue;
+ }
+ DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+ task, dev);
+
+ if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+ transport_send_check_condition_and_sense(cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+ transport_remove_cmd_from_queue(cmd,
+ SE_DEV(cmd)->dev_queue_obj);
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop(cmd, 1, 0);
+ } else {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ transport_remove_cmd_from_queue(cmd,
+ SE_DEV(cmd)->dev_queue_obj);
+ transport_lun_remove_cmd(cmd);
+
+ if (transport_cmd_check_stop(cmd, 1, 0))
+ transport_generic_remove(cmd, 0, 0);
+ }
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ /*
+ * Empty the struct se_device's struct se_cmd list.
+ */
+ spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
+ spin_unlock_irqrestore(
+ &dev->dev_queue_obj->cmd_queue_lock, flags);
+ cmd = (struct se_cmd *)qr->cmd;
+ state = qr->state;
+ kfree(qr);
+
+ DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
+ cmd, state);
+
+ if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ transport_send_check_condition_and_sense(cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop(cmd, 1, 0);
+ } else {
+ transport_lun_remove_cmd(cmd);
+ if (transport_cmd_check_stop(cmd, 1, 0))
+ transport_generic_remove(cmd, 0, 0);
+ }
+ spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ }
+ spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+}
+
+/* transport_processing_thread():
+ *
+ *
+ */
+static int transport_processing_thread(void *param)
+{
+ int ret, t_state;
+ struct se_cmd *cmd;
+ struct se_device *dev = (struct se_device *) param;
+ struct se_queue_req *qr;
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop()) {
+ ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
+ atomic_read(&dev->dev_queue_obj->queue_cnt) ||
+ kthread_should_stop());
+ if (ret < 0)
+ goto out;
+
+ spin_lock_irq(&dev->dev_status_lock);
+ if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
+ spin_unlock_irq(&dev->dev_status_lock);
+ transport_processing_shutdown(dev);
+ continue;
+ }
+ spin_unlock_irq(&dev->dev_status_lock);
+
+get_cmd:
+ __transport_execute_tasks(dev);
+
+ qr = transport_get_qr_from_queue(dev->dev_queue_obj);
+ if (!(qr))
+ continue;
+
+ cmd = (struct se_cmd *)qr->cmd;
+ t_state = qr->state;
+ kfree(qr);
+
+ switch (t_state) {
+ case TRANSPORT_NEW_CMD_MAP:
+ if (!(CMD_TFO(cmd)->new_cmd_map)) {
+ printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
+ " NULL for TRANSPORT_NEW_CMD_MAP\n");
+ BUG();
+ }
+ ret = CMD_TFO(cmd)->new_cmd_map(cmd);
+ if (ret < 0) {
+ cmd->transport_error_status = ret;
+ transport_generic_request_failure(cmd, NULL,
+ 0, (cmd->data_direction !=
+ DMA_TO_DEVICE));
+ break;
+ }
+ /* Fall through */
+ case TRANSPORT_NEW_CMD:
+ ret = transport_generic_new_cmd(cmd);
+ if (ret < 0) {
+ cmd->transport_error_status = ret;
+ transport_generic_request_failure(cmd, NULL,
+ 0, (cmd->data_direction !=
+ DMA_TO_DEVICE));
+ }
+ break;
+ case TRANSPORT_PROCESS_WRITE:
+ transport_generic_process_write(cmd);
+ break;
+ case TRANSPORT_COMPLETE_OK:
+ transport_stop_all_task_timers(cmd);
+ transport_generic_complete_ok(cmd);
+ break;
+ case TRANSPORT_REMOVE:
+ transport_generic_remove(cmd, 1, 0);
+ break;
+ case TRANSPORT_PROCESS_TMR:
+ transport_generic_do_tmr(cmd);
+ break;
+ case TRANSPORT_COMPLETE_FAILURE:
+ transport_generic_request_failure(cmd, NULL, 1, 1);
+ break;
+ case TRANSPORT_COMPLETE_TIMEOUT:
+ transport_stop_all_task_timers(cmd);
+ transport_generic_request_timeout(cmd);
+ break;
+ default:
+ printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+ " %d for ITT: 0x%08x i_state: %d on SE LUN:"
+ " %u\n", t_state, cmd->deferred_t_state,
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ CMD_TFO(cmd)->get_cmd_state(cmd),
+ SE_LUN(cmd)->unpacked_lun);
+ BUG();
+ }
+
+ goto get_cmd;
+ }
+
+out:
+ transport_release_all_cmds(dev);
+ dev->process_thread = NULL;
+ return 0;
+}
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
new file mode 100644
index 000000000000..a2ef346087e8
--- /dev/null
+++ b/drivers/target/target_core_ua.c
@@ -0,0 +1,332 @@
+/*******************************************************************************
+ * Filename: target_core_ua.c
+ *
+ * This file contains logic for SPC-3 Unit Attention emulation
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+int core_scsi3_ua_check(
+ struct se_cmd *cmd,
+ unsigned char *cdb)
+{
+ struct se_dev_entry *deve;
+ struct se_session *sess = cmd->se_sess;
+ struct se_node_acl *nacl;
+
+ if (!(sess))
+ return 0;
+
+ nacl = sess->se_node_acl;
+ if (!(nacl))
+ return 0;
+
+ deve = &nacl->device_list[cmd->orig_fe_lun];
+ if (!(atomic_read(&deve->ua_count)))
+ return 0;
+ /*
+ * From sam4r14, section 5.14 Unit attention condition:
+ *
+ * a) if an INQUIRY command enters the enabled command state, the
+ * device server shall process the INQUIRY command and shall neither
+ * report nor clear any unit attention condition;
+ * b) if a REPORT LUNS command enters the enabled command state, the
+ * device server shall process the REPORT LUNS command and shall not
+ * report any unit attention condition;
+ * e) if a REQUEST SENSE command enters the enabled command state while
+ * a unit attention condition exists for the SCSI initiator port
+ * associated with the I_T nexus on which the REQUEST SENSE command
+ * was received, then the device server shall process the command
+ * and either:
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
+ case REQUEST_SENSE:
+ return 0;
+ default:
+ return -1;
+ }
+
+ return -1;
+}
+
+int core_scsi3_ua_allocate(
+ struct se_node_acl *nacl,
+ u32 unpacked_lun,
+ u8 asc,
+ u8 ascq)
+{
+ struct se_dev_entry *deve;
+ struct se_ua *ua, *ua_p, *ua_tmp;
+ /*
+ * PASSTHROUGH OPS
+ */
+ if (!(nacl))
+ return -1;
+
+ ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
+ if (!(ua)) {
+ printk(KERN_ERR "Unable to allocate struct se_ua\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&ua->ua_dev_list);
+ INIT_LIST_HEAD(&ua->ua_nacl_list);
+
+ ua->ua_nacl = nacl;
+ ua->ua_asc = asc;
+ ua->ua_ascq = ascq;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[unpacked_lun];
+
+ spin_lock(&deve->ua_lock);
+ list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
+ /*
+ * Do not report the same UNIT ATTENTION twice..
+ */
+ if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
+ spin_unlock(&deve->ua_lock);
+ spin_unlock_irq(&nacl->device_list_lock);
+ kmem_cache_free(se_ua_cache, ua);
+ return 0;
+ }
+ /*
+ * Attach the highest priority Unit Attention to
+ * the head of the list following sam4r14,
+ * Section 5.14 Unit Attention Condition:
+ *
+ * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
+ * POWER ON OCCURRED or
+ * DEVICE INTERNAL RESET
+ * SCSI BUS RESET OCCURRED or
+ * MICROCODE HAS BEEN CHANGED or
+ * protocol specific
+ * BUS DEVICE RESET FUNCTION OCCURRED
+ * I_T NEXUS LOSS OCCURRED
+ * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
+ * all others Lowest
+ *
+ * Each of the ASCQ codes listed above are defined in
+ * the 29h ASC family, see spc4r17 Table D.1
+ */
+ if (ua_p->ua_asc == 0x29) {
+ if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
+ list_add(&ua->ua_nacl_list,
+ &deve->ua_list);
+ else
+ list_add_tail(&ua->ua_nacl_list,
+ &deve->ua_list);
+ } else if (ua_p->ua_asc == 0x2a) {
+ /*
+ * Incoming Family 29h ASCQ codes will override
+ * Family 2Ah ASCQ codes for a Unit Attention condition.
+ */
+ if ((asc == 0x29) || (ascq > ua_p->ua_asc))
+ list_add(&ua->ua_nacl_list,
+ &deve->ua_list);
+ else
+ list_add_tail(&ua->ua_nacl_list,
+ &deve->ua_list);
+ } else
+ list_add_tail(&ua->ua_nacl_list,
+ &deve->ua_list);
+ spin_unlock(&deve->ua_lock);
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ atomic_inc(&deve->ua_count);
+ smp_mb__after_atomic_inc();
+ return 0;
+ }
+ list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
+ spin_unlock(&deve->ua_lock);
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+ " 0x%02x, ASCQ: 0x%02x\n",
+ TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
+ asc, ascq);
+
+ atomic_inc(&deve->ua_count);
+ smp_mb__after_atomic_inc();
+ return 0;
+}
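A typical caller elsewhere in the target core raises a unit attention in roughly the following way. The snippet is illustrative only: nacl and mapped_lun are assumed to come from the caller's context, and the ASC/ASCQ pair is taken from target_core_ua.h below.

/* Illustrative only: queue a RESERVATIONS PREEMPTED unit attention. */
static void my_note_preempt(struct se_node_acl *nacl, u32 mapped_lun)
{
	core_scsi3_ua_allocate(nacl, mapped_lun, 0x2a,
			       ASCQ_2AH_RESERVATIONS_PREEMPTED);
}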
+
+void core_scsi3_ua_release_all(
+ struct se_dev_entry *deve)
+{
+ struct se_ua *ua, *ua_p;
+
+ spin_lock(&deve->ua_lock);
+ list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+ list_del(&ua->ua_nacl_list);
+ kmem_cache_free(se_ua_cache, ua);
+
+ atomic_dec(&deve->ua_count);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&deve->ua_lock);
+}
+
+void core_scsi3_ua_for_check_condition(
+ struct se_cmd *cmd,
+ u8 *asc,
+ u8 *ascq)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_dev_entry *deve;
+ struct se_session *sess = cmd->se_sess;
+ struct se_node_acl *nacl;
+ struct se_ua *ua = NULL, *ua_p;
+ int head = 1;
+
+ if (!(sess))
+ return;
+
+ nacl = sess->se_node_acl;
+ if (!(nacl))
+ return;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[cmd->orig_fe_lun];
+ if (!(atomic_read(&deve->ua_count))) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return;
+ }
+ /*
+ * The highest priority Unit Attentions are placed at the head of the
+ * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
+ * sense data for the received CDB.
+ */
+ spin_lock(&deve->ua_lock);
+ list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+ /*
+ * For ua_intlck_ctrl code not equal to 00b, only report the
+ * highest priority UNIT_ATTENTION and ASC/ASCQ without
+ * clearing it.
+ */
+ if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
+ *asc = ua->ua_asc;
+ *ascq = ua->ua_ascq;
+ break;
+ }
+ /*
+ * Otherwise for the default 00b, release the UNIT ATTENTION
+ * condition. Return the ASC/ASCQ of the highest priority UA
+ * (head of the list) in the outgoing CHECK_CONDITION + sense.
+ */
+ if (head) {
+ *asc = ua->ua_asc;
+ *ascq = ua->ua_ascq;
+ head = 0;
+ }
+ list_del(&ua->ua_nacl_list);
+ kmem_cache_free(se_ua_cache, ua);
+
+ atomic_dec(&deve->ua_count);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&deve->ua_lock);
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
+ " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
+ " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
+ TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+ (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+ "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
+ cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
+}
+
+int core_scsi3_ua_clear_for_request_sense(
+ struct se_cmd *cmd,
+ u8 *asc,
+ u8 *ascq)
+{
+ struct se_dev_entry *deve;
+ struct se_session *sess = cmd->se_sess;
+ struct se_node_acl *nacl;
+ struct se_ua *ua = NULL, *ua_p;
+ int head = 1;
+
+ if (!(sess))
+ return -1;
+
+ nacl = sess->se_node_acl;
+ if (!(nacl))
+ return -1;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[cmd->orig_fe_lun];
+ if (!(atomic_read(&deve->ua_count))) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -1;
+ }
+ /*
+ * The highest priority Unit Attentions are placed at the head of the
+ * struct se_dev_entry->ua_list. The First (and hence highest priority)
+ * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
+ * matching struct se_lun.
+ *
+ * Once the returning ASC/ASCQ values are set, we go ahead and
+ * release all of the Unit Attention conditions for the associated
+ * struct se_lun.
+ */
+ spin_lock(&deve->ua_lock);
+ list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+ if (head) {
+ *asc = ua->ua_asc;
+ *ascq = ua->ua_ascq;
+ head = 0;
+ }
+ list_del(&ua->ua_nacl_list);
+ kmem_cache_free(se_ua_cache, ua);
+
+ atomic_dec(&deve->ua_count);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&deve->ua_lock);
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
+ " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
+ " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+ cmd->orig_fe_lun, *asc, *ascq);
+
+ return (head) ? -1 : 0;
+}
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
new file mode 100644
index 000000000000..6e6b03460a1a
--- /dev/null
+++ b/drivers/target/target_core_ua.h
@@ -0,0 +1,36 @@
+#ifndef TARGET_CORE_UA_H
+#define TARGET_CORE_UA_H
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED 0x00
+#define ASCQ_29H_POWER_ON_OCCURRED 0x01
+#define ASCQ_29H_SCSI_BUS_RESET_OCCURED 0x02
+#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED 0x03
+#define ASCQ_29H_DEVICE_INTERNAL_RESET 0x04
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED 0x05
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD 0x06
+#define ASCQ_29H_NEXUS_LOSS_OCCURRED 0x07
+
+#define ASCQ_2AH_PARAMETERS_CHANGED 0x00
+#define ASCQ_2AH_MODE_PARAMETERS_CHANGED 0x01
+#define ASCQ_2AH_LOG_PARAMETERS_CHANGED 0x02
+#define ASCQ_2AH_RESERVATIONS_PREEMPTED 0x03
+#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
+#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
+#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
+#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_PRIORITY_CHANGED 0x08
+
+#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
+
+extern struct kmem_cache *se_ua_cache;
+
+extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
+extern void core_scsi3_ua_release_all(struct se_dev_entry *);
+extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
+ u8 *, u8 *);
+
+#endif /* TARGET_CORE_UA_H */
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 38244f59cdd9..ade0568c07a4 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -97,22 +97,26 @@ void vhost_poll_stop(struct vhost_poll *poll)
remove_wait_queue(poll->wqh, &poll->wait);
}
+static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
+ unsigned seq)
+{
+ int left;
+ spin_lock_irq(&dev->work_lock);
+ left = seq - work->done_seq;
+ spin_unlock_irq(&dev->work_lock);
+ return left <= 0;
+}
+
static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
unsigned seq;
- int left;
int flushing;
spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
spin_unlock_irq(&dev->work_lock);
- wait_event(work->done, ({
- spin_lock_irq(&dev->work_lock);
- left = seq - work->done_seq <= 0;
- spin_unlock_irq(&dev->work_lock);
- left;
- }));
+ wait_event(work->done, vhost_work_seq_done(dev, work, seq));
spin_lock_irq(&dev->work_lock);
flushing = --work->flushing;
spin_unlock_irq(&dev->work_lock);
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 0c99de0562ca..b358d045f130 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -483,7 +483,7 @@ static void ep93xxfb_dealloc_videomem(struct fb_info *info)
info->screen_base, info->fix.smem_start);
}
-static int __init ep93xxfb_probe(struct platform_device *pdev)
+static int __devinit ep93xxfb_probe(struct platform_device *pdev)
{
struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
struct fb_info *info;
@@ -598,7 +598,7 @@ failed:
return err;
}
-static int ep93xxfb_remove(struct platform_device *pdev)
+static int __devexit ep93xxfb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
struct ep93xx_fbi *fbi = info->par;
@@ -622,7 +622,7 @@ static int ep93xxfb_remove(struct platform_device *pdev)
static struct platform_driver ep93xxfb_driver = {
.probe = ep93xxfb_probe,
- .remove = ep93xxfb_remove,
+ .remove = __devexit_p(ep93xxfb_remove),
.driver = {
.name = "ep93xx-fb",
.owner = THIS_MODULE,