author     David S. Miller <davem@davemloft.net>  2015-07-13 17:28:09 -0700
committer  David S. Miller <davem@davemloft.net>  2015-07-13 17:28:09 -0700
commit     638d3c63811e31b2745f7fdd568b38c8abcffe03 (patch)
tree       606426ab4de84e59c5f50e1e3cce6e24819d45af /drivers
parent     bridge: mdb: add vlan support for user entries (diff)
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/bridge/br_mdb.c

Minor conflict in br_mdb.c: in 'net' we added a memset of the on-stack
'ip' variable, whereas in 'net-next' we assign a new member 'vid'.

Signed-off-by: David S. Miller <davem@davemloft.net>
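For context, the resolution keeps both sides' changes. A minimal sketch of the
resolved pattern in br_mdb.c (illustrative only; the surrounding assignment is
an assumption, only the memset and the 'vid' member come from the two branches):

	struct br_ip ip;

	memset(&ip, 0, sizeof(ip));	/* from 'net': zero the on-stack struct */
	ip.proto = entry->addr.proto;	/* assumed pre-existing assignment */
	ip.vid = entry->vid;		/* from 'net-next': new 'vid' member */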
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 20
-rw-r--r--  drivers/acpi/acpi_lpss.c | 7
-rw-r--r--  drivers/acpi/acpica/accommon.h | 3
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 2
-rw-r--r--  drivers/acpi/acpica/acinterp.h | 2
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 1
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 1
-rw-r--r--  drivers/acpi/acpica/acobject.h | 1
-rw-r--r--  drivers/acpi/acpica/acstruct.h | 1
-rw-r--r--  drivers/acpi/acpica/acutils.h | 64
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 2
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 7
-rw-r--r--  drivers/acpi/acpica/dsutils.c | 4
-rw-r--r--  drivers/acpi/acpica/dswload.c | 17
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c | 2
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 2
-rw-r--r--  drivers/acpi/acpica/exconvrt.c | 9
-rw-r--r--  drivers/acpi/acpica/exdebug.c | 42
-rw-r--r--  drivers/acpi/acpica/exdump.c | 9
-rw-r--r--  drivers/acpi/acpica/exfield.c | 2
-rw-r--r--  drivers/acpi/acpica/exfldio.c | 52
-rw-r--r--  drivers/acpi/acpica/exmisc.c | 36
-rw-r--r--  drivers/acpi/acpica/exnames.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg2.c | 8
-rw-r--r--  drivers/acpi/acpica/exoparg3.c | 4
-rw-r--r--  drivers/acpi/acpica/exregion.c | 9
-rw-r--r--  drivers/acpi/acpica/exstorob.c | 18
-rw-r--r--  drivers/acpi/acpica/exutils.c | 32
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 125
-rw-r--r--  drivers/acpi/acpica/nsaccess.c | 16
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 10
-rw-r--r--  drivers/acpi/acpica/nsdump.c | 2
-rw-r--r--  drivers/acpi/acpica/nseval.c | 7
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 4
-rw-r--r--  drivers/acpi/acpica/nsparse.c | 7
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 2
-rw-r--r--  drivers/acpi/acpica/nssearch.c | 37
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 3
-rw-r--r--  drivers/acpi/acpica/nsxfeval.c | 5
-rw-r--r--  drivers/acpi/acpica/nsxfname.c | 29
-rw-r--r--  drivers/acpi/acpica/psutils.c | 7
-rw-r--r--  drivers/acpi/acpica/rscreate.c | 6
-rw-r--r--  drivers/acpi/acpica/rsmisc.c | 8
-rw-r--r--  drivers/acpi/acpica/rsutils.c | 13
-rw-r--r--  drivers/acpi/acpica/rsxface.c | 8
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 8
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 27
-rw-r--r--  drivers/acpi/acpica/tbfind.c | 21
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 7
-rw-r--r--  drivers/acpi/acpica/tbprint.c | 10
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 37
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 17
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 17
-rw-r--r--  drivers/acpi/acpica/utalloc.c | 6
-rw-r--r--  drivers/acpi/acpica/utbuffer.c | 4
-rw-r--r--  drivers/acpi/acpica/utcache.c | 6
-rw-r--r--  drivers/acpi/acpica/utcopy.c | 42
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 4
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 13
-rw-r--r--  drivers/acpi/acpica/utids.c | 100
-rw-r--r--  drivers/acpi/acpica/utmisc.c | 9
-rw-r--r--  drivers/acpi/acpica/utosi.c | 9
-rw-r--r--  drivers/acpi/acpica/utpredef.c | 4
-rw-r--r--  drivers/acpi/acpica/utprint.c | 6
-rw-r--r--  drivers/acpi/acpica/utstring.c | 33
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 8
-rw-r--r--  drivers/acpi/acpica/utxface.c | 8
-rw-r--r--  drivers/acpi/acpica/utxfinit.c | 10
-rw-r--r--  drivers/acpi/blacklist.c | 26
-rw-r--r--  drivers/acpi/internal.h | 1
-rw-r--r--  drivers/acpi/nfit.c | 134
-rw-r--r--  drivers/acpi/nfit.h | 20
-rw-r--r--  drivers/acpi/osl.c | 30
-rw-r--r--  drivers/acpi/resource.c | 162
-rw-r--r--  drivers/acpi/scan.c | 32
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/ata/ahci_platform.c | 9
-rw-r--r--  drivers/base/firmware_class.c | 16
-rw-r--r--  drivers/base/power/domain.c | 13
-rw-r--r--  drivers/base/power/wakeirq.c | 12
-rw-r--r--  drivers/base/power/wakeup.c | 31
-rw-r--r--  drivers/block/drbd/drbd_debugfs.c | 10
-rw-r--r--  drivers/block/loop.c | 2
-rw-r--r--  drivers/block/nvme-core.c | 9
-rw-r--r--  drivers/block/rbd.c | 111
-rw-r--r--  drivers/char/agp/intel-gtt.c | 2
-rw-r--r--  drivers/clk/at91/clk-h32mx.c | 4
-rw-r--r--  drivers/clk/at91/clk-main.c | 4
-rw-r--r--  drivers/clk/at91/clk-master.c | 8
-rw-r--r--  drivers/clk/at91/clk-pll.c | 8
-rw-r--r--  drivers/clk/at91/clk-system.c | 8
-rw-r--r--  drivers/clk/at91/clk-utmi.c | 8
-rw-r--r--  drivers/clk/bcm/clk-iproc-asiu.c | 6
-rw-r--r--  drivers/clk/bcm/clk-iproc-pll.c | 13
-rw-r--r--  drivers/clk/clk-stm32f4.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8173.c | 26
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 9
-rw-r--r--  drivers/clk/st/clk-flexgen.c | 4
-rw-r--r--  drivers/clk/st/clkgen-fsyn.c | 12
-rw-r--r--  drivers/clk/st/clkgen-mux.c | 10
-rw-r--r--  drivers/clk/st/clkgen-pll.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 1
-rw-r--r--  drivers/clocksource/timer-imx-gpt.c | 1
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 2
-rw-r--r--  drivers/crypto/mv_cesa.c | 4
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 6
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c | 7
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c | 17
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c | 70
-rw-r--r--  drivers/crypto/nx/nx-sha256.c | 43
-rw-r--r--  drivers/crypto/nx/nx-sha512.c | 44
-rw-r--r--  drivers/crypto/nx/nx.c | 3
-rw-r--r--  drivers/crypto/nx/nx.h | 14
-rw-r--r--  drivers/crypto/omap-des.c | 3
-rw-r--r--  drivers/edac/octeon_edac-l2c.c | 2
-rw-r--r--  drivers/edac/octeon_edac-lmc.c | 2
-rw-r--r--  drivers/edac/octeon_edac-pc.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 89
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 173
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 9
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 16
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 26
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 370
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 392
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 155
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 109
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 336
-rw-r--r--  drivers/hwmon/dell-smm-hwmon.c | 2
-rw-r--r--  drivers/hwmon/mcp3021.c | 14
-rw-r--r--  drivers/hwmon/nct7802.c | 2
-rw-r--r--  drivers/hwmon/w83627ehf.c | 26
-rw-r--r--  drivers/hwmon/w83792d.c | 27
-rw-r--r--  drivers/hwspinlock/Kconfig | 24
-rw-r--r--  drivers/hwspinlock/Makefile | 2
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c | 79
-rw-r--r--  drivers/hwspinlock/omap_hwspinlock.c | 18
-rw-r--r--  drivers/hwspinlock/qcom_hwspinlock.c | 181
-rw-r--r--  drivers/hwspinlock/sirf_hwspinlock.c | 136
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-jz4780.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-xgene-slimpro.c | 1
-rw-r--r--  drivers/i2c/i2c-core.c | 16
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_fs.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 6
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 183
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h | 23
-rw-r--r--  drivers/input/input.c | 34
-rw-r--r--  drivers/input/joystick/xpad.c | 78
-rw-r--r--  drivers/input/keyboard/imx_keypad.c | 4
-rw-r--r--  drivers/input/misc/axp20x-pek.c | 8
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 12
-rw-r--r--  drivers/input/mouse/synaptics.c | 2
-rw-r--r--  drivers/input/serio/Kconfig | 1
-rw-r--r--  drivers/input/touchscreen/Kconfig | 12
-rw-r--r--  drivers/input/touchscreen/Makefile | 1
-rw-r--r--  drivers/input/touchscreen/edt-ft5x06.c | 9
-rw-r--r--  drivers/input/touchscreen/of_touchscreen.c | 69
-rw-r--r--  drivers/input/touchscreen/tsc2005.c | 2
-rw-r--r--  drivers/input/touchscreen/wdt87xx_i2c.c | 1149
-rw-r--r--  drivers/irqchip/irq-gic.c | 2
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 10
-rw-r--r--  drivers/irqchip/irqchip.h | 19
-rw-r--r--  drivers/md/bitmap.c | 4
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/memory/omap-gpmc.c | 8
-rw-r--r--  drivers/misc/cxl/api.c | 12
-rw-r--r--  drivers/misc/cxl/context.c | 14
-rw-r--r--  drivers/misc/cxl/main.c | 2
-rw-r--r--  drivers/misc/cxl/pci.c | 2
-rw-r--r--  drivers/misc/cxl/vphb.c | 3
-rw-r--r--  drivers/misc/mei/bus.c | 16
-rw-r--r--  drivers/misc/mei/init.c | 2
-rw-r--r--  drivers/misc/mei/nfc.c | 3
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 49
-rw-r--r--  drivers/net/Kconfig | 18
-rw-r--r--  drivers/net/bonding/bond_main.c | 51
-rw-r--r--  drivers/net/can/c_can/c_can.c | 10
-rw-r--r--  drivers/net/can/dev.c | 7
-rw-r--r--  drivers/net/can/rcar_can.c | 16
-rw-r--r--  drivers/net/can/slcan.c | 2
-rw-r--r--  drivers/net/can/vcan.c | 3
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 3
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 11
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 17
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 88
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 172
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c | 59
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.h | 6
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 14
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 25
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 8
-rw-r--r--  drivers/net/hamradio/bpqether.c | 1
-rw-r--r--  drivers/net/macvtap.c | 66
-rw-r--r--  drivers/net/ntb_netdev.c | 58
-rw-r--r--  drivers/net/phy/Kconfig | 2
-rw-r--r--  drivers/net/tun.c | 67
-rw-r--r--  drivers/net/usb/cdc_ether.c | 8
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 2
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 63
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c | 7
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 8
-rw-r--r--  drivers/net/wan/z85230.c | 2
-rw-r--r--  drivers/ntb/Kconfig | 39
-rw-r--r--  drivers/ntb/Makefile | 5
-rw-r--r--  drivers/ntb/hw/Kconfig | 1
-rw-r--r--  drivers/ntb/hw/Makefile | 1
-rw-r--r--  drivers/ntb/hw/intel/Kconfig | 7
-rw-r--r--  drivers/ntb/hw/intel/Makefile | 1
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_intel.c | 2274
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_intel.h | 342
-rw-r--r--  drivers/ntb/ntb.c | 251
-rw-r--r--  drivers/ntb/ntb_hw.c | 1895
-rw-r--r--  drivers/ntb/ntb_hw.h | 256
-rw-r--r--  drivers/ntb/ntb_regs.h | 177
-rw-r--r--  drivers/ntb/ntb_transport.c | 1029
-rw-r--r--  drivers/ntb/test/Kconfig | 19
-rw-r--r--  drivers/ntb/test/Makefile | 2
-rw-r--r--  drivers/ntb/test/ntb_pingpong.c | 250
-rw-r--r--  drivers/ntb/test/ntb_tool.c | 556
-rw-r--r--  drivers/nvdimm/bus.c | 11
-rw-r--r--  drivers/platform/x86/Kconfig | 7
-rw-r--r--  drivers/platform/x86/Makefile | 1
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 183
-rw-r--r--  drivers/platform/x86/intel_pmc_ipc.c | 767
-rw-r--r--  drivers/platform/x86/tc1100-wmi.c | 2
-rw-r--r--  drivers/pnp/system.c | 35
-rw-r--r--  drivers/remoteproc/Kconfig | 13
-rw-r--r--  drivers/remoteproc/Makefile | 1
-rw-r--r--  drivers/remoteproc/da8xx_remoteproc.c | 3
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 115
-rw-r--r--  drivers/remoteproc/remoteproc_internal.h | 2
-rw-r--r--  drivers/remoteproc/ste_modem_rproc.c | 4
-rw-r--r--  drivers/remoteproc/wkup_m3_rproc.c | 257
-rw-r--r--  drivers/s390/kvm/virtio_ccw.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 52
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 227
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h | 6
-rw-r--r--  drivers/soc/qcom/spm.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 38
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 137
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 53
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.h | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 58
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.h | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 275
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.h | 11
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 12
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 53
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 1
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 182
-rw-r--r--  drivers/target/loopback/tcm_loop.h | 9
-rw-r--r--  drivers/target/sbp/sbp_target.c | 275
-rw-r--r--  drivers/target/sbp/sbp_target.h | 11
-rw-r--r--  drivers/target/target_core_alua.c | 463
-rw-r--r--  drivers/target/target_core_alua.h | 14
-rw-r--r--  drivers/target/target_core_configfs.c | 758
-rw-r--r--  drivers/target/target_core_device.c | 1289
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 230
-rw-r--r--  drivers/target/target_core_fabric_lib.c | 283
-rw-r--r--  drivers/target/target_core_file.c | 235
-rw-r--r--  drivers/target/target_core_file.h | 6
-rw-r--r--  drivers/target/target_core_hba.c | 97
-rw-r--r--  drivers/target/target_core_iblock.c | 96
-rw-r--r--  drivers/target/target_core_internal.h | 103
-rw-r--r--  drivers/target/target_core_pr.c | 379
-rw-r--r--  drivers/target/target_core_pr.h | 6
-rw-r--r--  drivers/target/target_core_pscsi.c | 55
-rw-r--r--  drivers/target/target_core_rd.c | 90
-rw-r--r--  drivers/target/target_core_sbc.c | 285
-rw-r--r--  drivers/target/target_core_spc.c | 90
-rw-r--r--  drivers/target/target_core_stat.c | 608
-rw-r--r--  drivers/target/target_core_tmr.c | 24
-rw-r--r--  drivers/target/target_core_tpg.c | 574
-rw-r--r--  drivers/target/target_core_transport.c | 249
-rw-r--r--  drivers/target/target_core_ua.c | 81
-rw-r--r--  drivers/target/target_core_ua.h | 6
-rw-r--r--  drivers/target/target_core_user.c | 266
-rw-r--r--  drivers/target/target_core_xcopy.c | 25
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 3
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 11
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 112
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 1
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 1
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 58
-rw-r--r--  drivers/tty/serial/omap-serial.c | 35
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 2
-rw-r--r--  drivers/usb/gadget/function/storage_common.c | 2
-rw-r--r--  drivers/usb/gadget/legacy/tcm_usb_gadget.c | 192
-rw-r--r--  drivers/usb/gadget/legacy/tcm_usb_gadget.h | 11
-rw-r--r--  drivers/vhost/Kconfig | 15
-rw-r--r--  drivers/vhost/scsi.c | 219
-rw-r--r--  drivers/vhost/vhost.c | 85
-rw-r--r--  drivers/vhost/vhost.h | 25
-rw-r--r--  drivers/video/fbdev/omap2/dss/dss.c | 9
-rw-r--r--  drivers/video/fbdev/stifb.c | 40
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 7
-rw-r--r--  drivers/virtio/virtio_pci_common.h | 2
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c | 13
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 24
-rw-r--r--  drivers/xen/xen-scsiback.c | 191
354 files changed, 14118 insertions, 10649 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index f15db002be8e..114cf48085ab 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -80,6 +80,26 @@ config ACPI_PROCFS_POWER
Say N to delete power /proc/acpi/ directories that have moved to /sys/
+config ACPI_REV_OVERRIDE_POSSIBLE
+ bool "Allow supported ACPI revision to be overriden"
+ depends on X86
+ default y
+ help
+ The platform firmware on some systems expects Linux to return "5" as
+ the supported ACPI revision which makes it expose system configuration
+ information in a special way.
+
+ For example, based on what ACPI exports as the supported revision,
+ Dell XPS 13 (2015) configures its audio device to either work in HDA
+ mode or in I2S mode, where the former is supposed to be used on Linux
+ until the latter is fully supported (in the kernel as well as in user
+ space).
+
+ This option enables a DMI-based quirk for the above Dell machine (so
+ that HDA audio is exposed by the platform firmware to the kernel) and
+ makes it possible to force the kernel to return "5" as the supported
+ ACPI revision via the "acpi_rev_override" command line switch.
+
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
default n
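The help text above describes two pieces: a DMI-based quirk and a command-line
switch. A hedged sketch of what such a DMI match looks like (identifiers are
illustrative, not necessarily the ones this commit adds to blacklist.c):

	/* Hedged sketch of a DMI quirk forcing _REV to report 5 on one machine */
	static const struct dmi_system_id acpi_rev_dmi_table[] = {
		{
		 .callback = acpi_rev_override_setup, /* hypothetical callback */
		 .ident = "DELL XPS 13 (2015)",
		 .matches = {
			     DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			     DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
			     },
		 },
		{}
	};

On other machines the same behavior can be requested manually by booting with
"acpi_rev_override" on the kernel command line, as the help text notes.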
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 569ee090343f..46b58abb08c5 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -352,13 +352,16 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
pdata->mmio_size = resource_size(rentry->res);
pdata->mmio_base = ioremap(rentry->res->start,
pdata->mmio_size);
- if (!pdata->mmio_base)
- goto err_out;
break;
}
acpi_dev_free_resource_list(&resource_list);
+ if (!pdata->mmio_base) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
pdata->dev_desc = dev_desc;
if (dev_desc->setup)
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 853aa2dbdb61..a8d8092ee391 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -59,5 +59,8 @@
#include "acglobal.h" /* All global variables */
#include "achware.h" /* Hardware defines and interfaces */
#include "acutils.h" /* Utility interfaces */
+#ifndef ACPI_USE_SYSTEM_CLIBRARY
+#include "acclib.h" /* C library interfaces */
+#endif /* !ACPI_USE_SYSTEM_CLIBRARY */
#endif /* __ACCOMMON_H__ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index a0c478784314..53f96a370762 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -61,6 +61,8 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
#if (!ACPI_REDUCED_HARDWARE)
ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
+ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs32);
+ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs64);
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 1886bde54b5d..7ac98000b46b 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -468,6 +468,8 @@ void acpi_ex_eisa_id_to_string(char *dest, u64 compressed_id);
void acpi_ex_integer_to_string(char *dest, u64 value);
+void acpi_ex_pci_cls_to_string(char *dest, u8 class_code[3]);
+
u8 acpi_is_valid_space_id(u8 space_id);
/*
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index ffdb956391f6..bc600969c6a1 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -213,6 +213,7 @@ struct acpi_table_list {
#define ACPI_TABLE_INDEX_DSDT (0)
#define ACPI_TABLE_INDEX_FACS (1)
+#define ACPI_TABLE_INDEX_X_FACS (2)
struct acpi_find_context {
char *search_for;
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 952fbe0b7231..0dd088290d80 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -66,6 +66,7 @@
#define ACPI_NS_PREFIX_IS_SCOPE 0x10
#define ACPI_NS_EXTERNAL 0x20
#define ACPI_NS_TEMPORARY 0x40
+#define ACPI_NS_OVERRIDE_IF_FOUND 0x80
/* Flags for acpi_ns_walk_namespace */
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 3e9720e1f34f..c81d98d09cac 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -335,6 +335,7 @@ struct acpi_object_reference {
void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
struct acpi_namespace_node *node; /* ref_of or Namepath */
union acpi_operand_object **where; /* Target of Index */
+ u8 *index_pointer; /* Used for Buffers and Strings */
u32 value; /* Used for Local/Arg/Index/ddb_handle */
};
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 87c7860b3394..44997ca02ae2 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -82,6 +82,7 @@ struct acpi_walk_state {
u8 return_used;
u8 scope_depth;
u8 pass_number; /* Parse pass during table load */
+ u8 namespace_override; /* Override existing objects */
u8 result_size; /* Total elements for the result stack */
u8 result_count; /* Current number of occupied elements of result stack */
u32 aml_offset;
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index d49f5c7a20d9..6de0d3573037 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -205,66 +205,6 @@ acpi_status acpi_ut_hardware_initialize(void);
void acpi_ut_subsystem_shutdown(void);
-/*
- * utclib - Local implementations of C library functions
- */
-#ifndef ACPI_USE_SYSTEM_CLIBRARY
-
-acpi_size acpi_ut_strlen(const char *string);
-
-char *acpi_ut_strchr(const char *string, int ch);
-
-char *acpi_ut_strcpy(char *dst_string, const char *src_string);
-
-char *acpi_ut_strncpy(char *dst_string,
- const char *src_string, acpi_size count);
-
-int acpi_ut_memcmp(const char *buffer1, const char *buffer2, acpi_size count);
-
-int acpi_ut_strncmp(const char *string1, const char *string2, acpi_size count);
-
-int acpi_ut_strcmp(const char *string1, const char *string2);
-
-char *acpi_ut_strcat(char *dst_string, const char *src_string);
-
-char *acpi_ut_strncat(char *dst_string,
- const char *src_string, acpi_size count);
-
-u32 acpi_ut_strtoul(const char *string, char **terminator, u32 base);
-
-char *acpi_ut_strstr(char *string1, char *string2);
-
-void *acpi_ut_memcpy(void *dest, const void *src, acpi_size count);
-
-void *acpi_ut_memset(void *dest, u8 value, acpi_size count);
-
-int acpi_ut_to_upper(int c);
-
-int acpi_ut_to_lower(int c);
-
-extern const u8 _acpi_ctype[];
-
-#define _ACPI_XA 0x00 /* extra alphabetic - not supported */
-#define _ACPI_XS 0x40 /* extra space */
-#define _ACPI_BB 0x00 /* BEL, BS, etc. - not supported */
-#define _ACPI_CN 0x20 /* CR, FF, HT, NL, VT */
-#define _ACPI_DI 0x04 /* '0'-'9' */
-#define _ACPI_LO 0x02 /* 'a'-'z' */
-#define _ACPI_PU 0x10 /* punctuation */
-#define _ACPI_SP 0x08 /* space, tab, CR, LF, VT, FF */
-#define _ACPI_UP 0x01 /* 'A'-'Z' */
-#define _ACPI_XD 0x80 /* '0'-'9', 'A'-'F', 'a'-'f' */
-
-#define ACPI_IS_DIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_DI))
-#define ACPI_IS_SPACE(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_SP))
-#define ACPI_IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_XD))
-#define ACPI_IS_UPPER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_UP))
-#define ACPI_IS_LOWER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO))
-#define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_XS | _ACPI_PU))
-#define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP))
-
-#endif /* !ACPI_USE_SYSTEM_CLIBRARY */
-
#define ACPI_IS_ASCII(c) ((c) < 0x80)
/*
@@ -430,6 +370,10 @@ acpi_status
acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
struct acpi_pnp_device_id_list ** return_cid_list);
+acpi_status
+acpi_ut_execute_CLS(struct acpi_namespace_node *device_node,
+ struct acpi_pnp_device_id **return_id);
+
/*
* utlock - reader/writer locks
*/
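The removal above, together with the conversions in the hunks that follow
(dsfield.c, dsobject.c, evgpeinit.c, nsdump.c, nsrepair2.c, and others),
amounts to the following mapping from the old local wrappers to the standard
C library (a summary of this diff, not code from it):

	/*
	 *   ACPI_MEMSET(dst, 0, n)      ->  memset(dst, 0, n)
	 *   ACPI_MEMCPY(dst, src, n)    ->  memcpy(dst, src, n)
	 *   ACPI_STRLEN(s)              ->  strlen(s)
	 *   ACPI_STRCMP(a, b)           ->  strcmp(a, b)
	 *   ACPI_STRTOUL(s, end, base)  ->  strtoul(s, end, base)
	 *   ACPI_TOUPPER(c)             ->  toupper(c)
	 *   ACPI_IS_PRINT(c)            ->  isprint(c)
	 */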
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 43b40de90484..20de148594fd 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -502,7 +502,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
}
}
- ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info));
+ memset(&info, 0, sizeof(struct acpi_create_field_info));
/* Second arg is the field flags */
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index bbe74bcebbae..95779e8ec3bb 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -207,7 +207,7 @@ acpi_ds_initialize_objects(u32 table_index,
/* Set all init info to zero */
- ACPI_MEMSET(&info, 0, sizeof(struct acpi_init_walk_info));
+ memset(&info, 0, sizeof(struct acpi_init_walk_info));
info.owner_id = owner_id;
info.table_index = table_index;
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 8a7b07b6adc8..2beb7fd674ae 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -339,8 +339,8 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
/* Initialize buffer from the byte_list (if present) */
if (byte_list) {
- ACPI_MEMCPY(obj_desc->buffer.pointer,
- byte_list->named.data, byte_list_length);
+ memcpy(obj_desc->buffer.pointer, byte_list->named.data,
+ byte_list_length);
}
}
@@ -750,8 +750,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
case ACPI_TYPE_STRING:
obj_desc->string.pointer = op->common.value.string;
- obj_desc->string.length =
- (u32) ACPI_STRLEN(op->common.value.string);
+ obj_desc->string.length = (u32)strlen(op->common.value.string);
/*
* The string is contained in the ACPI table, don't ever try
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index deeddd6d2f05..ebc577baeaf9 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -572,8 +572,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
obj_desc =
acpi_ut_create_string_object((acpi_size) name_length);
- ACPI_STRNCPY(obj_desc->string.pointer,
- name_string, name_length);
+ strncpy(obj_desc->string.pointer,
+ name_string, name_length);
status = AE_OK;
} else {
/*
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 843942fb4be5..845ff44919c3 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -315,10 +315,19 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
flags = ACPI_NS_NO_UPSEARCH;
if ((walk_state->opcode != AML_SCOPE_OP) &&
(!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) {
- flags |= ACPI_NS_ERROR_IF_FOUND;
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "[%s] Cannot already exist\n",
- acpi_ut_get_type_name(object_type)));
+ if (walk_state->namespace_override) {
+ flags |= ACPI_NS_OVERRIDE_IF_FOUND;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "[%s] Override allowed\n",
+ acpi_ut_get_type_name
+ (object_type)));
+ } else {
+ flags |= ACPI_NS_ERROR_IF_FOUND;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "[%s] Cannot already exist\n",
+ acpi_ut_get_type_name
+ (object_type)));
+ }
} else {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"[%s] Both Find or Create allowed\n",
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 8840296d5b20..ea4c0d3fca2d 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -377,7 +377,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
/* 4) The last two characters of the name are the hex GPE Number */
- gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
+ gpe_number = strtoul(&name[2], NULL, 16);
if (gpe_number == ACPI_UINT32_MAX) {
/* Conversion failed; invalid method, just ignore it */
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 6e0df2b9d5a4..24a4c5c2b124 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -470,7 +470,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_NO_MEMORY);
}
- ACPI_MEMCPY(table, table_header, length);
+ memcpy(table, table_header, length);
break;
default:
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 89a976b4ccf2..075d654c837f 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -227,9 +227,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
/* Copy the integer to the buffer, LSB first */
new_buf = return_desc->buffer.pointer;
- ACPI_MEMCPY(new_buf,
- &obj_desc->integer.value,
- acpi_gbl_integer_byte_width);
+ memcpy(new_buf,
+ &obj_desc->integer.value, acpi_gbl_integer_byte_width);
break;
case ACPI_TYPE_STRING:
@@ -252,8 +251,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
/* Copy the string to the buffer */
new_buf = return_desc->buffer.pointer;
- ACPI_STRNCPY((char *)new_buf, (char *)obj_desc->string.pointer,
- obj_desc->string.length);
+ strncpy((char *)new_buf, (char *)obj_desc->string.pointer,
+ obj_desc->string.length);
break;
default:
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index e67d0aca3fe6..815442bbd051 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -76,6 +76,8 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
{
u32 i;
u32 timer;
+ union acpi_operand_object *object_desc;
+ u32 value;
ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
@@ -254,8 +256,44 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
object)->object,
level + 4, 0);
} else {
- acpi_ex_do_debug_object(source_desc->reference.
- object, level + 4, 0);
+ object_desc = source_desc->reference.object;
+ value = source_desc->reference.value;
+
+ switch (object_desc->common.type) {
+ case ACPI_TYPE_BUFFER:
+
+ acpi_os_printf("Buffer[%u] = 0x%2.2X\n",
+ value,
+ *source_desc->reference.
+ index_pointer);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ acpi_os_printf
+ ("String[%u] = \"%c\" (0x%2.2X)\n",
+ value,
+ *source_desc->reference.
+ index_pointer,
+ *source_desc->reference.
+ index_pointer);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ acpi_os_printf("Package[%u] = ", value);
+ acpi_ex_do_debug_object(*source_desc->
+ reference.where,
+ level + 4, 0);
+ break;
+
+ default:
+
+ acpi_os_printf
+ ("Unknown Reference object type %X\n",
+ object_desc->common.type);
+ break;
+ }
}
}
break;
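For illustration, given ASL along the lines of Store(Index(BUF0, 2), Debug)
where the indexed buffer byte happens to be 0x7F (values assumed for the
example), the new buffer branch above would print:

	Buffer[2] = 0x7F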
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 1da52bef632e..401e7edcd419 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -224,7 +224,7 @@ static struct acpi_exdump_info acpi_ex_dump_index_field[5] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.data_obj), "Data Object"}
};
-static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
+static struct acpi_exdump_info acpi_ex_dump_reference[9] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_reference), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.class), "Class"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
@@ -232,6 +232,8 @@ static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(reference.node), "Node"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.index_pointer),
+ "Index Pointer"},
{ACPI_EXD_REFERENCE, 0, NULL}
};
@@ -1005,14 +1007,13 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
} else if (obj_desc->reference.object) {
if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
ACPI_DESC_TYPE_OPERAND) {
- acpi_os_printf(" Target: %p",
+ acpi_os_printf("%22s %p", "Target :",
obj_desc->reference.object);
if (obj_desc->reference.class == ACPI_REFCLASS_TABLE) {
acpi_os_printf(" Table Index: %X\n",
obj_desc->reference.value);
} else {
- acpi_os_printf(" Target: %p [%s]\n",
- obj_desc->reference.object,
+ acpi_os_printf(" [%s]\n",
acpi_ut_get_type_name(((union
acpi_operand_object
*)
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index c161dd974f74..61fd9c7b88bc 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -428,7 +428,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
}
buffer = buffer_desc->buffer.pointer;
- ACPI_MEMCPY(buffer, source_desc->buffer.pointer, length);
+ memcpy(buffer, source_desc->buffer.pointer, length);
/* Lock entire transaction if requested */
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 725a3746a2df..70b7bbbb860b 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -416,22 +416,22 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
* Copy the data from the source buffer.
* Length is the field width in bytes.
*/
- ACPI_MEMCPY(value,
- (obj_desc->buffer_field.buffer_obj)->buffer.
- pointer +
- obj_desc->buffer_field.base_byte_offset +
- field_datum_byte_offset,
- obj_desc->common_field.access_byte_width);
+ memcpy(value,
+ (obj_desc->buffer_field.buffer_obj)->buffer.
+ pointer +
+ obj_desc->buffer_field.base_byte_offset +
+ field_datum_byte_offset,
+ obj_desc->common_field.access_byte_width);
} else {
/*
* Copy the data to the target buffer.
* Length is the field width in bytes.
*/
- ACPI_MEMCPY((obj_desc->buffer_field.buffer_obj)->buffer.
- pointer +
- obj_desc->buffer_field.base_byte_offset +
- field_datum_byte_offset, value,
- obj_desc->common_field.access_byte_width);
+ memcpy((obj_desc->buffer_field.buffer_obj)->buffer.
+ pointer +
+ obj_desc->buffer_field.base_byte_offset +
+ field_datum_byte_offset, value,
+ obj_desc->common_field.access_byte_width);
}
status = AE_OK;
@@ -703,7 +703,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
}
- ACPI_MEMSET(buffer, 0, buffer_length);
+ memset(buffer, 0, buffer_length);
access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
/* Handle the simple case here */
@@ -720,7 +720,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
status =
acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
ACPI_READ);
- ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
+ memcpy(buffer, &raw_datum, buffer_length);
}
return_ACPI_STATUS(status);
@@ -793,9 +793,9 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
/* Write merged datum to target buffer */
- ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum,
- ACPI_MIN(obj_desc->common_field.access_byte_width,
- buffer_length - buffer_offset));
+ memcpy(((char *)buffer) + buffer_offset, &merged_datum,
+ ACPI_MIN(obj_desc->common_field.access_byte_width,
+ buffer_length - buffer_offset));
buffer_offset += obj_desc->common_field.access_byte_width;
merged_datum =
@@ -811,9 +811,9 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
/* Write the last datum to the buffer */
- ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum,
- ACPI_MIN(obj_desc->common_field.access_byte_width,
- buffer_length - buffer_offset));
+ memcpy(((char *)buffer) + buffer_offset, &merged_datum,
+ ACPI_MIN(obj_desc->common_field.access_byte_width,
+ buffer_length - buffer_offset));
return_ACPI_STATUS(AE_OK);
}
@@ -878,7 +878,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
* at Byte zero. All unused (upper) bytes of the
* buffer will be 0.
*/
- ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length);
+ memcpy((char *)new_buffer, (char *)buffer, buffer_length);
buffer = new_buffer;
buffer_length = required_length;
}
@@ -918,9 +918,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Get initial Datum from the input buffer */
- ACPI_MEMCPY(&raw_datum, buffer,
- ACPI_MIN(obj_desc->common_field.access_byte_width,
- buffer_length - buffer_offset));
+ memcpy(&raw_datum, buffer,
+ ACPI_MIN(obj_desc->common_field.access_byte_width,
+ buffer_length - buffer_offset));
merged_datum =
raw_datum << obj_desc->common_field.start_field_bit_offset;
@@ -970,9 +970,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Get the next input datum from the buffer */
buffer_offset += obj_desc->common_field.access_byte_width;
- ACPI_MEMCPY(&raw_datum, ((char *)buffer) + buffer_offset,
- ACPI_MIN(obj_desc->common_field.access_byte_width,
- buffer_length - buffer_offset));
+ memcpy(&raw_datum, ((char *)buffer) + buffer_offset,
+ ACPI_MIN(obj_desc->common_field.access_byte_width,
+ buffer_length - buffer_offset));
merged_datum |=
raw_datum << obj_desc->common_field.start_field_bit_offset;
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index b56fc9d6f48e..d02afece0f10 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -209,8 +209,8 @@ acpi_ex_concat_template(union acpi_operand_object *operand0,
* end_tag descriptor is copied from Operand1.
*/
new_buf = return_desc->buffer.pointer;
- ACPI_MEMCPY(new_buf, operand0->buffer.pointer, length0);
- ACPI_MEMCPY(new_buf + length0, operand1->buffer.pointer, length1);
+ memcpy(new_buf, operand0->buffer.pointer, length0);
+ memcpy(new_buf + length0, operand1->buffer.pointer, length1);
/* Insert end_tag and set the checksum to zero, means "ignore checksum" */
@@ -318,14 +318,14 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Copy the first integer, LSB first */
- ACPI_MEMCPY(new_buf, &operand0->integer.value,
- acpi_gbl_integer_byte_width);
+ memcpy(new_buf, &operand0->integer.value,
+ acpi_gbl_integer_byte_width);
/* Copy the second integer (LSB first) after the first */
- ACPI_MEMCPY(new_buf + acpi_gbl_integer_byte_width,
- &local_operand1->integer.value,
- acpi_gbl_integer_byte_width);
+ memcpy(new_buf + acpi_gbl_integer_byte_width,
+ &local_operand1->integer.value,
+ acpi_gbl_integer_byte_width);
break;
case ACPI_TYPE_STRING:
@@ -346,9 +346,9 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Concatenate the strings */
- ACPI_STRCPY(new_buf, operand0->string.pointer);
- ACPI_STRCPY(new_buf + operand0->string.length,
- local_operand1->string.pointer);
+ strcpy(new_buf, operand0->string.pointer);
+ strcpy(new_buf + operand0->string.length,
+ local_operand1->string.pointer);
break;
case ACPI_TYPE_BUFFER:
@@ -369,11 +369,11 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Concatenate the buffers */
- ACPI_MEMCPY(new_buf, operand0->buffer.pointer,
- operand0->buffer.length);
- ACPI_MEMCPY(new_buf + operand0->buffer.length,
- local_operand1->buffer.pointer,
- local_operand1->buffer.length);
+ memcpy(new_buf, operand0->buffer.pointer,
+ operand0->buffer.length);
+ memcpy(new_buf + operand0->buffer.length,
+ local_operand1->buffer.pointer,
+ local_operand1->buffer.length);
break;
default:
@@ -660,9 +660,9 @@ acpi_ex_do_logical_op(u16 opcode,
/* Lexicographic compare: compare the data bytes */
- compare = ACPI_MEMCMP(operand0->buffer.pointer,
- local_operand1->buffer.pointer,
- (length0 > length1) ? length1 : length0);
+ compare = memcmp(operand0->buffer.pointer,
+ local_operand1->buffer.pointer,
+ (length0 > length1) ? length1 : length0);
switch (opcode) {
case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 453b00c30177..20e87813c7d7 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -192,7 +192,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
char_buf[4] = '\0';
if (name_string) {
- ACPI_STRCAT(name_string, char_buf);
+ strcat(name_string, char_buf);
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Appended to - %s\n", name_string));
} else {
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index fcc618aa2061..b8944ebb1081 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -337,8 +337,8 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
* Copy the raw buffer data with no transform.
* (NULL terminated already)
*/
- ACPI_MEMCPY(return_desc->string.pointer,
- operand[0]->buffer.pointer, length);
+ memcpy(return_desc->string.pointer,
+ operand[0]->buffer.pointer, length);
break;
case AML_CONCAT_RES_OP:
@@ -380,6 +380,8 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
return_desc->reference.target_type =
ACPI_TYPE_BUFFER_FIELD;
+ return_desc->reference.index_pointer =
+ &(operand[0]->buffer.pointer[index]);
break;
case ACPI_TYPE_BUFFER:
@@ -391,6 +393,8 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
return_desc->reference.target_type =
ACPI_TYPE_BUFFER_FIELD;
+ return_desc->reference.index_pointer =
+ &(operand[0]->buffer.pointer[index]);
break;
case ACPI_TYPE_PACKAGE:
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 1c64a988cbee..fa100b3b92ee 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -237,8 +237,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* We have a buffer, copy the portion requested */
- ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,
- length);
+ memcpy(buffer, operand[0]->string.pointer + index,
+ length);
}
/* Set the length of the new String/Buffer */
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index f6c2f5499935..b4a5e44c00dd 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -517,15 +517,14 @@ acpi_ex_data_table_space_handler(u32 function,
switch (function) {
case ACPI_READ:
- ACPI_MEMCPY(ACPI_CAST_PTR(char, value),
- ACPI_PHYSADDR_TO_PTR(address),
- ACPI_DIV_8(bit_width));
+ memcpy(ACPI_CAST_PTR(char, value),
+ ACPI_PHYSADDR_TO_PTR(address), ACPI_DIV_8(bit_width));
break;
case ACPI_WRITE:
- ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address),
- ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
+ memcpy(ACPI_PHYSADDR_TO_PTR(address),
+ ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
break;
default:
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 6fa3c8d8fc5f..e1d4f4d51b97 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -100,9 +100,9 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
/* Clear existing buffer and copy in the new one */
- ACPI_MEMSET(target_desc->buffer.pointer, 0,
- target_desc->buffer.length);
- ACPI_MEMCPY(target_desc->buffer.pointer, buffer, length);
+ memset(target_desc->buffer.pointer, 0,
+ target_desc->buffer.length);
+ memcpy(target_desc->buffer.pointer, buffer, length);
#ifdef ACPI_OBSOLETE_BEHAVIOR
/*
@@ -129,8 +129,8 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
} else {
/* Truncate the source, copy only what will fit */
- ACPI_MEMCPY(target_desc->buffer.pointer, buffer,
- target_desc->buffer.length);
+ memcpy(target_desc->buffer.pointer, buffer,
+ target_desc->buffer.length);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Truncating source buffer from %X to %X\n",
@@ -187,9 +187,9 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
* String will fit in existing non-static buffer.
* Clear old string and copy in the new one
*/
- ACPI_MEMSET(target_desc->string.pointer, 0,
- (acpi_size) target_desc->string.length + 1);
- ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
+ memset(target_desc->string.pointer, 0,
+ (acpi_size) target_desc->string.length + 1);
+ memcpy(target_desc->string.pointer, buffer, length);
} else {
/*
* Free the current buffer, then allocate a new buffer
@@ -210,7 +210,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
}
target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
- ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
+ memcpy(target_desc->string.pointer, buffer, length);
}
/* Set the new target length */
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 3f4225e95d93..30c3f464fda5 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -380,6 +380,38 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
/*******************************************************************************
*
+ * FUNCTION: acpi_ex_pci_cls_to_string
+ *
+ * PARAMETERS: out_string - Where to put the converted string (7 bytes)
+ *              class_code - PCI class code to be converted (3 bytes)
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert 3-byte PCI class code to string representation.
+ * Return buffer must be large enough to hold the string. The
+ * string returned is always exactly of length
+ * ACPI_PCICLS_STRING_SIZE (includes null terminator).
+ *
+ ******************************************************************************/
+
+void acpi_ex_pci_cls_to_string(char *out_string, u8 class_code[3])
+{
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* All 3 bytes are hexadecimal */
+
+ out_string[0] = acpi_ut_hex_to_ascii_char((u64)class_code[0], 4);
+ out_string[1] = acpi_ut_hex_to_ascii_char((u64)class_code[0], 0);
+ out_string[2] = acpi_ut_hex_to_ascii_char((u64)class_code[1], 4);
+ out_string[3] = acpi_ut_hex_to_ascii_char((u64)class_code[1], 0);
+ out_string[4] = acpi_ut_hex_to_ascii_char((u64)class_code[2], 4);
+ out_string[5] = acpi_ut_hex_to_ascii_char((u64)class_code[2], 0);
+ out_string[6] = 0;
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_is_valid_space_id
*
* PARAMETERS: space_id - ID to be validated
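A hedged usage sketch of the new helper (the class-code bytes are illustrative;
per the description above the output buffer must hold ACPI_PCICLS_STRING_SIZE,
i.e. 7 bytes):

	u8 class_code[3] = { 0x01, 0x06, 0x01 };	/* e.g. mass storage / SATA / AHCI */
	char cls_string[7];				/* ACPI_PCICLS_STRING_SIZE bytes */

	acpi_ex_pci_cls_to_string(cls_string, class_code);
	/* cls_string now holds "010601": two hex digits per byte, NUL-terminated */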
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 3b3767698827..52dfd0d050fa 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -50,6 +50,13 @@
ACPI_MODULE_NAME("hwxfsleep")
/* Local prototypes */
+#if (!ACPI_REDUCED_HARDWARE)
+static acpi_status
+acpi_hw_set_firmware_waking_vectors(struct acpi_table_facs *facs,
+ acpi_physical_address physical_address,
+ acpi_physical_address physical_address64);
+#endif
+
static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
/*
@@ -72,6 +79,7 @@ static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
/*
* These functions are removed for the ACPI_REDUCED_HARDWARE case:
+ * acpi_set_firmware_waking_vectors
* acpi_set_firmware_waking_vector
* acpi_set_firmware_waking_vector64
* acpi_enter_sleep_state_s4bios
@@ -80,20 +88,26 @@ static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
- * FUNCTION: acpi_set_firmware_waking_vector
+ * FUNCTION: acpi_hw_set_firmware_waking_vectors
*
- * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
+ * PARAMETERS: facs - Pointer to FACS table
+ * physical_address - 32-bit physical address of ACPI real mode
* entry point.
+ * physical_address64 - 64-bit physical address of ACPI protected
+ * mode entry point.
*
* RETURN: Status
*
- * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
+ * DESCRIPTION: Sets the firmware_waking_vector fields of the FACS
*
******************************************************************************/
-acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
+static acpi_status
+acpi_hw_set_firmware_waking_vectors(struct acpi_table_facs *facs,
+ acpi_physical_address physical_address,
+ acpi_physical_address physical_address64)
{
- ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
+ ACPI_FUNCTION_TRACE(acpi_hw_set_firmware_waking_vectors);
/*
@@ -106,17 +120,92 @@ acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
/* Set the 32-bit vector */
- acpi_gbl_FACS->firmware_waking_vector = physical_address;
+ facs->firmware_waking_vector = (u32)physical_address;
- /* Clear the 64-bit vector if it exists */
+ if (facs->length > 32) {
+ if (facs->version >= 1) {
- if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
- acpi_gbl_FACS->xfirmware_waking_vector = 0;
+ /* Set the 64-bit vector */
+
+ facs->xfirmware_waking_vector = physical_address64;
+ } else {
+ /* Clear the 64-bit vector if it exists */
+
+ facs->xfirmware_waking_vector = 0;
+ }
}
return_ACPI_STATUS(AE_OK);
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_firmware_waking_vectors
+ *
+ * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
+ * entry point.
+ * physical_address64 - 64-bit physical address of ACPI protected
+ * mode entry point.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Sets the firmware_waking_vector fields of the FACS
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_set_firmware_waking_vectors(acpi_physical_address physical_address,
+ acpi_physical_address physical_address64)
+{
+
+ ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vectors);
+
+ /* If Hardware Reduced flag is set, there is no FACS */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS (AE_OK);
+ }
+
+ if (acpi_gbl_facs32) {
+ (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs32,
+ physical_address,
+ physical_address64);
+ }
+ if (acpi_gbl_facs64) {
+ (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs64,
+ physical_address,
+ physical_address64);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vectors)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_firmware_waking_vector
+ *
+ * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
+ * entry point.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
+ *
+ ******************************************************************************/
+acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
+
+ status = acpi_set_firmware_waking_vectors((acpi_physical_address)
+ physical_address, 0);
+
+ return_ACPI_STATUS(status);
+}
+
ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
#if ACPI_MACHINE_WIDTH == 64
@@ -136,25 +225,19 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
******************************************************************************/
acpi_status acpi_set_firmware_waking_vector64(u64 physical_address)
{
- ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
-
-
- /* Determine if the 64-bit vector actually exists */
+ acpi_status status;
- if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
- return_ACPI_STATUS(AE_NOT_EXIST);
- }
+ ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
- /* Clear 32-bit vector, set the 64-bit X_ vector */
+ status = acpi_set_firmware_waking_vectors(0,
+ (acpi_physical_address)
+ physical_address);
- acpi_gbl_FACS->firmware_waking_vector = 0;
- acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
#endif
-
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state_s4bios
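A minimal usage sketch of the new two-vector interface introduced above (the
address variables are placeholders, not names from this patch):

	acpi_status status;

	/* Set the legacy 32-bit vector and the 64-bit X_ vector in one call */
	status = acpi_set_firmware_waking_vectors((acpi_physical_address)wake32,
						  (acpi_physical_address)wake64);

The older acpi_set_firmware_waking_vector() and
acpi_set_firmware_waking_vector64() entry points remain as thin wrappers
around this call, as the hunks above show.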
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 24fa19a76d70..c687b9979fb2 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -102,7 +102,7 @@ acpi_status acpi_ns_root_initialize(void)
/* _OSI is optional for now, will be permanent later */
- if (!ACPI_STRCMP(init_val->name, "_OSI")
+ if (!strcmp(init_val->name, "_OSI")
&& !acpi_gbl_create_osi_method) {
continue;
}
@@ -180,7 +180,7 @@ acpi_status acpi_ns_root_initialize(void)
/* Build an object around the static string */
- obj_desc->string.length = (u32)ACPI_STRLEN(val);
+ obj_desc->string.length = (u32)strlen(val);
obj_desc->string.pointer = val;
obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
break;
@@ -203,7 +203,7 @@ acpi_status acpi_ns_root_initialize(void)
/* Special case for ACPI Global Lock */
- if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
+ if (strcmp(init_val->name, "_GL_") == 0) {
acpi_gbl_global_lock_mutex = obj_desc;
/* Create additional counting semaphore for global lock */
@@ -304,7 +304,9 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT);
+ local_flags = flags &
+ ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_OVERRIDE_IF_FOUND |
+ ACPI_NS_SEARCH_PARENT);
*return_node = ACPI_ENTRY_NOT_FOUND;
acpi_gbl_ns_lookup_count++;
@@ -547,6 +549,12 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
if (flags & ACPI_NS_ERROR_IF_FOUND) {
local_flags |= ACPI_NS_ERROR_IF_FOUND;
}
+
+ /* Set override flag according to caller */
+
+ if (flags & ACPI_NS_OVERRIDE_IF_FOUND) {
+ local_flags |= ACPI_NS_OVERRIDE_IF_FOUND;
+ }
}
/* Extract one ACPI name from the front of the pathname */
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 1a8b39c8d969..da55a1c60da1 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -187,8 +187,8 @@ acpi_ns_convert_to_string(union acpi_operand_object *original_object,
* Copy the raw buffer data with no transform. String is already NULL
* terminated at Length+1.
*/
- ACPI_MEMCPY(new_object->string.pointer,
- original_object->buffer.pointer, length);
+ memcpy(new_object->string.pointer,
+ original_object->buffer.pointer, length);
break;
default:
@@ -251,9 +251,9 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
return (AE_NO_MEMORY);
}
- ACPI_MEMCPY(new_object->buffer.pointer,
- original_object->string.pointer,
- original_object->string.length);
+ memcpy(new_object->buffer.pointer,
+ original_object->string.pointer,
+ original_object->string.length);
break;
case ACPI_TYPE_PACKAGE:
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index d259393505fa..0f1daba640e7 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -101,7 +101,7 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
while (num_segments) {
for (i = 0; i < 4; i++) {
- ACPI_IS_PRINT(pathname[i]) ?
+ isprint((int)pathname[i]) ?
acpi_os_printf("%c", pathname[i]) :
acpi_os_printf("?");
}
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 7bcc68f57afa..80670cb32b5a 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -59,15 +59,14 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
*
* FUNCTION: acpi_ns_evaluate
*
- * PARAMETERS: info - Evaluation info block, contains:
+ * PARAMETERS: info - Evaluation info block, contains these fields
+ * and more:
* prefix_node - Prefix or Method/Object Node to execute
* relative_path - Name of method to execute, If NULL, the
* Node is the object to execute
* parameters - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
- * return_object - Where to put method's return value (if
- * any). If NULL, no value is returned.
* parameter_type - Type of Parameter list
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
@@ -440,7 +439,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
/* Initialize the evaluation information block */
- ACPI_MEMSET(info, 0, sizeof(struct acpi_evaluate_info));
+ memset(info, 0, sizeof(struct acpi_evaluate_info));
info->prefix_node = parent_node;
/*
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 4a85c4517988..b744a53618eb 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -90,7 +90,7 @@ acpi_status acpi_ns_initialize_objects(void)
/* Set all init info to zero */
- ACPI_MEMSET(&info, 0, sizeof(struct acpi_init_walk_info));
+ memset(&info, 0, sizeof(struct acpi_init_walk_info));
/* Walk entire namespace from the supplied root */
@@ -566,7 +566,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
- ACPI_MEMSET(info, 0, sizeof(struct acpi_evaluate_info));
+ memset(info, 0, sizeof(struct acpi_evaluate_info));
info->prefix_node = device_node;
info->relative_pathname = METHOD_NAME__INI;
info->parameters = NULL;
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index c95a119767b5..57a4cfe547e4 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -117,6 +117,13 @@ acpi_ns_one_complete_parse(u32 pass_number,
(u8) pass_number);
}
+ /* Found OSDT table, enable the namespace override feature */
+
+ if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) &&
+ pass_number == ACPI_IMODE_LOAD_PASS1) {
+ walk_state->namespace_override = TRUE;
+ }
+
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup;
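The OSDT gate added above reduces to a four-byte signature compare plus a parse-pass check: only the first load pass arms the walk state's override flag. A standalone sketch of that predicate, assuming ACPI_IMODE_LOAD_PASS1 equals pass 1 and treating ACPI_COMPARE_NAME as a plain 4-byte compare:

#include <string.h>

/* Sketch only: simplified stand-in for the nsparse.c check above */
static int osdt_enables_override(const char signature[4], int pass_number)
{
        return memcmp(signature, "OSDT", 4) == 0 && pass_number == 1;
}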
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index c30672d23878..0515a70f42a4 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -580,7 +580,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
* # is a hex digit.
*/
for (dest = new_string->string.pointer; *source; dest++, source++) {
- *dest = (char)ACPI_TOUPPER(*source);
+ *dest = (char)toupper((int)*source);
}
acpi_ut_remove_reference(return_object);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 4a9d4a66016e..d73904013830 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -325,8 +325,41 @@ acpi_ns_search_and_enter(u32 target_name,
* If we found it AND the request specifies that a find is an error,
* return the error
*/
- if ((status == AE_OK) && (flags & ACPI_NS_ERROR_IF_FOUND)) {
- status = AE_ALREADY_EXISTS;
+ if (status == AE_OK) {
+
+ /* The node was found in the namespace */
+
+ /*
+ * If the namespace override feature is enabled for this node,
+ * delete any existing attached sub-object and make the node
+ * look like a new node that is owned by the override table.
+ */
+ if (flags & ACPI_NS_OVERRIDE_IF_FOUND) {
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "Namespace override: %4.4s pass %u type %X Owner %X\n",
+ ACPI_CAST_PTR(char,
+ &target_name),
+ interpreter_mode,
+ (*return_node)->type,
+ walk_state->owner_id));
+
+ acpi_ns_delete_children(*return_node);
+ if (acpi_gbl_runtime_namespace_override) {
+ acpi_ut_remove_reference((*return_node)->object);
+ (*return_node)->object = NULL;
+ (*return_node)->owner_id =
+ walk_state->owner_id;
+ } else {
+ acpi_ns_remove_node(*return_node);
+ *return_node = ACPI_ENTRY_NOT_FOUND;
+ }
+ }
+
+ /* Return an error if we don't expect to find the object */
+
+ else if (flags & ACPI_NS_ERROR_IF_FOUND) {
+ status = AE_ALREADY_EXISTS;
+ }
}
#ifdef ACPI_ASL_COMPILER
if (*return_node && (*return_node)->type == ACPI_TYPE_ANY) {
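The new found-node branch above supports two override modes. A standalone sketch of the decision, with simplified stand-in types and helper names for the ACPICA internals (child deletion, reference drop, node removal):

#include <stdbool.h>
#include <stddef.h>

struct node {
        void *object;           /* attached sub-object, if any */
        unsigned int owner_id;  /* table that owns this node */
};

/* trivial stubs standing in for the real namespace helpers */
static void delete_children(struct node *n) { (void)n; }
static void drop_object_reference(void *obj) { (void)obj; }
static void remove_node(struct node *n) { (void)n; }

/* Returns the surviving node, or NULL when the caller re-creates it. */
static struct node *override_found_node(struct node *n,
                                        unsigned int new_owner,
                                        bool runtime_override)
{
        delete_children(n);
        if (runtime_override) {
                /* Runtime: keep the node, detach its object, re-own it */
                drop_object_reference(n->object);
                n->object = NULL;
                n->owner_id = new_owner;
                return n;
        }
        /* Boot-time load: remove the node; search proceeds as not-found */
        remove_node(n);
        return NULL;
}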
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 6ad02008c0c2..8d8104b8bd28 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -292,8 +292,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
} else {
/* Convert the character to uppercase and save it */
- result[i] =
- (char)ACPI_TOUPPER((int)*external_name);
+ result[i] = (char)toupper((int)*external_name);
external_name++;
}
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index b6030a2deee1..6ee1e52b903d 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -696,7 +696,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
return (AE_CTRL_DEPTH);
}
- no_match = ACPI_STRCMP(hid->string, info->hid);
+ no_match = strcmp(hid->string, info->hid);
ACPI_FREE(hid);
if (no_match) {
@@ -715,8 +715,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
found = FALSE;
for (i = 0; i < cid->count; i++) {
- if (ACPI_STRCMP(cid->ids[i].string, info->hid)
- == 0) {
+ if (strcmp(cid->ids[i].string, info->hid) == 0) {
/* Found a matching CID */
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index d66c326485d8..9ff643b9553f 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -114,7 +114,7 @@ acpi_get_handle(acpi_handle parent,
/* Special case for root-only, since we can't search for it */
- if (!ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH)) {
+ if (!strcmp(pathname, ACPI_NS_ROOT_PATH)) {
*ret_handle =
ACPI_CAST_PTR(acpi_handle, acpi_gbl_root_node);
return (AE_OK);
@@ -242,7 +242,7 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
/* Copy actual string and return a pointer to the next string area */
- ACPI_MEMCPY(string_area, source->string, source->length);
+ memcpy(string_area, source->string, source->length);
return (string_area + source->length);
}
@@ -260,7 +260,7 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
* control methods (Such as in the case of a device.)
*
* For Device and Processor objects, run the Device _HID, _UID, _CID, _SUB,
- * _STA, _ADR, _sx_w, and _sx_d methods.
+ * _CLS, _STA, _ADR, _sx_w, and _sx_d methods.
*
* Note: Allocates the return buffer, must be freed by the caller.
*
@@ -276,11 +276,12 @@ acpi_get_object_info(acpi_handle handle,
struct acpi_pnp_device_id *hid = NULL;
struct acpi_pnp_device_id *uid = NULL;
struct acpi_pnp_device_id *sub = NULL;
+ struct acpi_pnp_device_id *cls = NULL;
char *next_id_string;
acpi_object_type type;
acpi_name name;
u8 param_count = 0;
- u8 valid = 0;
+ u16 valid = 0;
u32 info_size;
u32 i;
acpi_status status;
@@ -320,7 +321,7 @@ acpi_get_object_info(acpi_handle handle,
if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) {
/*
* Get extra info for ACPI Device/Processor objects only:
- * Run the Device _HID, _UID, _SUB, and _CID methods.
+ * Run the Device _HID, _UID, _SUB, _CID, and _CLS methods.
*
* Note: none of these methods are required, so they may or may
* not be present for this device. The Info->Valid bitfield is used
@@ -363,6 +364,14 @@ acpi_get_object_info(acpi_handle handle,
sizeof(struct acpi_pnp_device_id_list));
valid |= ACPI_VALID_CID;
}
+
+ /* Execute the Device._CLS method */
+
+ status = acpi_ut_execute_CLS(node, &cls);
+ if (ACPI_SUCCESS(status)) {
+ info_size += cls->length;
+ valid |= ACPI_VALID_CLS;
+ }
}
/*
@@ -486,6 +495,11 @@ acpi_get_object_info(acpi_handle handle,
}
}
+ if (cls) {
+ next_id_string = acpi_ns_copy_device_id(&info->class_code,
+ cls, next_id_string);
+ }
+
/* Copy the fixed-length data */
info->info_size = info_size;
@@ -510,6 +524,9 @@ cleanup:
if (cid_list) {
ACPI_FREE(cid_list);
}
+ if (cls) {
+ ACPI_FREE(cls);
+ }
return (status);
}
@@ -620,7 +637,7 @@ acpi_status acpi_install_method(u8 *buffer)
/* Copy the method AML to the local buffer */
- ACPI_MEMCPY(aml_buffer, aml_start, aml_length);
+ memcpy(aml_buffer, aml_start, aml_length);
/* Initialize the method object with the new method's information */
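With _CLS wired into acpi_get_object_info(), callers can read the class string next to the other device IDs. A usage sketch with a hypothetical caller; ACPI_VALID_CLS and the class_code field come from this series, error handling is trimmed, and the info buffer is freed by the caller as the header comment requires:

static void show_pci_class(acpi_handle handle)
{
        struct acpi_device_info *info;

        if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
                return;

        if (info->valid & ACPI_VALID_CLS)
                pr_info("PCI class code: %s\n", info->class_code.string);

        kfree(info);    /* returned buffer is owned by the caller */
}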
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 960505ab409a..32440912023a 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -93,10 +93,9 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
op->common.aml_opcode = opcode;
- ACPI_DISASM_ONLY_MEMBERS(ACPI_STRNCPY(op->common.aml_op_name,
- (acpi_ps_get_opcode_info
- (opcode))->name,
- sizeof(op->common.aml_op_name)));
+ ACPI_DISASM_ONLY_MEMBERS(strncpy(op->common.aml_op_name,
+ (acpi_ps_get_opcode_info(opcode))->
+ name, sizeof(op->common.aml_op_name)));
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 15434e4c9b34..3fa829e96c2a 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -353,13 +353,13 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/* +1 to include null terminator */
user_prt->length +=
- (u32) ACPI_STRLEN(user_prt->source) + 1;
+ (u32)strlen(user_prt->source) + 1;
break;
case ACPI_TYPE_STRING:
- ACPI_STRCPY(user_prt->source,
- obj_desc->string.pointer);
+ strcpy(user_prt->source,
+ obj_desc->string.pointer);
/*
* Add to the Length field the length of the string
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 1fe49d223663..ac37852e0821 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -119,7 +119,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
/*
* Get the resource type and the initial (minimum) length
*/
- ACPI_MEMSET(resource, 0, INIT_RESOURCE_LENGTH(info));
+ memset(resource, 0, INIT_RESOURCE_LENGTH(info));
resource->type = INIT_RESOURCE_TYPE(info);
resource->length = INIT_RESOURCE_LENGTH(info);
break;
@@ -324,13 +324,13 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
case ACPI_RSC_SET8:
- ACPI_MEMSET(destination, info->aml_offset, info->value);
+ memset(destination, info->aml_offset, info->value);
break;
case ACPI_RSC_DATA8:
target = ACPI_ADD_PTR(char, resource, info->value);
- ACPI_MEMCPY(destination, source, ACPI_GET16(target));
+ memcpy(destination, source, ACPI_GET16(target));
break;
case ACPI_RSC_ADDRESS:
@@ -502,7 +502,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
switch (info->opcode) {
case ACPI_RSC_INITSET:
- ACPI_MEMSET(aml, 0, INIT_RESOURCE_LENGTH(info));
+ memset(aml, 0, INIT_RESOURCE_LENGTH(info));
aml_length = INIT_RESOURCE_LENGTH(info);
acpi_rs_set_resource_header(INIT_RESOURCE_TYPE(info),
aml_length, aml);
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index ece3cd60cc6a..52b024df0052 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -148,7 +148,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
case ACPI_RSC_MOVE_SERIAL_VEN:
case ACPI_RSC_MOVE_SERIAL_RES:
- ACPI_MEMCPY(destination, source, item_count);
+ memcpy(destination, source, item_count);
return;
/*
@@ -364,12 +364,11 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
* Zero the entire area of the buffer.
*/
total_length =
- (u32)
- ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
+ (u32)strlen(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
1;
- total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
+ total_length = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
- ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
+ memset(resource_source->string_ptr, 0, total_length);
/* Copy the resource_source string to the destination */
@@ -432,8 +431,8 @@ acpi_rs_set_resource_source(union aml_resource * aml,
/* Copy the resource_source string */
- ACPI_STRCPY(ACPI_CAST_PTR(char, &aml_resource_source[1]),
- resource_source->string_ptr);
+ strcpy(ACPI_CAST_PTR(char, &aml_resource_source[1]),
+ resource_source->string_ptr);
/*
* Add the length of the string (+ 1 for null terminator) to the
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 8e6276df0226..de51f836ef68 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -398,8 +398,8 @@ acpi_resource_to_address64(struct acpi_resource *resource,
/* Simple copy for 64 bit source */
- ACPI_MEMCPY(out, &resource->data,
- sizeof(struct acpi_resource_address64));
+ memcpy(out, &resource->data,
+ sizeof(struct acpi_resource_address64));
break;
default:
@@ -499,7 +499,7 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
*/
if ((vendor->byte_length < (ACPI_UUID_LENGTH + 1)) ||
(vendor->uuid_subtype != info->uuid->subtype) ||
- (ACPI_MEMCMP(vendor->uuid, info->uuid->data, ACPI_UUID_LENGTH))) {
+ (memcmp(vendor->uuid, info->uuid->data, ACPI_UUID_LENGTH))) {
return (AE_OK);
}
@@ -513,7 +513,7 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
/* Found the correct resource, copy and return it */
- ACPI_MEMCPY(buffer->pointer, resource, resource->length);
+ memcpy(buffer->pointer, resource, resource->length);
buffer->length = resource->length;
/* Found the desired descriptor, terminate resource walk */
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index d7f8386455bd..5c9d5abf1588 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -73,7 +73,7 @@ acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
* Initialize the table descriptor. Set the pointer to NULL, since the
* table is not fully mapped at this time.
*/
- ACPI_MEMSET(table_desc, 0, sizeof(struct acpi_table_desc));
+ memset(table_desc, 0, sizeof(struct acpi_table_desc));
table_desc->address = address;
table_desc->length = table->length;
table_desc->flags = flags;
@@ -465,9 +465,9 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Copy and free the previous table array */
if (acpi_gbl_root_table_list.tables) {
- ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
- (acpi_size) table_count *
- sizeof(struct acpi_table_desc));
+ memcpy(tables, acpi_gbl_root_table_list.tables,
+ (acpi_size) table_count *
+ sizeof(struct acpi_table_desc));
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 7d2486005e3f..6253001b6375 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -350,9 +350,18 @@ void acpi_tb_parse_fadt(u32 table_index)
/* If Hardware Reduced flag is set, there is no FACS */
if (!acpi_gbl_reduced_hardware) {
- acpi_tb_install_fixed_table((acpi_physical_address)
- acpi_gbl_FADT.Xfacs, ACPI_SIG_FACS,
- ACPI_TABLE_INDEX_FACS);
+ if (acpi_gbl_FADT.facs) {
+ acpi_tb_install_fixed_table((acpi_physical_address)
+ acpi_gbl_FADT.facs,
+ ACPI_SIG_FACS,
+ ACPI_TABLE_INDEX_FACS);
+ }
+ if (acpi_gbl_FADT.Xfacs) {
+ acpi_tb_install_fixed_table((acpi_physical_address)
+ acpi_gbl_FADT.Xfacs,
+ ACPI_SIG_FACS,
+ ACPI_TABLE_INDEX_X_FACS);
+ }
}
}
@@ -389,12 +398,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
/* Clear the entire local FADT */
- ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
+ memset(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
/* Copy the original FADT, up to sizeof (struct acpi_table_fadt) */
- ACPI_MEMCPY(&acpi_gbl_FADT, table,
- ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
+ memcpy(&acpi_gbl_FADT, table,
+ ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
/* Take a copy of the Hardware Reduced flag */
@@ -491,13 +500,9 @@ static void acpi_tb_convert_fadt(void)
acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
/*
- * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
+ * Expand the 32-bit DSDT addresses to 64-bit as necessary.
* Later ACPICA code will always use the X 64-bit field.
*/
- acpi_gbl_FADT.Xfacs = acpi_tb_select_address("FACS",
- acpi_gbl_FADT.facs,
- acpi_gbl_FADT.Xfacs);
-
acpi_gbl_FADT.Xdsdt = acpi_tb_select_address("DSDT",
acpi_gbl_FADT.dsdt,
acpi_gbl_FADT.Xdsdt);
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 0b879fcfef67..119c84ad9833 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -76,16 +76,16 @@ acpi_tb_find_table(char *signature,
/* Normalize the input strings */
- ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
+ memset(&header, 0, sizeof(struct acpi_table_header));
ACPI_MOVE_NAME(header.signature, signature);
- ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
- ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+ strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
+ strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
/* Search for the table */
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
- if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
- header.signature, ACPI_NAME_SIZE)) {
+ if (memcmp(&(acpi_gbl_root_table_list.tables[i].signature),
+ header.signature, ACPI_NAME_SIZE)) {
/* Not the requested table */
@@ -112,21 +112,20 @@ acpi_tb_find_table(char *signature,
/* Check for table match on all IDs */
- if (!ACPI_MEMCMP
+ if (!memcmp
(acpi_gbl_root_table_list.tables[i].pointer->signature,
header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
||
- !ACPI_MEMCMP
+ !memcmp
(acpi_gbl_root_table_list.
tables[i].pointer->
oem_id,
header.oem_id,
ACPI_OEM_ID_SIZE))
&& (!oem_table_id[0]
- || !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
- pointer->oem_table_id,
- header.oem_table_id,
- ACPI_OEM_TABLE_ID_SIZE))) {
+ || !memcmp(acpi_gbl_root_table_list.tables[i].pointer->
+ oem_table_id, header.oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE))) {
*table_index = i;
ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
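The nested condition above encodes one rule: the signature must always match, while an empty OEM ID or OEM table ID acts as a wildcard. The same predicate flattened into a sketch, using the fixed ACPI field widths (4-byte signature, 6-byte OEM ID, 8-byte OEM table ID):

#include <string.h>

static int table_matches(const char *sig, const char *oem,
                         const char *oem_tbl,
                         const char *want_sig, const char *want_oem,
                         const char *want_oem_tbl)
{
        return memcmp(sig, want_sig, 4) == 0 &&
               (!want_oem[0] || memcmp(oem, want_oem, 6) == 0) &&
               (!want_oem_tbl[0] || memcmp(oem_tbl, want_oem_tbl, 8) == 0);
}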
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 008a251780f4..15ea98e0068d 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -87,8 +87,8 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
* not just the header.
*/
is_identical = (u8)((table_desc->length != table_length ||
- ACPI_MEMCMP(table_desc->pointer, table,
- table_length)) ? FALSE : TRUE);
+ memcmp(table_desc->pointer, table, table_length)) ?
+ FALSE : TRUE);
/* Release the acquired table */
@@ -289,8 +289,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
if ((new_table_desc.signature.ascii[0] != 0x00) &&
(!ACPI_COMPARE_NAME
(&new_table_desc.signature, ACPI_SIG_SSDT))
- && (ACPI_STRNCMP(new_table_desc.signature.ascii, "OEM", 3)))
- {
+ && (strncmp(new_table_desc.signature.ascii, "OEM", 3))) {
ACPI_BIOS_ERROR((AE_INFO,
"Table has invalid signature [%4.4s] (0x%8.8X), "
"must be SSDT or OEMx",
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 77ba5c71c6e7..709d5112fc16 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -73,7 +73,7 @@ static void acpi_tb_fix_string(char *string, acpi_size length)
{
while (length && *string) {
- if (!ACPI_IS_PRINT(*string)) {
+ if (!isprint((int)*string)) {
*string = '?';
}
string++;
@@ -100,7 +100,7 @@ acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
struct acpi_table_header *header)
{
- ACPI_MEMCPY(out_header, header, sizeof(struct acpi_table_header));
+ memcpy(out_header, header, sizeof(struct acpi_table_header));
acpi_tb_fix_string(out_header->signature, ACPI_NAME_SIZE);
acpi_tb_fix_string(out_header->oem_id, ACPI_OEM_ID_SIZE);
@@ -138,9 +138,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
/* RSDP has no common fields */
- ACPI_MEMCPY(local_header.oem_id,
- ACPI_CAST_PTR(struct acpi_table_rsdp,
- header)->oem_id, ACPI_OEM_ID_SIZE);
+ memcpy(local_header.oem_id,
+ ACPI_CAST_PTR(struct acpi_table_rsdp, header)->oem_id,
+ ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 6559a58439c5..568ac0e4a3c6 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -68,7 +68,6 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
acpi_status acpi_tb_initialize_facs(void)
{
- acpi_status status;
/* If Hardware Reduced flag is set, there is no FACS */
@@ -77,11 +76,25 @@ acpi_status acpi_tb_initialize_facs(void)
return (AE_OK);
}
- status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
- ACPI_CAST_INDIRECT_PTR(struct
- acpi_table_header,
- &acpi_gbl_FACS));
- return (status);
+ (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+ ACPI_CAST_INDIRECT_PTR(struct
+ acpi_table_header,
+ &acpi_gbl_facs32));
+ (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_X_FACS,
+ ACPI_CAST_INDIRECT_PTR(struct
+ acpi_table_header,
+ &acpi_gbl_facs64));
+
+ if (acpi_gbl_facs64
+ && (!acpi_gbl_facs32 || !acpi_gbl_use32_bit_facs_addresses)) {
+ acpi_gbl_FACS = acpi_gbl_facs64;
+ } else if (acpi_gbl_facs32) {
+ acpi_gbl_FACS = acpi_gbl_facs32;
+ }
+
+ /* If there is no FACS, just continue. There was already an error msg */
+
+ return (AE_OK);
}
#endif /* !ACPI_REDUCED_HARDWARE */
@@ -101,7 +114,7 @@ acpi_status acpi_tb_initialize_facs(void)
u8 acpi_tb_tables_loaded(void)
{
- if (acpi_gbl_root_table_list.current_table_count >= 3) {
+ if (acpi_gbl_root_table_list.current_table_count >= 4) {
return (TRUE);
}
@@ -175,7 +188,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
return (NULL);
}
- ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length);
+ memcpy(new_table, table_desc->pointer, table_desc->length);
acpi_tb_uninstall_table(table_desc);
acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.
@@ -357,11 +370,11 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
/*
- * First two entries in the table array are reserved for the DSDT
- * and FACS, which are not actually present in the RSDT/XSDT - they
- * come from the FADT
+ * First three entries in the table array are reserved for the DSDT
+ * and 32bit/64bit FACS, which are not actually present in the
+ * RSDT/XSDT - they come from the FADT
*/
- acpi_gbl_root_table_list.current_table_count = 2;
+ acpi_gbl_root_table_list.current_table_count = 3;
/* Initialize the root table array from the RSDT/XSDT */
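The FACS selection earlier in this file now weighs both mapped tables: the 64-bit X_FACS wins unless it is absent or the host asked for 32-bit FACS addresses. The policy in isolation, as a sketch (use32_bit_facs_addresses stands in for the acpi_gbl_use32_bit_facs_addresses tunable):

#include <stddef.h>

struct acpi_table_facs;         /* opaque for this sketch */

static struct acpi_table_facs *
select_facs(struct acpi_table_facs *facs32,
            struct acpi_table_facs *facs64,
            int use32_bit_facs_addresses)
{
        if (facs64 && (!facs32 || !use32_bit_facs_addresses))
                return facs64;  /* prefer the 64-bit X_FACS */
        if (facs32)
                return facs32;  /* otherwise fall back to the 32-bit FACS */
        return NULL;            /* firmware provided no FACS at all */
}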
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 60e94f87f27a..5559e2c70b15 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -119,9 +119,9 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
} else {
/* Root Table Array has been statically allocated by the host */
- ACPI_MEMSET(initial_table_array, 0,
- (acpi_size) initial_table_count *
- sizeof(struct acpi_table_desc));
+ memset(initial_table_array, 0,
+ (acpi_size) initial_table_count *
+ sizeof(struct acpi_table_desc));
acpi_gbl_root_table_list.tables = initial_table_array;
acpi_gbl_root_table_list.max_table_count = initial_table_count;
@@ -242,8 +242,9 @@ acpi_get_table_header(char *signature,
if (!header) {
return (AE_NO_MEMORY);
}
- ACPI_MEMCPY(out_table_header, header,
- sizeof(struct acpi_table_header));
+
+ memcpy(out_table_header, header,
+ sizeof(struct acpi_table_header));
acpi_os_unmap_memory(header,
sizeof(struct
acpi_table_header));
@@ -251,9 +252,9 @@ acpi_get_table_header(char *signature,
return (AE_NOT_FOUND);
}
} else {
- ACPI_MEMCPY(out_table_header,
- acpi_gbl_root_table_list.tables[i].pointer,
- sizeof(struct acpi_table_header));
+ memcpy(out_table_header,
+ acpi_gbl_root_table_list.tables[i].pointer,
+ sizeof(struct acpi_table_header));
}
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index aadb3002a2dd..9682d40ca6ff 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -150,8 +150,8 @@ static acpi_status acpi_tb_load_namespace(void)
* Save the original DSDT header for detection of table corruption
* and/or replacement of the DSDT from outside the OS.
*/
- ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
- sizeof(struct acpi_table_header));
+ memcpy(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
+ sizeof(struct acpi_table_header));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
@@ -166,13 +166,18 @@ static acpi_status acpi_tb_load_namespace(void)
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
- if ((!ACPI_COMPARE_NAME
+ if (!acpi_gbl_root_table_list.tables[i].address ||
+ (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
ACPI_SIG_SSDT)
&&
!ACPI_COMPARE_NAME(&
(acpi_gbl_root_table_list.tables[i].
- signature), ACPI_SIG_PSDT))
+ signature), ACPI_SIG_PSDT)
+ &&
+ !ACPI_COMPARE_NAME(&
+ (acpi_gbl_root_table_list.tables[i].
+ signature), ACPI_SIG_OSDT))
||
ACPI_FAILURE(acpi_tb_validate_table
(&acpi_gbl_root_table_list.tables[i]))) {
@@ -219,9 +224,9 @@ acpi_install_table(acpi_physical_address address, u8 physical)
ACPI_FUNCTION_TRACE(acpi_install_table);
if (physical) {
- flags = ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL;
- } else {
flags = ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL;
+ } else {
+ flags = ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL;
}
status = acpi_tb_install_standard_table(address, flags,
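The acpi_install_table() hunk above fixes an inverted mapping: a nonzero physical argument means the caller handed in a physical address, which must be installed with the internal-physical origin; zero means a host virtual pointer. The corrected mapping in isolation, with the flag constants reduced to an enum for the sketch:

enum table_origin {
        ORIGIN_EXTERNAL_VIRTUAL,   /* caller passed a virtual pointer */
        ORIGIN_INTERNAL_PHYSICAL,  /* caller passed a physical address */
};

static enum table_origin install_flags(unsigned char physical)
{
        return physical ? ORIGIN_INTERNAL_PHYSICAL
                        : ORIGIN_EXTERNAL_VIRTUAL;
}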
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 61d8f6d186d1..7a4101f0685e 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -73,7 +73,7 @@ void *acpi_os_allocate_zeroed(acpi_size size)
/* Clear the memory block */
- ACPI_MEMSET(allocation, 0, size);
+ memset(allocation, 0, size);
}
return (allocation);
@@ -181,7 +181,7 @@ acpi_status acpi_ut_delete_caches(void)
char buffer[7];
if (acpi_gbl_display_final_mem_stats) {
- ACPI_STRCPY(buffer, "MEMORY");
+ strcpy(buffer, "MEMORY");
(void)acpi_db_display_statistics(buffer);
}
#endif
@@ -337,6 +337,6 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
/* Have a valid buffer, clear it */
- ACPI_MEMSET(buffer->pointer, 0, required_length);
+ memset(buffer->pointer, 0, required_length);
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index a8c39643e618..01c8709ca586 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -159,7 +159,7 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
}
buf_char = buffer[(acpi_size) i + j];
- if (ACPI_IS_PRINT(buf_char)) {
+ if (isprint(buf_char)) {
acpi_os_printf("%c", buf_char);
} else {
acpi_os_printf(".");
@@ -319,7 +319,7 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file,
}
buf_char = buffer[(acpi_size) i + j];
- if (ACPI_IS_PRINT(buf_char)) {
+ if (isprint(buf_char)) {
acpi_ut_file_printf(file, "%c", buf_char);
} else {
acpi_ut_file_printf(file, ".");
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index eacc5eee362e..0d21fbd99363 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -84,7 +84,7 @@ acpi_os_create_cache(char *cache_name,
/* Populate the cache object and return it */
- ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
+ memset(cache, 0, sizeof(struct acpi_memory_list));
cache->list_name = cache_name;
cache->object_size = object_size;
cache->max_depth = max_depth;
@@ -212,7 +212,7 @@ acpi_os_release_object(struct acpi_memory_list * cache, void *object)
/* Mark the object as cached */
- ACPI_MEMSET(object, 0xCA, cache->object_size);
+ memset(object, 0xCA, cache->object_size);
ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED);
/* Put the object at the head of the cache list */
@@ -281,7 +281,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
/* Clear (zero) the previously used Object */
- ACPI_MEMSET(object, 0, cache->object_size);
+ memset(object, 0, cache->object_size);
} else {
/* The cache is empty, create a new object */
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index c37ec5035f4c..257221d452c8 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -129,7 +129,7 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
/* Always clear the external object */
- ACPI_MEMSET(external_object, 0, sizeof(union acpi_object));
+ memset(external_object, 0, sizeof(union acpi_object));
/*
* In general, the external object will be the same type as
@@ -149,9 +149,9 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
string.
length + 1);
- ACPI_MEMCPY((void *)data_space,
- (void *)internal_object->string.pointer,
- (acpi_size) internal_object->string.length + 1);
+ memcpy((void *)data_space,
+ (void *)internal_object->string.pointer,
+ (acpi_size) internal_object->string.length + 1);
break;
case ACPI_TYPE_BUFFER:
@@ -162,9 +162,9 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
ACPI_ROUND_UP_TO_NATIVE_WORD(internal_object->string.
length);
- ACPI_MEMCPY((void *)data_space,
- (void *)internal_object->buffer.pointer,
- internal_object->buffer.length);
+ memcpy((void *)data_space,
+ (void *)internal_object->buffer.pointer,
+ internal_object->buffer.length);
break;
case ACPI_TYPE_INTEGER:
@@ -502,9 +502,9 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
goto error_exit;
}
- ACPI_MEMCPY(internal_object->string.pointer,
- external_object->string.pointer,
- external_object->string.length);
+ memcpy(internal_object->string.pointer,
+ external_object->string.pointer,
+ external_object->string.length);
internal_object->string.length = external_object->string.length;
break;
@@ -517,9 +517,9 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
goto error_exit;
}
- ACPI_MEMCPY(internal_object->buffer.pointer,
- external_object->buffer.pointer,
- external_object->buffer.length);
+ memcpy(internal_object->buffer.pointer,
+ external_object->buffer.pointer,
+ external_object->buffer.length);
internal_object->buffer.length = external_object->buffer.length;
@@ -694,8 +694,8 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
copy_size = sizeof(struct acpi_namespace_node);
}
- ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc),
- ACPI_CAST_PTR(char, source_desc), copy_size);
+ memcpy(ACPI_CAST_PTR(char, dest_desc),
+ ACPI_CAST_PTR(char, source_desc), copy_size);
/* Restore the saved fields */
@@ -725,9 +725,9 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
/* Copy the actual buffer data */
- ACPI_MEMCPY(dest_desc->buffer.pointer,
- source_desc->buffer.pointer,
- source_desc->buffer.length);
+ memcpy(dest_desc->buffer.pointer,
+ source_desc->buffer.pointer,
+ source_desc->buffer.length);
}
break;
@@ -747,9 +747,9 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
/* Copy the actual string data */
- ACPI_MEMCPY(dest_desc->string.pointer,
- source_desc->string.pointer,
- (acpi_size) source_desc->string.length + 1);
+ memcpy(dest_desc->string.pointer,
+ source_desc->string.pointer,
+ (acpi_size) source_desc->string.length + 1);
}
break;
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 4f3f888d33bb..cd02693841db 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -111,8 +111,8 @@ void acpi_ut_track_stack_ptr(void)
* RETURN: Updated pointer to the function name
*
* DESCRIPTION: Remove the "Acpi" prefix from the function name, if present.
- * This allows compiler macros such as __func__ to be used with no
- * change to the debug output.
+ * This allows compiler macros such as __func__ to be used
+ * with no change to the debug output.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 5e8df9177da4..a72685c1e819 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -102,12 +102,19 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
{"_SB_", ACPI_TYPE_DEVICE, NULL},
{"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
{"_TZ_", ACPI_TYPE_DEVICE, NULL},
- {"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
+ /*
+ * March, 2015:
+ * The _REV object is in the process of being deprecated, because
+ * other ACPI implementations permanently return 2. Thus, it
+ * has little or no value. Return 2 for compatibility with
+ * other ACPI implementations.
+ */
+ {"_REV", ACPI_TYPE_INTEGER, ACPI_CAST_PTR(char, 2)},
{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
- {"_GL_", ACPI_TYPE_MUTEX, (char *)1},
+ {"_GL_", ACPI_TYPE_MUTEX, ACPI_CAST_PTR(char, 1)},
#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
- {"_OSI", ACPI_TYPE_METHOD, (char *)1},
+ {"_OSI", ACPI_TYPE_METHOD, ACPI_CAST_PTR(char, 1)},
#endif
/* Table terminator */
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 27431cfc1c44..7956df1e263c 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Module Name: utids - support for device Ids - HID, UID, CID
+ * Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
*****************************************************************************/
@@ -111,7 +111,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
acpi_ex_eisa_id_to_string(hid->string, obj_desc->integer.value);
} else {
- ACPI_STRCPY(hid->string, obj_desc->string.pointer);
+ strcpy(hid->string, obj_desc->string.pointer);
}
hid->length = length;
@@ -180,7 +180,7 @@ acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
/* Simply copy existing string */
- ACPI_STRCPY(sub->string, obj_desc->string.pointer);
+ strcpy(sub->string, obj_desc->string.pointer);
sub->length = length;
*return_id = sub;
@@ -256,7 +256,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
acpi_ex_integer_to_string(uid->string, obj_desc->integer.value);
} else {
- ACPI_STRCPY(uid->string, obj_desc->string.pointer);
+ strcpy(uid->string, obj_desc->string.pointer);
}
uid->length = length;
@@ -393,8 +393,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
/* Copy the String CID from the returned object */
- ACPI_STRCPY(next_id_string,
- cid_objects[i]->string.pointer);
+ strcpy(next_id_string, cid_objects[i]->string.pointer);
length = cid_objects[i]->string.length + 1;
}
@@ -416,3 +415,92 @@ cleanup:
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_execute_CLS
+ *
+ * PARAMETERS: device_node - Node for the device
+ * return_id - Where the _CLS is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Executes the _CLS control method that returns the
+ * PCI-defined class code of the device. The _CLS value is always a package
+ * containing PCI class information as a list of integers.
+ * The returned string has format "BBSSPP", where:
+ * BB = Base-class code
+ * SS = Sub-class code
+ * PP = Programming Interface code
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_CLS(struct acpi_namespace_node *device_node,
+ struct acpi_pnp_device_id **return_id)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object **cls_objects;
+ u32 count;
+ struct acpi_pnp_device_id *cls;
+ u32 length;
+ acpi_status status;
+ u8 class_code[3] = { 0, 0, 0 };
+
+ ACPI_FUNCTION_TRACE(ut_execute_CLS);
+
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CLS,
+ ACPI_BTYPE_PACKAGE, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the size of the String to be returned, includes null terminator */
+
+ length = ACPI_PCICLS_STRING_SIZE;
+ cls_objects = obj_desc->package.elements;
+ count = obj_desc->package.count;
+
+ if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
+ if (count > 0
+ && cls_objects[0]->common.type == ACPI_TYPE_INTEGER) {
+ class_code[0] = (u8)cls_objects[0]->integer.value;
+ }
+ if (count > 1
+ && cls_objects[1]->common.type == ACPI_TYPE_INTEGER) {
+ class_code[1] = (u8)cls_objects[1]->integer.value;
+ }
+ if (count > 2
+ && cls_objects[2]->common.type == ACPI_TYPE_INTEGER) {
+ class_code[2] = (u8)cls_objects[2]->integer.value;
+ }
+ }
+
+ /* Allocate a buffer for the CLS */
+
+ cls =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
+ (acpi_size) length);
+ if (!cls) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Area for the string starts after PNP_DEVICE_ID struct */
+
+ cls->string =
+ ACPI_ADD_PTR(char, cls, sizeof(struct acpi_pnp_device_id));
+
+ /* Simply copy existing string */
+
+ acpi_ex_pci_cls_to_string(cls->string, class_code);
+ cls->length = length;
+ *return_id = cls;
+
+cleanup:
+
+ /* On exit, we must delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
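acpi_ex_pci_cls_to_string(), added elsewhere in this series, renders the three class bytes as six uppercase hex digits. The following is only a hedged illustration of that output format, not the interpreter's actual implementation; PCICLS_STRING_SIZE mirrors ACPI_PCICLS_STRING_SIZE (6 characters plus NUL):

#include <stdio.h>

#define PCICLS_STRING_SIZE 7    /* "BBSSPP" plus NUL terminator */

static void pci_cls_to_string(char dest[PCICLS_STRING_SIZE],
                              const unsigned char class_code[3])
{
        /* BB = base class, SS = subclass, PP = programming interface */
        snprintf(dest, PCICLS_STRING_SIZE, "%02X%02X%02X",
                 class_code[0], class_code[1], class_code[2]);
}

/* Example: {0x01, 0x06, 0x01} (AHCI SATA controller) -> "010601" */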
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index cbb7034d28d8..71b66537f826 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -66,9 +66,9 @@ u8 acpi_ut_is_pci_root_bridge(char *id)
* Check if this is a PCI root bridge.
* ACPI 3.0+: check for a PCI Express root also.
*/
- if (!(ACPI_STRCMP(id,
- PCI_ROOT_HID_STRING)) ||
- !(ACPI_STRCMP(id, PCI_EXPRESS_ROOT_HID_STRING))) {
+ if (!(strcmp(id,
+ PCI_ROOT_HID_STRING)) ||
+ !(strcmp(id, PCI_EXPRESS_ROOT_HID_STRING))) {
return (TRUE);
}
@@ -97,7 +97,8 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT)) {
return (TRUE);
}
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 44035abdbf29..8f3d203aed79 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -232,8 +232,7 @@ acpi_status acpi_ut_install_interface(acpi_string interface_name)
return (AE_NO_MEMORY);
}
- interface_info->name =
- ACPI_ALLOCATE_ZEROED(ACPI_STRLEN(interface_name) + 1);
+ interface_info->name = ACPI_ALLOCATE_ZEROED(strlen(interface_name) + 1);
if (!interface_info->name) {
ACPI_FREE(interface_info);
return (AE_NO_MEMORY);
@@ -241,7 +240,7 @@ acpi_status acpi_ut_install_interface(acpi_string interface_name)
/* Initialize new info and insert at the head of the global list */
- ACPI_STRCPY(interface_info->name, interface_name);
+ strcpy(interface_info->name, interface_name);
interface_info->flags = ACPI_OSI_DYNAMIC;
interface_info->next = acpi_gbl_supported_interfaces;
@@ -269,7 +268,7 @@ acpi_status acpi_ut_remove_interface(acpi_string interface_name)
previous_interface = next_interface = acpi_gbl_supported_interfaces;
while (next_interface) {
- if (!ACPI_STRCMP(interface_name, next_interface->name)) {
+ if (!strcmp(interface_name, next_interface->name)) {
/* Found: name is in either the static list or was added at runtime */
@@ -373,7 +372,7 @@ struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name)
next_interface = acpi_gbl_supported_interfaces;
while (next_interface) {
- if (!ACPI_STRCMP(interface_name, next_interface->name)) {
+ if (!strcmp(interface_name, next_interface->name)) {
return (next_interface);
}
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 29e449935a82..97898ed71b4b 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -148,7 +148,7 @@ void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes)
u32 j;
if (!expected_btypes) {
- ACPI_STRCPY(buffer, "NONE");
+ strcpy(buffer, "NONE");
return;
}
@@ -161,7 +161,7 @@ void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes)
/* If one of the expected types, concatenate the name of this type */
if (expected_btypes & this_rtype) {
- ACPI_STRCAT(buffer, &ut_rtype_names[i][j]);
+ strcat(buffer, &ut_rtype_names[i][j]);
j = 0; /* Use name separator from now on */
}
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 2be6bd4bdc09..b26297c5de49 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -180,7 +180,7 @@ const char *acpi_ut_scan_number(const char *string, u64 *number_ptr)
{
u64 number = 0;
- while (ACPI_IS_DIGIT(*string)) {
+ while (isdigit((int)*string)) {
number *= 10;
number += *(string++) - '0';
}
@@ -405,7 +405,7 @@ acpi_ut_vsnprintf(char *string,
/* Process width */
width = -1;
- if (ACPI_IS_DIGIT(*format)) {
+ if (isdigit((int)*format)) {
format = acpi_ut_scan_number(format, &number);
width = (s32) number;
} else if (*format == '*') {
@@ -422,7 +422,7 @@ acpi_ut_vsnprintf(char *string,
precision = -1;
if (*format == '.') {
++format;
- if (ACPI_IS_DIGIT(*format)) {
+ if (isdigit((int)*format)) {
format = acpi_ut_scan_number(format, &number);
precision = (s32) number;
} else if (*format == '*') {
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 83b6c52490dc..8f3c883dfe0e 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -79,7 +79,7 @@ void acpi_ut_strlwr(char *src_string)
/* Walk entire string, lowercasing the letters */
for (string = src_string; *string; string++) {
- *string = (char)ACPI_TOLOWER(*string);
+ *string = (char)tolower((int)*string);
}
return;
@@ -145,7 +145,7 @@ void acpi_ut_strupr(char *src_string)
/* Walk entire string, uppercasing the letters */
for (string = src_string; *string; string++) {
- *string = (char)ACPI_TOUPPER(*string);
+ *string = (char)toupper((int)*string);
}
return;
@@ -202,7 +202,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
/* Skip over any white space in the buffer */
- while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
+ while ((*string) && (isspace((int)*string) || *string == '\t')) {
string++;
}
@@ -211,7 +211,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
* Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
* We need to determine if it is decimal or hexadecimal.
*/
- if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
+ if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
sign_of0x = 1;
base = 16;
@@ -224,7 +224,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
/* Any string left? Check that '0x' is not followed by white space. */
- if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
+ if (!(*string) || isspace((int)*string) || *string == '\t') {
if (to_integer_op) {
goto error_exit;
} else {
@@ -241,7 +241,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
/* Main loop: convert the string to a 32- or 64-bit integer */
while (*string) {
- if (ACPI_IS_DIGIT(*string)) {
+ if (isdigit((int)*string)) {
/* Convert ASCII 0-9 to Decimal value */
@@ -252,8 +252,8 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
term = 1;
} else {
- this_digit = (u8)ACPI_TOUPPER(*string);
- if (ACPI_IS_XDIGIT((char)this_digit)) {
+ this_digit = (u8)toupper((int)*string);
+ if (isxdigit((int)this_digit)) {
/* Convert ASCII Hex char to value */
@@ -404,7 +404,7 @@ void acpi_ut_print_string(char *string, u16 max_length)
/* Check for printable character or hex escape */
- if (ACPI_IS_PRINT(string[i])) {
+ if (isprint((int)string[i])) {
/* This is a normal character */
acpi_os_printf("%c", (int)string[i]);
@@ -609,22 +609,22 @@ void ut_convert_backslashes(char *pathname)
u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
{
- if (ACPI_STRLEN(source) >= dest_size) {
+ if (strlen(source) >= dest_size) {
return (TRUE);
}
- ACPI_STRCPY(dest, source);
+ strcpy(dest, source);
return (FALSE);
}
u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
{
- if ((ACPI_STRLEN(dest) + ACPI_STRLEN(source)) >= dest_size) {
+ if ((strlen(dest) + strlen(source)) >= dest_size) {
return (TRUE);
}
- ACPI_STRCAT(dest, source);
+ strcat(dest, source);
return (FALSE);
}
@@ -635,14 +635,13 @@ acpi_ut_safe_strncat(char *dest,
{
acpi_size actual_transfer_length;
- actual_transfer_length =
- ACPI_MIN(max_transfer_length, ACPI_STRLEN(source));
+ actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
- if ((ACPI_STRLEN(dest) + actual_transfer_length) >= dest_size) {
+ if ((strlen(dest) + actual_transfer_length) >= dest_size) {
return (TRUE);
}
- ACPI_STRNCAT(dest, source, max_transfer_length);
+ strncat(dest, source, max_transfer_length);
return (FALSE);
}
#endif
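Note the inverted return convention of the acpi_ut_safe_* helpers converted above: TRUE reports that the operation would overflow the destination, so callers branch on failure. A small usage sketch:

static void copy_id(char *dest, acpi_size dest_size, char *src)
{
        /* TRUE means "would not fit"; dest is left untouched */
        if (acpi_ut_safe_strcpy(dest, dest_size, src)) {
                /* handle the truncation error here */
        }
}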
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 130dd9f96f0f..9a7dc8196a5d 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -100,7 +100,7 @@ acpi_ut_create_list(char *list_name,
return (AE_NO_MEMORY);
}
- ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
+ memset(cache, 0, sizeof(struct acpi_memory_list));
cache->list_name = list_name;
cache->object_size = object_size;
@@ -402,7 +402,7 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
allocation->component = component;
allocation->line = line;
- ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME);
+ strncpy(allocation->module, module, ACPI_MAX_MODULE_NAME);
allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
if (!element) {
@@ -497,7 +497,7 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
/* Mark the segment as deleted */
- ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);
+ memset(&allocation->user_space, 0xEA, allocation->size);
status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
return (status);
@@ -595,7 +595,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
while (element) {
if ((element->component & component) &&
((module == NULL)
- || (0 == ACPI_STRCMP(module, element->module)))) {
+ || (0 == strcmp(module, element->module)))) {
descriptor =
ACPI_CAST_PTR(union acpi_descriptor,
&element->user_space);
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 0929187bdce0..51cf52d52243 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -234,8 +234,8 @@ acpi_status acpi_get_statistics(struct acpi_statistics *stats)
stats->sci_count = acpi_sci_count;
stats->gpe_count = acpi_gpe_count;
- ACPI_MEMCPY(stats->fixed_event_count, acpi_fixed_event_count,
- sizeof(acpi_fixed_event_count));
+ memcpy(stats->fixed_event_count, acpi_fixed_event_count,
+ sizeof(acpi_fixed_event_count));
/* Other counters */
@@ -322,7 +322,7 @@ acpi_status acpi_install_interface(acpi_string interface_name)
/* Parameter validation */
- if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
+ if (!interface_name || (strlen(interface_name) == 0)) {
return (AE_BAD_PARAMETER);
}
@@ -374,7 +374,7 @@ acpi_status acpi_remove_interface(acpi_string interface_name)
/* Parameter validation */
- if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
+ if (!interface_name || (strlen(interface_name) == 0)) {
return (AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 083a76891889..42a32a66ef22 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -179,10 +179,12 @@ acpi_status __init acpi_enable_subsystem(u32 flags)
* Obtain a permanent mapping for the FACS. This is required for the
* Global Lock and the Firmware Waking Vector
*/
- status = acpi_tb_initialize_facs();
- if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
- return_ACPI_STATUS(status);
+ if (!(flags & ACPI_NO_FACS_INIT)) {
+ status = acpi_tb_initialize_facs();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+ return_ACPI_STATUS(status);
+ }
}
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 1d1791935c31..278dc4be992a 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -162,6 +162,15 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
acpi_osi_setup("!Windows 2012");
return 0;
}
+#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
+static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
+{
+ printk(KERN_NOTICE PREFIX "DMI detected: %s (force ACPI _REV to 5)\n",
+ d->ident);
+ acpi_rev_override_setup(NULL);
+ return 0;
+}
+#endif
static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
{
@@ -325,6 +334,23 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
},
},
+
+#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
+ /*
+ * DELL XPS 13 (2015) switches sound between HDA and I2S
+ * depending on the ACPI _REV callback. If userspace supports
+ * I2S sufficiently (or if you do not care about sound), you
+ * can safely disable this quirk.
+ */
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL XPS 13 (2015)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
+ },
+ },
+#endif
{}
};
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 787c629bc9b4..4683a96932b9 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -58,6 +58,7 @@ void acpi_cmos_rtc_init(void);
#else
static inline void acpi_cmos_rtc_init(void) {}
#endif
+int acpi_rev_override_setup(char *str);
extern bool acpi_force_hot_remove;
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 2161fa178c8d..628a42c41ab1 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
+#include <linux/pmem.h>
#include <linux/io.h>
#include "nfit.h"
@@ -305,6 +306,23 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
return true;
}
+static bool add_flush(struct acpi_nfit_desc *acpi_desc,
+ struct acpi_nfit_flush_address *flush)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_flush *nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush),
+ GFP_KERNEL);
+
+ if (!nfit_flush)
+ return false;
+ INIT_LIST_HEAD(&nfit_flush->list);
+ nfit_flush->flush = flush;
+ list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
+ dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
+ flush->device_handle, flush->hint_count);
+ return true;
+}
+
static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
const void *end)
{
@@ -338,7 +356,8 @@ static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
return err;
break;
case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
- dev_dbg(dev, "%s: flush\n", __func__);
+ if (!add_flush(acpi_desc, table))
+ return err;
break;
case ACPI_NFIT_TYPE_SMBIOS:
dev_dbg(dev, "%s: smbios\n", __func__);
@@ -389,6 +408,7 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
{
u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
struct nfit_memdev *nfit_memdev;
+ struct nfit_flush *nfit_flush;
struct nfit_dcr *nfit_dcr;
struct nfit_bdw *nfit_bdw;
struct nfit_idt *nfit_idt;
@@ -442,6 +462,14 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
nfit_mem->idt_bdw = nfit_idt->idt;
break;
}
+
+ list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
+ if (nfit_flush->flush->device_handle !=
+ nfit_memdev->memdev->device_handle)
+ continue;
+ nfit_mem->nfit_flush = nfit_flush;
+ break;
+ }
break;
}
@@ -978,6 +1006,24 @@ static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
+static void wmb_blk(struct nfit_blk *nfit_blk)
+{
+ if (nfit_blk->nvdimm_flush) {
+ /*
+ * The first wmb() is needed to 'sfence' all previous writes
+ * such that they are architecturally visible for the platform
+ * buffer flush. Note that we've already arranged for pmem
+ * writes to avoid the cache via arch_memcpy_to_pmem(). The
+ * final wmb() ensures ordering for the NVDIMM flush write.
+ */
+ wmb();
+ writeq(1, nfit_blk->nvdimm_flush);
+ wmb();
+ } else
+ wmb_pmem();
+}
+
static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
@@ -1012,7 +1058,10 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
offset = to_interleave_offset(offset, mmio);
writeq(cmd, mmio->base + offset);
- /* FIXME: conditionally perform read-back if mandated by firmware */
+ wmb_blk(nfit_blk);
+
+ if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
+ readq(mmio->base + offset);
}
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
@@ -1026,7 +1075,6 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
+ lane * mmio->size;
- /* TODO: non-temporal access, flush hints, cache management etc... */
write_blk_ctl(nfit_blk, lane, dpa, len, rw);
while (len) {
unsigned int c;
@@ -1045,13 +1093,19 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
}
if (rw)
- memcpy(mmio->aperture + offset, iobuf + copied, c);
+ memcpy_to_pmem(mmio->aperture + offset,
+ iobuf + copied, c);
else
- memcpy(iobuf + copied, mmio->aperture + offset, c);
+ memcpy_from_pmem(iobuf + copied,
+ mmio->aperture + offset, c);
copied += c;
len -= c;
}
+
+ if (rw)
+ wmb_blk(nfit_blk);
+
rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
return rc;
}
@@ -1124,7 +1178,7 @@ static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
}
static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
- struct acpi_nfit_system_address *spa)
+ struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
resource_size_t start = spa->address;
resource_size_t n = spa->length;
@@ -1152,8 +1206,15 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
if (!res)
goto err_mem;
- /* TODO: cacheability based on the spa type */
- spa_map->iomem = ioremap_nocache(start, n);
+ if (type == SPA_MAP_APERTURE) {
+ /*
+ * TODO: memremap_pmem() support, but that requires cache
+ * flushing when the aperture is moved.
+ */
+ spa_map->iomem = ioremap_wc(start, n);
+ } else
+ spa_map->iomem = ioremap_nocache(start, n);
+
if (!spa_map->iomem)
goto err_map;
@@ -1171,6 +1232,7 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
* nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
* @nvdimm_bus: NFIT-bus that provided the spa table entry
* @nfit_spa: spa table to map
+ * @type: aperture or control region
*
* In the case where block-data-window apertures and
* dimm-control-regions are interleaved they will end up sharing a
@@ -1180,12 +1242,12 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
* unbound.
*/
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
- struct acpi_nfit_system_address *spa)
+ struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
void __iomem *iomem;
mutex_lock(&acpi_desc->spa_map_mutex);
- iomem = __nfit_spa_map(acpi_desc, spa);
+ iomem = __nfit_spa_map(acpi_desc, spa, type);
mutex_unlock(&acpi_desc->spa_map_mutex);
return iomem;
@@ -1206,12 +1268,35 @@ static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
return 0;
}
+static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
+{
+ struct nd_cmd_dimm_flags flags;
+ int rc;
+
+ memset(&flags, 0, sizeof(flags));
+ rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
+ sizeof(flags));
+
+ if (rc >= 0 && flags.status == 0)
+ nfit_blk->dimm_flags = flags.flags;
+ else if (rc == -ENOTTY) {
+ /* fall back to a conservative default */
+ nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;
+ rc = 0;
+ } else
+ rc = -ENXIO;
+
+ return rc;
+}
+
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
struct device *dev)
{
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct nd_blk_region *ndbr = to_nd_blk_region(dev);
+ struct nfit_flush *nfit_flush;
struct nfit_blk_mmio *mmio;
struct nfit_blk *nfit_blk;
struct nfit_mem *nfit_mem;
@@ -1223,8 +1308,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
nfit_mem ? "" : " nfit_mem",
- nfit_mem->dcr ? "" : " dcr",
- nfit_mem->bdw ? "" : " bdw");
+ (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
+ (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
return -ENXIO;
}
@@ -1237,7 +1322,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
/* map block aperture memory */
nfit_blk->bdw_offset = nfit_mem->bdw->offset;
mmio = &nfit_blk->mmio[BDW];
- mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw);
+ mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+ SPA_MAP_APERTURE);
if (!mmio->base) {
dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
nvdimm_name(nvdimm));
@@ -1259,7 +1345,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
mmio = &nfit_blk->mmio[DCR];
- mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr);
+ mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+ SPA_MAP_CONTROL);
if (!mmio->base) {
dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
nvdimm_name(nvdimm));
@@ -1277,6 +1364,24 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
return rc;
}
+ rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
+ if (rc < 0) {
+ dev_dbg(dev, "%s: %s failed get DIMM flags\n",
+ __func__, nvdimm_name(nvdimm));
+ return rc;
+ }
+
+ nfit_flush = nfit_mem->nfit_flush;
+ if (nfit_flush && nfit_flush->flush->hint_count != 0) {
+ nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
+ nfit_flush->flush->hint_address[0], 8);
+ if (!nfit_blk->nvdimm_flush)
+ return -ENOMEM;
+ }
+
+ if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)
+ dev_warn(dev, "unable to guarantee persistence of writes\n");
+
if (mmio->line_size == 0)
return 0;
@@ -1459,6 +1564,7 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
INIT_LIST_HEAD(&acpi_desc->dcrs);
INIT_LIST_HEAD(&acpi_desc->bdws);
INIT_LIST_HEAD(&acpi_desc->idts);
+ INIT_LIST_HEAD(&acpi_desc->flushes);
INIT_LIST_HEAD(&acpi_desc->memdevs);
INIT_LIST_HEAD(&acpi_desc->dimms);
mutex_init(&acpi_desc->spa_map_mutex);
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 81f2e8c5a79c..79b6d83875c1 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -40,6 +40,10 @@ enum nfit_uuids {
NFIT_UUID_MAX,
};
+enum {
+ ND_BLK_DCR_LATCH = 2,
+};
+
struct nfit_spa {
struct acpi_nfit_system_address *spa;
struct list_head list;
@@ -60,6 +64,11 @@ struct nfit_idt {
struct list_head list;
};
+struct nfit_flush {
+ struct acpi_nfit_flush_address *flush;
+ struct list_head list;
+};
+
struct nfit_memdev {
struct acpi_nfit_memory_map *memdev;
struct list_head list;
@@ -77,6 +86,7 @@ struct nfit_mem {
struct acpi_nfit_system_address *spa_bdw;
struct acpi_nfit_interleave *idt_dcr;
struct acpi_nfit_interleave *idt_bdw;
+ struct nfit_flush *nfit_flush;
struct list_head list;
struct acpi_device *adev;
unsigned long dsm_mask;
@@ -88,6 +98,7 @@ struct acpi_nfit_desc {
struct mutex spa_map_mutex;
struct list_head spa_maps;
struct list_head memdevs;
+ struct list_head flushes;
struct list_head dimms;
struct list_head spas;
struct list_head dcrs;
@@ -109,7 +120,7 @@ struct nfit_blk {
struct nfit_blk_mmio {
union {
void __iomem *base;
- void *aperture;
+ void __pmem *aperture;
};
u64 size;
u64 base_offset;
@@ -123,6 +134,13 @@ struct nfit_blk {
u64 bdw_offset; /* post interleave offset */
u64 stat_offset;
u64 cmd_offset;
+ void __iomem *nvdimm_flush;
+ u32 dimm_flags;
+};
+
+enum spa_map_type {
+ SPA_MAP_CONTROL,
+ SPA_MAP_APERTURE,
};
struct nfit_spa_mapping {
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index a5dc9034efee..3b8963f21b36 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -175,10 +175,14 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
if (!addr || !length)
return;
- acpi_reserve_region(addr, length, gas->space_id, 0, desc);
+ /* Resources are never freed */
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ request_region(addr, length, desc);
+ else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ request_mem_region(addr, length, desc);
}
-static void __init acpi_reserve_resources(void)
+static int __init acpi_reserve_resources(void)
{
acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
"ACPI PM1a_EVT_BLK");
@@ -207,7 +211,10 @@ static void __init acpi_reserve_resources(void)
if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
+
+ return 0;
}
+fs_initcall_sync(acpi_reserve_resources);
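The reservation work moves from an explicit call in acpi_os_initialize1() to an fs_initcall_sync(), i.e. registration at a fixed boot phase rather than an ad-hoc call site. As a loose userspace analog only, GCC/Clang constructor priorities give the same "run this at a defined point before everything that depends on it" effect; the priorities and function names below are illustrative, not the kernel mechanism:

#include <stdio.h>

/* Lower priority runs earlier, loosely analogous to an earlier initcall level.
 * (Priorities up to 100 are reserved by the toolchain.) */
__attribute__((constructor(201)))
static void reserve_resources(void)
{
	puts("claim fixed hardware regions first");
}

__attribute__((constructor(202)))
static void start_subsystems(void)
{
	puts("subsystems start only after the regions are claimed");
}

int main(void)
{
	puts("main");
	return 0;
}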
void acpi_os_printf(const char *fmt, ...)
{
@@ -530,6 +537,19 @@ acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
}
#endif
+#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
+static bool acpi_rev_override;
+
+int __init acpi_rev_override_setup(char *str)
+{
+ acpi_rev_override = true;
+ return 1;
+}
+__setup("acpi_rev_override", acpi_rev_override_setup);
+#else
+#define acpi_rev_override false
+#endif
+
#define ACPI_MAX_OVERRIDE_LEN 100
static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
@@ -548,6 +568,11 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
*new_val = acpi_os_name;
}
+ if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
+ printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
+ *new_val = (char *)5;
+ }
+
return AE_OK;
}
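The __setup("acpi_rev_override", ...) handler above is the standard kernel pattern for wiring a boot command-line token to a boolean that is set once during early parsing and only read afterwards. A hedged userspace analog, scanning argv instead of the kernel command line:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool rev_override;       /* set once at startup, read-only afterwards */

/* Analog of a __setup() handler: flip the flag when the option is present. */
static void parse_args(int argc, char **argv)
{
	for (int i = 1; i < argc; i++)
		if (strcmp(argv[i], "acpi_rev_override") == 0)
			rev_override = true;
}

int main(int argc, char **argv)
{
	parse_args(argc, argv);
	printf("_REV override %s\n", rev_override ? "enabled" : "disabled");
	return 0;
}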
@@ -1844,7 +1869,6 @@ acpi_status __init acpi_os_initialize(void)
acpi_status __init acpi_os_initialize1(void)
{
- acpi_reserve_resources();
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 10561ce16ed1..8244f013f210 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -26,7 +26,6 @@
#include <linux/device.h>
#include <linux/export.h>
#include <linux/ioport.h>
-#include <linux/list.h>
#include <linux/slab.h>
#ifdef CONFIG_X86
@@ -622,164 +621,3 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
return (type & types) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
-
-struct reserved_region {
- struct list_head node;
- u64 start;
- u64 end;
-};
-
-static LIST_HEAD(reserved_io_regions);
-static LIST_HEAD(reserved_mem_regions);
-
-static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
- char *desc)
-{
- unsigned int length = end - start + 1;
- struct resource *res;
-
- res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
- request_region(start, length, desc) :
- request_mem_region(start, length, desc);
- if (!res)
- return -EIO;
-
- res->flags &= ~flags;
- return 0;
-}
-
-static int add_region_before(u64 start, u64 end, u8 space_id,
- unsigned long flags, char *desc,
- struct list_head *head)
-{
- struct reserved_region *reg;
- int error;
-
- reg = kmalloc(sizeof(*reg), GFP_KERNEL);
- if (!reg)
- return -ENOMEM;
-
- error = request_range(start, end, space_id, flags, desc);
- if (error) {
- kfree(reg);
- return error;
- }
-
- reg->start = start;
- reg->end = end;
- list_add_tail(&reg->node, head);
- return 0;
-}
-
-/**
- * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
- * @start: Starting address of the region.
- * @length: Length of the region.
- * @space_id: Identifier of address space to reserve the region from.
- * @flags: Resource flags to clear for the region after requesting it.
- * @desc: Region description (for messages).
- *
- * Reserve an I/O or memory region as a system resource to prevent others from
- * using it. If the new region overlaps with one of the regions (in the given
- * address space) already reserved by this routine, only the non-overlapping
- * parts of it will be reserved.
- *
- * Returned is either 0 (success) or a negative error code indicating a resource
- * reservation problem. It is the code of the first encountered error, but the
- * routine doesn't abort until it has attempted to request all of the parts of
- * the new region that don't overlap with other regions reserved previously.
- *
- * The resources requested by this routine are never released.
- */
-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
- unsigned long flags, char *desc)
-{
- struct list_head *regions;
- struct reserved_region *reg;
- u64 end = start + length - 1;
- int ret = 0, error = 0;
-
- if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- regions = &reserved_io_regions;
- else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- regions = &reserved_mem_regions;
- else
- return -EINVAL;
-
- if (list_empty(regions))
- return add_region_before(start, end, space_id, flags, desc, regions);
-
- list_for_each_entry(reg, regions, node)
- if (reg->start == end + 1) {
- /* The new region can be prepended to this one. */
- ret = request_range(start, end, space_id, flags, desc);
- if (!ret)
- reg->start = start;
-
- return ret;
- } else if (reg->start > end) {
- /* No overlap. Add the new region here and get out. */
- return add_region_before(start, end, space_id, flags,
- desc, &reg->node);
- } else if (reg->end == start - 1) {
- goto combine;
- } else if (reg->end >= start) {
- goto overlap;
- }
-
- /* The new region goes after the last existing one. */
- return add_region_before(start, end, space_id, flags, desc, regions);
-
- overlap:
- /*
- * The new region overlaps an existing one.
- *
- * The head part of the new region immediately preceding the existing
- * overlapping one can be combined with it right away.
- */
- if (reg->start > start) {
- error = request_range(start, reg->start - 1, space_id, flags, desc);
- if (error)
- ret = error;
- else
- reg->start = start;
- }
-
- combine:
- /*
- * The new region is adjacent to an existing one. If it extends beyond
- * that region all the way to the next one, it is possible to combine
- * all three of them.
- */
- while (reg->end < end) {
- struct reserved_region *next = NULL;
- u64 a = reg->end + 1, b = end;
-
- if (!list_is_last(&reg->node, regions)) {
- next = list_next_entry(reg, node);
- if (next->start <= end)
- b = next->start - 1;
- }
- error = request_range(a, b, space_id, flags, desc);
- if (!error) {
- if (next && next->start == b + 1) {
- reg->end = next->end;
- list_del(&next->node);
- kfree(next);
- } else {
- reg->end = end;
- break;
- }
- } else if (next) {
- if (!ret)
- ret = error;
-
- reg = next;
- } else {
- break;
- }
- }
-
- return ret ? ret : error;
-}
-EXPORT_SYMBOL_GPL(acpi_reserve_region);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 2649a068671d..ec256352f423 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1019,6 +1019,29 @@ static bool acpi_of_match_device(struct acpi_device *adev,
return false;
}
+static bool __acpi_match_device_cls(const struct acpi_device_id *id,
+ struct acpi_hardware_id *hwid)
+{
+ int i, msk, byte_shift;
+ char buf[3];
+
+ if (!id->cls)
+ return false;
+
+ /* Apply class-code bitmask, before checking each class-code byte */
+ for (i = 1; i <= 3; i++) {
+ byte_shift = 8 * (3 - i);
+ msk = (id->cls_msk >> byte_shift) & 0xFF;
+ if (!msk)
+ continue;
+
+ sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
+ if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
+ return false;
+ }
+ return true;
+}
+
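__acpi_match_device_cls() compares a 24-bit class code against its 6-hex-digit string form two characters at a time, skipping any byte whose mask is zero. A self-contained sketch of the same byte-wise masked comparison (match_cls is a hypothetical name; the kernel helper also walks a list of hardware IDs, which is omitted here):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool match_cls(unsigned cls, unsigned cls_msk, const char *hwid)
{
	char buf[3];

	for (int i = 1; i <= 3; i++) {
		int shift = 8 * (3 - i);
		unsigned msk = (cls_msk >> shift) & 0xFF;

		if (!msk)
			continue;       /* this byte is "don't care" */

		snprintf(buf, sizeof(buf), "%02x", (cls >> shift) & msk);
		if (strncmp(buf, &hwid[(i - 1) * 2], 2))
			return false;
	}
	return true;
}

int main(void)
{
	/* 0x010601 is the SATA AHCI class code; "010601" is its string form. */
	printf("%d\n", match_cls(0x010601, 0xffffff, "010601")); /* 1 */
	printf("%d\n", match_cls(0x010601, 0xffff00, "010699")); /* 1: last byte masked out */
	return 0;
}

This is what lets the ahci_platform match table further down bind by PCI class code (ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff)) instead of enumerating vendor HIDs.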
static const struct acpi_device_id *__acpi_match_device(
struct acpi_device *device,
const struct acpi_device_id *ids,
@@ -1036,9 +1059,12 @@ static const struct acpi_device_id *__acpi_match_device(
list_for_each_entry(hwid, &device->pnp.ids, list) {
/* First, check the ACPI/PNP IDs provided by the caller. */
- for (id = ids; id->id[0]; id++)
- if (!strcmp((char *) id->id, hwid->id))
+ for (id = ids; id->id[0] || id->cls; id++) {
+ if (id->id[0] && !strcmp((char *) id->id, hwid->id))
return id;
+ else if (id->cls && __acpi_match_device_cls(id, hwid))
+ return id;
+ }
/*
* Next, check ACPI_DT_NAMESPACE_HID and try to match the
@@ -2101,6 +2127,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
if (info->valid & ACPI_VALID_UID)
pnp->unique_id = kstrdup(info->unique_id.string,
GFP_KERNEL);
+ if (info->valid & ACPI_VALID_CLS)
+ acpi_add_id(pnp, info->class_code.string);
kfree(info);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6d17a3b65ef7..15e40ee62a94 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -48,7 +48,7 @@ config ATA_VERBOSE_ERROR
config ATA_ACPI
bool "ATA ACPI Support"
- depends on ACPI && PCI
+ depends on ACPI
default y
help
This option adds support for ATA-related ACPI objects.
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 614c78f510f0..1befb114c384 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -20,6 +20,8 @@
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
#include "ahci.h"
#define DRV_NAME "ahci"
@@ -79,12 +81,19 @@ static const struct of_device_id ahci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
+static const struct acpi_device_id ahci_acpi_match[] = {
+ { ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
.remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
+ .acpi_match_table = ahci_acpi_match,
.pm = &ahci_pm_ops,
},
};
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 9c4288362a8e..894bda114224 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -563,10 +563,8 @@ static void fw_dev_release(struct device *dev)
kfree(fw_priv);
}
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
-
if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
@@ -577,6 +575,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ int err = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_priv->buf)
+ err = do_firmware_uevent(fw_priv, env);
+ mutex_unlock(&fw_lock);
+ return err;
+}
+
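The split above exists so the uevent path takes fw_lock and re-checks fw_priv->buf before touching it, instead of racing with teardown clearing the pointer. A minimal pthread sketch of that check-under-the-lock discipline (emit_uevent and the shared buf are illustrative names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static const char *buf = "fw_id"; /* a teardown path may set this to NULL */

/* Only dereference the shared pointer while holding the same lock the
 * teardown path takes before clearing it. */
static int emit_uevent(void)
{
	int err = 0;

	pthread_mutex_lock(&lock);
	if (buf)                /* re-check under the lock */
		err = printf("FIRMWARE=%s\n", buf) < 0 ? -1 : 0;
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	return emit_uevent() ? 1 : 0;
}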
static struct class firmware_class = {
.name = "firmware",
.class_attrs = firmware_class_attrs,
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index cdd547bd67df..0ee43c1056e0 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,6 +6,7 @@
* This file is released under the GPLv2.
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -19,6 +20,8 @@
#include <linux/suspend.h>
#include <linux/export.h>
+#define GENPD_RETRY_MAX_MS 250 /* Approximate */
+
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
({ \
type (*__routine)(struct device *__d); \
@@ -2131,6 +2134,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
struct generic_pm_domain *pd;
+ unsigned int i;
int ret = 0;
pd = pm_genpd_lookup_dev(dev);
@@ -2139,10 +2143,12 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
dev_dbg(dev, "removing from PM domain %s\n", pd->name);
- while (1) {
+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
ret = pm_genpd_remove_device(pd, dev);
if (ret != -EAGAIN)
break;
+
+ mdelay(i);
cond_resched();
}
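The unbounded while (1) retry loop becomes a bounded one with doubling delays: 1, 2, 4, ... ms, which keeps the total wait near GENPD_RETRY_MAX_MS instead of spinning forever on a busy domain. A runnable sketch of the same bounded exponential backoff, with try_remove standing in for the -EAGAIN-returning operation:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define RETRY_MAX_MS 250

/* Hypothetical operation that needs a few attempts before it succeeds. */
static int try_remove(int *attempts)
{
	return --(*attempts) > 0 ? -EAGAIN : 0;
}

int main(void)
{
	int attempts = 4, ret = 0;

	/* 1 + 2 + 4 + ... caps the cumulative wait near RETRY_MAX_MS. */
	for (unsigned i = 1; i < RETRY_MAX_MS; i <<= 1) {
		ret = try_remove(&attempts);
		if (ret != -EAGAIN)
			break;
		usleep(i * 1000);       /* back off before retrying */
		printf("retrying after %u ms\n", i);
	}
	return ret;
}

If every attempt still returns -EAGAIN the loop exits with that error after roughly 250 ms, which is the whole point of bounding it.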
@@ -2183,6 +2189,7 @@ int genpd_dev_pm_attach(struct device *dev)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
+ unsigned int i;
int ret;
if (!dev->of_node)
@@ -2218,10 +2225,12 @@ int genpd_dev_pm_attach(struct device *dev)
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
- while (1) {
+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
ret = pm_genpd_add_device(pd, dev);
if (ret != -EAGAIN)
break;
+
+ mdelay(i);
cond_resched();
}
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 7470004ca810..eb6e67451dec 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -45,14 +45,12 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
return -EEXIST;
}
- dev->power.wakeirq = wirq;
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
err = device_wakeup_attach_irq(dev, wirq);
- if (err)
- return err;
+ if (!err)
+ dev->power.wakeirq = wirq;
- return 0;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return err;
}
/**
@@ -105,10 +103,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
return;
spin_lock_irqsave(&dev->power.lock, flags);
+ device_wakeup_detach_irq(dev);
dev->power.wakeirq = NULL;
spin_unlock_irqrestore(&dev->power.lock, flags);
- device_wakeup_detach_irq(dev);
if (wirq->dedicated_irq)
free_irq(wirq->irq, wirq);
kfree(wirq);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 40f71603378c..51f15bc15774 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -281,32 +281,25 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
* Attach a device wakeirq to the wakeup source so the device
* wake IRQ can be configured automatically for suspend and
* resume.
+ *
+ * Call under the device's power.lock lock.
*/
int device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
struct wakeup_source *ws;
- int ret = 0;
- spin_lock_irq(&dev->power.lock);
ws = dev->power.wakeup;
if (!ws) {
dev_err(dev, "forgot to call call device_init_wakeup?\n");
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
- if (ws->wakeirq) {
- ret = -EEXIST;
- goto unlock;
- }
+ if (ws->wakeirq)
+ return -EEXIST;
ws->wakeirq = wakeirq;
-
-unlock:
- spin_unlock_irq(&dev->power.lock);
-
- return ret;
+ return 0;
}
/**
@@ -314,20 +307,16 @@ unlock:
* @dev: Device to handle
*
* Removes a device wakeirq from the wakeup source.
+ *
+ * Call under the device's power.lock lock.
*/
void device_wakeup_detach_irq(struct device *dev)
{
struct wakeup_source *ws;
- spin_lock_irq(&dev->power.lock);
ws = dev->power.wakeup;
- if (!ws)
- goto unlock;
-
- ws->wakeirq = NULL;
-
-unlock:
- spin_unlock_irq(&dev->power.lock);
+ if (ws)
+ ws->wakeirq = NULL;
}
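Across these three hunks the locking moves from the callee (device_wakeup_attach_irq/detach_irq) to the caller in wakeirq.c, so the wakeup-source update and dev->power.wakeirq update happen inside a single critical section; the "Call under the device's power.lock lock" comments record the new contract. A compact sketch of the caller-holds-lock convention (struct dev and the function names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct dev {
	pthread_mutex_t lock;
	void *wakeirq;          /* guarded by ->lock */
};

/* Contract: caller holds d->lock. Keeping the lock in the caller lets it
 * update its own state and the callee's in one critical section. */
static void detach_irq_locked(struct dev *d)
{
	d->wakeirq = NULL;
}

static void clear_wake_irq(struct dev *d)
{
	pthread_mutex_lock(&d->lock);
	detach_irq_locked(d);   /* both updates under one acquisition */
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, (void *)1 };
	clear_wake_irq(&d);
	return d.wakeirq != NULL;
}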
/**
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index a6ee3d750c30..6b88a35fb048 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -419,14 +419,6 @@ static int in_flight_summary_show(struct seq_file *m, void *pos)
return 0;
}
-/* simple_positive(file->f_path.dentry) respectively debugfs_positive(),
- * but neither is "reachable" from here.
- * So we have our own inline version of it above. :-( */
-static inline int debugfs_positive(struct dentry *dentry)
-{
- return d_really_is_positive(dentry) && !d_unhashed(dentry);
-}
-
/* make sure at *open* time that the respective object won't go away. */
static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
void *data, struct kref *kref,
@@ -444,7 +436,7 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
/* serialize with d_delete() */
mutex_lock(&d_inode(parent)->i_mutex);
/* Make sure the object is still alive */
- if (debugfs_positive(file->f_path.dentry)
+ if (simple_positive(file->f_path.dentry)
&& kref_get_unless_zero(kref))
ret = 0;
mutex_unlock(&d_inode(parent)->i_mutex);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 40580dc7f41c..f7a4c9d7f721 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -588,7 +588,7 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
spin_lock_irq(&lo->lo_lock);
if (lo->lo_backing_file)
- p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
+ p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
spin_unlock_irq(&lo->lo_lock);
if (IS_ERR_OR_NULL(p))
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 34338d7438f5..d1d6141920d3 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1474,6 +1474,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->qid = qid;
+ nvmeq->cq_vector = -1;
dev->queues[qid] = nvmeq;
/* make sure queue descriptor is set before queue count, for kthread */
@@ -1726,8 +1727,10 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
nvmeq->cq_vector = 0;
result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
- if (result)
+ if (result) {
+ nvmeq->cq_vector = -1;
goto free_nvmeq;
+ }
return result;
@@ -2213,8 +2216,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
dev->max_qid = nr_io_queues;
result = queue_request_irq(dev, adminq, adminq->irqname);
- if (result)
+ if (result) {
+ adminq->cq_vector = -1;
goto free_queues;
+ }
/* Free previously allocated queues that are no longer usable */
nvme_free_queues(dev, nr_io_queues + 1);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index ec6c5c6e1ac9..d94529d5c8e9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -346,6 +346,7 @@ struct rbd_device {
struct rbd_image_header header;
unsigned long flags; /* possibly lock protected */
struct rbd_spec *spec;
+ struct rbd_options *opts;
char *header_name;
@@ -724,34 +725,36 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
}
/*
- * mount options
+ * (Per device) rbd map options
*/
enum {
+ Opt_queue_depth,
Opt_last_int,
/* int args above */
Opt_last_string,
/* string args above */
Opt_read_only,
Opt_read_write,
- /* Boolean args above */
- Opt_last_bool,
+ Opt_err
};
static match_table_t rbd_opts_tokens = {
+ {Opt_queue_depth, "queue_depth=%d"},
/* int args above */
/* string args above */
{Opt_read_only, "read_only"},
{Opt_read_only, "ro"}, /* Alternate spelling */
{Opt_read_write, "read_write"},
{Opt_read_write, "rw"}, /* Alternate spelling */
- /* Boolean args above */
- {-1, NULL}
+ {Opt_err, NULL}
};
struct rbd_options {
+ int queue_depth;
bool read_only;
};
+#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT false
static int parse_rbd_opts_token(char *c, void *private)
@@ -761,27 +764,27 @@ static int parse_rbd_opts_token(char *c, void *private)
int token, intval, ret;
token = match_token(c, rbd_opts_tokens, argstr);
- if (token < 0)
- return -EINVAL;
-
if (token < Opt_last_int) {
ret = match_int(&argstr[0], &intval);
if (ret < 0) {
- pr_err("bad mount option arg (not int) "
- "at '%s'\n", c);
+ pr_err("bad mount option arg (not int) at '%s'\n", c);
return ret;
}
dout("got int token %d val %d\n", token, intval);
} else if (token > Opt_last_int && token < Opt_last_string) {
- dout("got string token %d val %s\n", token,
- argstr[0].from);
- } else if (token > Opt_last_string && token < Opt_last_bool) {
- dout("got Boolean token %d\n", token);
+ dout("got string token %d val %s\n", token, argstr[0].from);
} else {
dout("got token %d\n", token);
}
switch (token) {
+ case Opt_queue_depth:
+ if (intval < 1) {
+ pr_err("queue_depth out of range\n");
+ return -EINVAL;
+ }
+ rbd_opts->queue_depth = intval;
+ break;
case Opt_read_only:
rbd_opts->read_only = true;
break;
@@ -789,9 +792,10 @@ static int parse_rbd_opts_token(char *c, void *private)
rbd_opts->read_only = false;
break;
default:
- rbd_assert(false);
- break;
+ /* libceph prints "bad option" msg */
+ return -EINVAL;
}
+
return 0;
}
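The parser now validates queue_depth's range up front and returns -EINVAL for unknown tokens rather than asserting. A hedged userspace sketch of the same validate-then-store option parsing, using strtol in place of the kernel's match_token machinery (struct opts and parse_token are illustrative names):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opts { int queue_depth; int read_only; };

/* Parse one "key" or "key=value" token, rejecting out-of-range values
 * instead of silently clamping them. */
static int parse_token(const char *tok, struct opts *o)
{
	if (strncmp(tok, "queue_depth=", 12) == 0) {
		char *end;
		long v = strtol(tok + 12, &end, 10);

		if (*end || v < 1 || v > INT_MAX) {
			fprintf(stderr, "queue_depth out of range\n");
			return -EINVAL;
		}
		o->queue_depth = (int)v;
	} else if (!strcmp(tok, "read_only") || !strcmp(tok, "ro")) {
		o->read_only = 1;
	} else if (!strcmp(tok, "read_write") || !strcmp(tok, "rw")) {
		o->read_only = 0;
	} else {
		return -EINVAL; /* unknown option: let the caller report it */
	}
	return 0;
}

int main(void)
{
	struct opts o = { 128, 0 };     /* defaults */
	return parse_token("queue_depth=64", &o) || o.queue_depth != 64;
}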
@@ -1563,22 +1567,39 @@ static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
/*
* Wait for an object request to complete. If interrupted, cancel the
* underlying osd request.
+ *
+ * @timeout: in jiffies, 0 means "wait forever"
*/
-static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
+static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
+ unsigned long timeout)
{
- int ret;
+ long ret;
dout("%s %p\n", __func__, obj_request);
-
- ret = wait_for_completion_interruptible(&obj_request->completion);
- if (ret < 0) {
- dout("%s %p interrupted\n", __func__, obj_request);
+ ret = wait_for_completion_interruptible_timeout(
+ &obj_request->completion,
+ ceph_timeout_jiffies(timeout));
+ if (ret <= 0) {
+ if (ret == 0)
+ ret = -ETIMEDOUT;
rbd_obj_request_end(obj_request);
- return ret;
+ } else {
+ ret = 0;
}
- dout("%s %p done\n", __func__, obj_request);
- return 0;
+ dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
+ return ret;
+}
+
+static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
+{
+ return __rbd_obj_request_wait(obj_request, 0);
+}
+
+static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
+ unsigned long timeout)
+{
+ return __rbd_obj_request_wait(obj_request, timeout);
}
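wait_for_completion_interruptible_timeout() returns a negative value if interrupted, 0 on timeout, and the remaining time (positive) on completion; the wrapper above folds that three-way result into a plain 0/-errno and cancels the request on any failure. A small sketch of that return-value mapping, with wait_primitive standing in for the kernel primitive:

#include <errno.h>
#include <stdio.h>

/* Hypothetical primitive with the same convention: <0 interrupted,
 * 0 timed out, >0 time remaining on success. */
static long wait_primitive(void) { return 0; }

static int wait_request(void)
{
	long ret = wait_primitive();

	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT; /* fold "timed out" into an errno */
		/* cancel the underlying request here before reporting failure */
	} else {
		ret = 0;        /* completed: callers only care about 0 */
	}
	return (int)ret;
}

int main(void)
{
	printf("%d\n", wait_request());  /* -ETIMEDOUT */
	return 0;
}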
static void rbd_img_request_complete(struct rbd_img_request *img_request)
@@ -2001,11 +2022,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
rbd_assert(obj_request_type_valid(type));
size = strlen(object_name) + 1;
- name = kmalloc(size, GFP_KERNEL);
+ name = kmalloc(size, GFP_NOIO);
if (!name)
return NULL;
- obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
+ obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
if (!obj_request) {
kfree(name);
return NULL;
@@ -2376,7 +2397,7 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
}
if (opcode == CEPH_OSD_OP_DELETE)
- osd_req_op_init(osd_request, num_ops, opcode);
+ osd_req_op_init(osd_request, num_ops, opcode, 0);
else
osd_req_op_extent_init(osd_request, num_ops, opcode,
offset, length, 0, 0);
@@ -2848,7 +2869,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
goto out;
stat_request->callback = rbd_img_obj_exists_callback;
- osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
+ osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
false, false);
rbd_osd_req_format_read(stat_request);
@@ -3122,6 +3143,7 @@ static struct rbd_obj_request *rbd_obj_watch_request_helper(
bool watch)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct ceph_options *opts = osdc->client->options;
struct rbd_obj_request *obj_request;
int ret;
@@ -3148,7 +3170,7 @@ static struct rbd_obj_request *rbd_obj_watch_request_helper(
if (ret)
goto out;
- ret = rbd_obj_request_wait(obj_request);
+ ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
if (ret)
goto out;
@@ -3750,10 +3772,9 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
rbd_dev->tag_set.ops = &rbd_mq_ops;
- rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
+ rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
- rbd_dev->tag_set.flags =
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
rbd_dev->tag_set.nr_hw_queues = 1;
rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
@@ -3773,6 +3794,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
/* set io sizes to object size */
segment_size = rbd_obj_bytes(&rbd_dev->header);
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+ blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
blk_queue_max_segment_size(q, segment_size);
blk_queue_io_min(q, segment_size);
blk_queue_io_opt(q, segment_size);
@@ -4044,7 +4066,8 @@ static void rbd_spec_free(struct kref *kref)
}
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
- struct rbd_spec *spec)
+ struct rbd_spec *spec,
+ struct rbd_options *opts)
{
struct rbd_device *rbd_dev;
@@ -4058,8 +4081,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
INIT_LIST_HEAD(&rbd_dev->node);
init_rwsem(&rbd_dev->header_rwsem);
- rbd_dev->spec = spec;
rbd_dev->rbd_client = rbdc;
+ rbd_dev->spec = spec;
+ rbd_dev->opts = opts;
/* Initialize the layout used for all rbd requests */
@@ -4075,6 +4099,7 @@ static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
+ kfree(rbd_dev->opts);
kfree(rbd_dev);
}
@@ -4933,6 +4958,7 @@ static int rbd_add_parse_args(const char *buf,
goto out_mem;
rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
+ rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
copts = ceph_parse_options(options, mon_addrs,
mon_addrs + mon_addrs_size - 1,
@@ -4963,8 +4989,8 @@ out_err:
*/
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
+ struct ceph_options *opts = rbdc->client->options;
u64 newest_epoch;
- unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
int tries = 0;
int ret;
@@ -4979,7 +5005,8 @@ again:
if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
ceph_monc_request_next_osdmap(&rbdc->client->monc);
(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
- newest_epoch, timeout);
+ newest_epoch,
+ opts->mount_timeout);
goto again;
} else {
/* the osdmap we have is new enough */
@@ -5148,7 +5175,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
rbdc = __rbd_get_client(rbd_dev->rbd_client);
ret = -ENOMEM;
- parent = rbd_dev_create(rbdc, parent_spec);
+ parent = rbd_dev_create(rbdc, parent_spec, NULL);
if (!parent)
goto out_err;
@@ -5394,9 +5421,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
if (rc < 0)
goto err_out_module;
- read_only = rbd_opts->read_only;
- kfree(rbd_opts);
- rbd_opts = NULL; /* done with this */
rbdc = rbd_get_client(ceph_opts);
if (IS_ERR(rbdc)) {
@@ -5422,11 +5446,12 @@ static ssize_t do_rbd_add(struct bus_type *bus,
goto err_out_client;
}
- rbd_dev = rbd_dev_create(rbdc, spec);
+ rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
if (!rbd_dev)
goto err_out_client;
rbdc = NULL; /* rbd_dev now owns this */
spec = NULL; /* rbd_dev now owns this */
+ rbd_opts = NULL; /* rbd_dev now owns this */
rc = rbd_dev_image_probe(rbd_dev, true);
if (rc < 0)
@@ -5434,6 +5459,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
/* If we are mapping a snapshot it must be marked read-only */
+ read_only = rbd_dev->opts->read_only;
if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
read_only = true;
rbd_dev->mapping.read_only = read_only;
@@ -5458,6 +5484,7 @@ err_out_client:
rbd_put_client(rbdc);
err_out_args:
rbd_spec_put(spec);
+ kfree(rbd_opts);
err_out_module:
module_put(THIS_MODULE);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 0b4188b9af7c..c6dea3f6917b 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -581,7 +581,7 @@ static inline int needs_ilk_vtd_wa(void)
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
intel_iommu_gfx_mapped)
return 1;
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 152dcb3f7b5f..61566bcefa53 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -116,8 +116,10 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
h32mxclk->pmc = pmc;
clk = clk_register(NULL, &h32mxclk->hw);
- if (!clk)
+ if (!clk) {
+ kfree(h32mxclk);
return;
+ }
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index c2400456a044..27dfa965cfed 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -171,8 +171,10 @@ at91_clk_register_main_osc(struct at91_pmc *pmc,
irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
ret = request_irq(osc->irq, clk_main_osc_irq_handler,
IRQF_TRIGGER_HIGH, name, osc);
- if (ret)
+ if (ret) {
+ kfree(osc);
return ERR_PTR(ret);
+ }
if (bypass)
pmc_write(pmc, AT91_CKGR_MOR,
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index f98eafe9b12d..5b3ded5205a2 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -165,12 +165,16 @@ at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
ret = request_irq(master->irq, clk_master_irq_handler,
IRQF_TRIGGER_HIGH, "clk-master", master);
- if (ret)
+ if (ret) {
+ kfree(master);
return ERR_PTR(ret);
+ }
clk = clk_register(NULL, &master->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(master->irq, master);
kfree(master);
+ }
return clk;
}
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index cbbe40377ad6..18b60f4895a6 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -346,12 +346,16 @@ at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
id ? "clk-pllb" : "clk-plla", pll);
- if (ret)
+ if (ret) {
+ kfree(pll);
return ERR_PTR(ret);
+ }
clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(pll->irq, pll);
kfree(pll);
+ }
return clk;
}
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index a76d03fd577b..58008b3e8bc1 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -130,13 +130,17 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name,
irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
ret = request_irq(sys->irq, clk_system_irq_handler,
IRQF_TRIGGER_HIGH, name, sys);
- if (ret)
+ if (ret) {
+ kfree(sys);
return ERR_PTR(ret);
+ }
}
clk = clk_register(NULL, &sys->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(sys->irq, sys);
kfree(sys);
+ }
return clk;
}
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index ae3263bc1476..30dd697b1668 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -118,12 +118,16 @@ at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
ret = request_irq(utmi->irq, clk_utmi_irq_handler,
IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
- if (ret)
+ if (ret) {
+ kfree(utmi);
return ERR_PTR(ret);
+ }
clk = clk_register(NULL, &utmi->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(utmi->irq, utmi);
kfree(utmi);
+ }
return clk;
}
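The five at91 hunks above (clk-h32mx, clk-main, clk-master, clk-pll, clk-system, clk-utmi) all apply the same fix: every error exit frees exactly what the earlier steps acquired, in reverse order, instead of leaking the allocation or the IRQ. A generic sketch of that unwind pattern; grab_irq, release_irq, and register_clk are hypothetical stand-ins for request_irq()/free_irq()/clk_register():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int grab_irq(void)     { return 0; }
static void release_irq(void) { }
static int register_clk(void) { return -ENOMEM; }

/* Unwind in reverse order of acquisition: each failure path releases
 * exactly what the preceding steps took, no more and no less. */
static int setup(void)
{
	void *obj = malloc(64);
	int ret;

	if (!obj)
		return -ENOMEM;

	ret = grab_irq();
	if (ret) {
		free(obj);      /* nothing else held yet */
		return ret;
	}

	ret = register_clk();
	if (ret) {
		release_irq();  /* undo step 2 ... */
		free(obj);      /* ... then step 1 */
		return ret;
	}
	return 0;               /* success: resources intentionally stay held */
}

int main(void)
{
	printf("%d\n", setup());
	return 0;
}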
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index e19c09cd9645..f630e1bbdcfe 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -222,10 +222,6 @@ void __init iproc_asiu_setup(struct device_node *node,
struct iproc_asiu_clk *asiu_clk;
const char *clk_name;
- clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
- if (WARN_ON(!clk_name))
- goto err_clk_register;
-
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
if (WARN_ON(ret))
@@ -259,7 +255,7 @@ void __init iproc_asiu_setup(struct device_node *node,
err_clk_register:
for (i = 0; i < num_clks; i++)
- kfree(asiu->clks[i].name);
+ clk_unregister(asiu->clk_data.clks[i]);
iounmap(asiu->gate_base);
err_iomap_gate:
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 46fb84bc2674..2dda4e8295a9 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -366,7 +366,7 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
val = readl(pll->pll_base + ctrl->ndiv_int.offset);
ndiv_int = (val >> ctrl->ndiv_int.shift) &
bit_mask(ctrl->ndiv_int.width);
- ndiv = ndiv_int << ctrl->ndiv_int.shift;
+ ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift;
if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
@@ -374,7 +374,8 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
bit_mask(ctrl->ndiv_frac.width);
if (ndiv_frac != 0)
- ndiv = (ndiv_int << ctrl->ndiv_int.shift) | ndiv_frac;
+ ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) |
+ ndiv_frac;
}
val = readl(pll->pll_base + ctrl->pdiv.offset);
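The (u64) casts above matter because ndiv_int is a 32-bit value: shifting it left while it is still 32 bits drops the high bits before the assignment to the wider ndiv, however wide ndiv is. A two-line demonstration of the difference (values chosen so the 32-bit shift visibly truncates):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ndiv_int = 0x80;
	unsigned shift = 28;

	/* 32-bit shift: the result wraps before it is widened. */
	uint64_t wrong = ndiv_int << shift;

	/* Promote to 64 bits first, then shift: no truncation. */
	uint64_t right = (uint64_t)ndiv_int << shift;

	printf("wrong=%" PRIx64 " right=%" PRIx64 "\n", wrong, right);
	return 0;
}

This prints wrong=0 right=800000000, which is exactly the class of rate miscalculation the PLL fix addresses.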
@@ -655,10 +656,6 @@ void __init iproc_pll_clk_setup(struct device_node *node,
memset(&init, 0, sizeof(init));
parent_name = node->name;
- clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
- if (WARN_ON(!clk_name))
- goto err_clk_register;
-
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
if (WARN_ON(ret))
@@ -690,10 +687,8 @@ void __init iproc_pll_clk_setup(struct device_node *node,
return;
err_clk_register:
- for (i = 0; i < num_clks; i++) {
- kfree(pll->clks[i].name);
+ for (i = 0; i < num_clks; i++)
clk_unregister(pll->clk_data.clks[i]);
- }
err_pll_register:
if (pll->asiu_base)
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index b9b12a742970..3f6f7ad39490 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -268,7 +268,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
memcpy(table, stm32f42xx_gate_map, sizeof(table));
/* only bits set in table can be used as indices */
- if (WARN_ON(secondary > 8 * sizeof(table) ||
+ if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||
0 == (table[BIT_ULL_WORD(secondary)] &
BIT_ULL_MASK(secondary))))
return -EINVAL;
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 4b9e04cdf7e8..8b6523d15fb8 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -700,6 +700,22 @@ static const struct mtk_composite peri_clks[] __initconst = {
MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
};
+static struct clk_onecell_data *mt8173_top_clk_data __initdata;
+static struct clk_onecell_data *mt8173_pll_clk_data __initdata;
+
+static void __init mtk_clk_enable_critical(void)
+{
+ if (!mt8173_top_clk_data || !mt8173_pll_clk_data)
+ return;
+
+ clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
+ clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA7PLL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_MEM_SEL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_CCI400_SEL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_RTC_SEL]);
+}
+
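mtk_clk_enable_critical() is called from both provider init paths but returns early until both clk_onecell_data pointers are populated, so the critical CPU/bus clocks are enabled exactly once, after whichever provider probes second. A compact sketch of that "last one in does the work" rendezvous (the names are illustrative):

#include <stdio.h>

static int top_ready, pll_ready;

/* Called from both init paths; only the second call does the work,
 * because by then both providers have registered their clocks. */
static void enable_critical(void)
{
	if (!top_ready || !pll_ready)
		return;
	puts("enabling CPU/bus critical clocks");
}

static void top_init(void) { top_ready = 1; enable_critical(); }
static void pll_init(void) { pll_ready = 1; enable_critical(); }

int main(void)
{
	top_init();     /* too early: PLLs not registered yet */
	pll_init();     /* both ready: critical clocks enabled once */
	return 0;
}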
static void __init mtk_topckgen_init(struct device_node *node)
{
struct clk_onecell_data *clk_data;
@@ -712,19 +728,19 @@ static void __init mtk_topckgen_init(struct device_node *node)
return;
}
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ mt8173_top_clk_data = clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
&mt8173_clk_lock, clk_data);
- clk_prepare_enable(clk_data->clks[CLK_TOP_CCI400_SEL]);
-
r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
+
+ mtk_clk_enable_critical();
}
CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init);
@@ -818,13 +834,13 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
{
struct clk_onecell_data *clk_data;
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ mt8173_pll_clk_data = clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
if (!clk_data)
return;
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
+ mtk_clk_enable_critical();
}
CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys",
mtk_apmixedsys_init);
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b95d17fbb8d7..92936f0912d2 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -530,19 +530,16 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
struct freq_tbl f = *rcg->freq_tbl;
const struct frac_entry *frac = frac_table_pixel;
- unsigned long request, src_rate;
+ unsigned long request;
int delta = 100000;
u32 mask = BIT(rcg->hid_width) - 1;
u32 hid_div;
- int index = qcom_find_src_index(hw, rcg->parent_map, f.src);
- struct clk *parent = clk_get_parent_by_index(hw->clk, index);
for (; frac->num; frac++) {
request = (rate * frac->den) / frac->num;
- src_rate = __clk_round_rate(parent, request);
- if ((src_rate < (request - delta)) ||
- (src_rate > (request + delta)))
+ if ((parent_rate < (request - delta)) ||
+ (parent_rate > (request + delta)))
continue;
regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 657ca14ba709..8dd8cce27361 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -190,7 +190,7 @@ static struct clk *clk_register_flexgen(const char *name,
init.name = name;
init.ops = &flexgen_ops;
- init.flags = CLK_IS_BASIC | flexgen_flags;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE | flexgen_flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
@@ -303,6 +303,8 @@ static void __init st_of_flexgen_setup(struct device_node *np)
if (!rlock)
goto err;
+ spin_lock_init(rlock);
+
for (i = 0; i < clk_data->clk_num; i++) {
struct clk *clk;
const char *clk_name;
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index e94197f04b0b..d9eb2e1d8471 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -340,7 +340,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
CLKGEN_FIELD(0x30c, 0xf, 20),
CLKGEN_FIELD(0x310, 0xf, 20) },
.lockstatus_present = true,
- .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
+ .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
.powerup_polarity = 1,
.standby_polarity = 1,
.pll_ops = &st_quadfs_pll_c32_ops,
@@ -489,7 +489,7 @@ static int quadfs_pll_is_enabled(struct clk_hw *hw)
struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
u32 npda = CLKGEN_READ(pll, npda);
- return !!npda;
+ return pll->data->powerup_polarity ? !npda : !!npda;
}
static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
@@ -635,7 +635,7 @@ static struct clk * __init st_clk_register_quadfs_pll(
init.name = name;
init.ops = quadfs->pll_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -774,7 +774,7 @@ static void quadfs_fsynth_disable(struct clk_hw *hw)
if (fs->lock)
spin_lock_irqsave(fs->lock, flags);
- CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);
+ CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);
if (fs->lock)
spin_unlock_irqrestore(fs->lock, flags);
@@ -1082,10 +1082,6 @@ static const struct of_device_id quadfs_of_match[] = {
.compatible = "st,stih407-quadfs660-D",
.data = &st_fs660c32_D_407
},
- {
- .compatible = "st,stih407-quadfs660-D",
- .data = (void *)&st_fs660c32_D_407
- },
{}
};
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 4fbe6e099587..717c4a91a17b 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -237,7 +237,7 @@ static struct clk *clk_register_genamux(const char *name,
init.name = name;
init.ops = &clkgena_divmux_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
init.parent_names = parent_names;
init.num_parents = num_parents;
@@ -513,7 +513,8 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np)
0, &clk_name))
return;
- clk = clk_register_divider_table(NULL, clk_name, parent_name, 0,
+ clk = clk_register_divider_table(NULL, clk_name, parent_name,
+ CLK_GET_RATE_NOCACHE,
reg + data->offset, data->shift, 1,
0, data->table, NULL);
if (IS_ERR(clk))
@@ -582,7 +583,7 @@ static struct clkgen_mux_data stih416_a9_mux_data = {
};
static struct clkgen_mux_data stih407_a9_mux_data = {
.offset = 0x1a4,
- .shift = 1,
+ .shift = 0,
.width = 2,
};
@@ -786,7 +787,8 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np)
&mux->hw, &clk_mux_ops,
&div->hw, &clk_divider_ops,
&gate->hw, &clk_gate_ops,
- data->clk_flags);
+ data->clk_flags |
+ CLK_GET_RATE_NOCACHE);
if (IS_ERR(clk)) {
kfree(gate);
kfree(div);
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 106532207213..72d1c27eaffa 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -406,7 +406,7 @@ static struct clk * __init clkgen_pll_register(const char *parent_name,
init.name = clk_name;
init.ops = pll_data->ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
init.parent_names = &parent_name;
init.num_parents = 1;
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 9a82f17d2d73..abf7b37faf73 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -1391,6 +1391,7 @@ static void __init sun6i_init_clocks(struct device_node *node)
CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks);
CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
+CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks);
static void __init sun9i_init_clocks(struct device_node *node)
{
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 879c78423546..2d59038dec43 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -529,6 +529,7 @@ static void __init imx6dl_timer_init_dt(struct device_node *np)
CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
+CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index fc897babab55..e362860c2b50 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -3,7 +3,7 @@
*
* The 2E revision of loongson processor not support this feature.
*
- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, yanh@lemote.com
*
* This file is subject to the terms and conditions of the GNU General Public
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 5bcd575fa96f..e6b658faef63 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1034,8 +1034,8 @@ static int mv_cesa_get_sram(struct platform_device *pdev,
&sram_size);
cp->sram_size = sram_size;
- cp->sram_pool = of_get_named_gen_pool(pdev->dev.of_node,
- "marvell,crypto-srams", 0);
+ cp->sram_pool = of_gen_pool_get(pdev->dev.of_node,
+ "marvell,crypto-srams", 0);
if (cp->sram_pool) {
cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size,
&cp->sram_dma);
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 67f80813a06f..e4311ce0cd78 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -494,8 +494,9 @@ out:
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct blkcipher_desc desc;
- u8 *iv = nx_ctx->priv.ccm.iv;
+ u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct blkcipher_desc desc;
- u8 *iv = nx_ctx->priv.ccm.iv;
+ u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 2617cd4d54dd..dd7e9f3f5b6b 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
- memcpy(nx_ctx->priv.ctr.iv,
+ memcpy(nx_ctx->priv.ctr.nonce,
in_key + key_len - CTR_RFC3686_NONCE_SIZE,
CTR_RFC3686_NONCE_SIZE);
@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
unsigned int nbytes)
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
- u8 *iv = nx_ctx->priv.ctr.iv;
+ u8 iv[16];
+ memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE,
desc->info, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
- desc->info = nx_ctx->priv.ctr.iv;
+ desc->info = iv;
return ctr_aes_nx_crypt(desc, dst, src, nbytes);
}
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 08ac6d48688c..92c993f08213 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -317,6 +317,7 @@ out:
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen;
@@ -326,7 +327,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- desc.info = nx_ctx->priv.gcm.iv;
+ desc.info = rctx->iv;
/* initialize the counter */
*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
@@ -424,8 +425,8 @@ out:
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
memcpy(iv, req->iv, 12);
@@ -434,8 +435,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
static int gcm_aes_nx_decrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
memcpy(iv, req->iv, 12);
@@ -445,7 +446,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
@@ -457,7 +459,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 8c2faffab4a3..c2f7d4befb55 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
switch (key_len) {
case AES_KEYSIZE_128:
@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
return -EINVAL;
}
- memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
+ memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
return 0;
}
@@ -148,32 +149,29 @@ out:
return rc;
}
-static int nx_xcbc_init(struct shash_desc *desc)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
{
- struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- struct nx_sg *out_sg;
- int len;
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_AES);
+ err = nx_crypto_ctx_aes_xcbc_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
- memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
- memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
-
- len = AES_BLOCK_SIZE;
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, nx_ctx->ap->sglen);
+ return 0;
+}
- if (len != AES_BLOCK_SIZE)
- return -EINVAL;
+static int nx_xcbc_init(struct shash_desc *desc)
+{
+ struct xcbc_state *sctx = shash_desc_ctx(desc);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ memset(sctx, 0, sizeof *sctx);
return 0;
}
@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
u32 to_process = 0, leftover, total;
unsigned int max_sg_len;
unsigned long irq_flags;
@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ data_len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &len, nx_ctx->ap->sglen);
+
+ if (data_len != AES_BLOCK_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
do {
to_process = total - to_process;
to_process = to_process & ~(AES_BLOCK_SIZE - 1);
@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
(u8 *) sctx->buffer,
&data_len,
max_sg_len);
- if (data_len != sctx->count)
- return -EINVAL;
+ if (data_len != sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
}
data_len = to_process - sctx->count;
@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
&data_len,
max_sg_len);
- if (data_len != to_process - sctx->count)
- return -EINVAL;
+ if (data_len != to_process - sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
&len, nx_ctx->ap->sglen);
- if (len != sctx->count)
- return -EINVAL;
+ if (len != sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
nx_ctx->ap->sglen);
- if (len != AES_BLOCK_SIZE)
- return -EINVAL;
+ if (len != AES_BLOCK_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_aes_xcbc_init,
+ .cra_init = nx_crypto_ctx_aes_xcbc_init2,
.cra_exit = nx_crypto_ctx_exit,
}
};
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 4e91bdb83c59..08f8d5cd6334 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -29,34 +29,28 @@
#include "nx.h"
-static int nx_sha256_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_sg *out_sg;
- int len;
- u32 max_sg_len;
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+ err = nx_crypto_ctx_sha_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
- max_sg_len = min_t(u64, nx_ctx->ap->sglen,
- nx_driver.of.max_sg_len/sizeof(struct nx_sg));
- max_sg_len = min_t(u64, max_sg_len,
- nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ return 0;
+}
- len = SHA256_DIGEST_SIZE;
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, max_sg_len);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+static int nx_sha256_init(struct shash_desc *desc) {
+ struct sha256_state *sctx = shash_desc_ctx(desc);
- if (len != SHA256_DIGEST_SIZE)
- return -EINVAL;
+ memset(sctx, 0, sizeof *sctx);
sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -78,6 +72,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
@@ -108,6 +103,16 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ data_len = SHA256_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &data_len, max_sg_len);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (data_len != SHA256_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
do {
/*
* to_process: the SHA256_BLOCK_SIZE data chunk to process in
@@ -282,7 +287,7 @@ struct shash_alg nx_shash_sha256_alg = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha_init,
+ .cra_init = nx_crypto_ctx_sha256_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index e6a58d2ee628..aff0fe58eac0 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -28,34 +28,29 @@
#include "nx.h"
-static int nx_sha512_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_sg *out_sg;
- int len;
- u32 max_sg_len;
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+ err = nx_crypto_ctx_sha_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
- max_sg_len = min_t(u64, nx_ctx->ap->sglen,
- nx_driver.of.max_sg_len/sizeof(struct nx_sg));
- max_sg_len = min_t(u64, max_sg_len,
- nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ return 0;
+}
- len = SHA512_DIGEST_SIZE;
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, max_sg_len);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+static int nx_sha512_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
- if (len != SHA512_DIGEST_SIZE)
- return -EINVAL;
+ memset(sctx, 0, sizeof *sctx);
sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1);
@@ -77,6 +72,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
@@ -107,6 +103,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ data_len = SHA512_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &data_len, max_sg_len);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (data_len != SHA512_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
do {
/*
* to_process: the SHA512_BLOCK_SIZE data chunk to process in
@@ -288,7 +294,7 @@ struct shash_alg nx_shash_sha512_alg = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha_init,
+ .cra_init = nx_crypto_ctx_sha512_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
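
Both SHA paths now split initialization the same way: the tfm-level cra_init hook performs the one-time coprocessor setup (which may fail), while the per-request shash init only resets the digest state; the output scatterlist moves into update(), where max_sg_len is actually known. A minimal sketch of the split, with illustrative function names:

	static int sha256_cra_init(struct crypto_tfm *tfm)
	{
		struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
		int err = nx_crypto_ctx_sha_init(tfm);	/* tfm-scoped, may allocate */

		if (err)
			return err;
		nx_ctx_init(nx_ctx, HCOP_FC_SHA);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
		return 0;
	}

	static int sha256_desc_init(struct shash_desc *desc)
	{
		struct sha256_state *sctx = shash_desc_ctx(desc);

		memset(sctx, 0, sizeof(*sctx));		/* per-request state only */
		return 0;
	}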
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index f6198f29a4a8..436971343ff7 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -713,12 +713,15 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
{
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ sizeof(struct nx_ccm_rctx));
return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CCM);
}
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
+ crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
NX_MODE_AES_GCM);
}
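
crypto_aead_set_reqsize() reserves scratch space inside every aead_request, which is what lets the IV move out of the shared tfm-level priv structs and into the new *_rctx types (see the nx.h hunk below). The pattern, sketched with illustrative names:

	struct my_rctx {
		u8 iv[16];	/* per-request IV: safe under concurrent requests */
	};

	static int my_aead_init(struct crypto_aead *tfm)
	{
		crypto_aead_set_reqsize(tfm, sizeof(struct my_rctx));
		return 0;
	}

	static int my_aead_encrypt(struct aead_request *req)
	{
		struct my_rctx *rctx = aead_request_ctx(req);

		memcpy(rctx->iv, req->iv, sizeof(rctx->iv));
		/* ... program the accelerator using rctx->iv ... */
		return 0;
	}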
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index de3ea8738146..cdff03a42ae7 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -2,6 +2,8 @@
#ifndef __NX_H__
#define __NX_H__
+#include <crypto/ctr.h>
+
#define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
#define NX_VERSION "1.0"
@@ -91,8 +93,11 @@ struct nx_crypto_driver {
#define NX_GCM4106_NONCE_LEN (4)
#define NX_GCM_CTR_OFFSET (12)
-struct nx_gcm_priv {
+struct nx_gcm_rctx {
u8 iv[16];
+};
+
+struct nx_gcm_priv {
u8 iauth_tag[16];
u8 nonce[NX_GCM4106_NONCE_LEN];
};
@@ -100,8 +105,11 @@ struct nx_gcm_priv {
#define NX_CCM_AES_KEY_LEN (16)
#define NX_CCM4309_AES_KEY_LEN (19)
#define NX_CCM4309_NONCE_LEN (3)
-struct nx_ccm_priv {
+struct nx_ccm_rctx {
u8 iv[16];
+};
+
+struct nx_ccm_priv {
u8 b0[16];
u8 iauth_tag[16];
u8 oauth_tag[16];
@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
};
struct nx_ctr_priv {
- u8 iv[16];
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
};
struct nx_crypto_ctx {
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 46307098f8ba..0a70e46d5416 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
dmaengine_terminate_all(dd->dma_lch_in);
dmaengine_terminate_all(dd->dma_lch_out);
- dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
-
return err;
}
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index 7e98084d3645..afea7fc625cc 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -151,7 +151,7 @@ static int octeon_l2c_probe(struct platform_device *pdev)
l2c->ctl_name = "octeon_l2c_err";
- if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+ if (OCTEON_IS_OCTEON1PLUS()) {
union cvmx_l2t_err l2t_err;
union cvmx_l2d_err l2d_err;
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index bb19e0732681..cda6dab5067a 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -234,7 +234,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
layers[0].size = 1;
layers[0].is_virt_csrow = false;
- if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+ if (OCTEON_IS_OCTEON1PLUS()) {
union cvmx_lmcx_mem_cfg0 cfg0;
cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index 0f83c33a7d1f..2ab6cf24c959 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -73,7 +73,7 @@ static int co_cache_error_event(struct notifier_block *this,
edac_device_handle_ce(p->ed, cpu, 0, "dcache");
/* Clear the error indication */
- if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+ if (OCTEON_IS_OCTEON2())
write_octeon_c0_dcacheerr(1);
else
write_octeon_c0_dcacheerr(0);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index ca617f40574a..9fa8084a7c8d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -66,7 +66,6 @@ static int __init parse_efi_cmdline(char *str)
early_param("efi", parse_efi_cmdline);
struct kobject *efi_kobj;
-static struct kobject *efivars_kobj;
/*
* Let's not leave out systab information that snuck into
@@ -218,10 +217,9 @@ static int __init efisubsys_init(void)
goto err_remove_group;
/* and the standard mountpoint for efivarfs */
- efivars_kobj = kobject_create_and_add("efivars", efi_kobj);
- if (!efivars_kobj) {
+ error = sysfs_create_mount_point(efi_kobj, "efivars");
+ if (error) {
pr_err("efivars: Subsystem registration failed.\n");
- error = -ENOMEM;
goto err_remove_group;
}
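
sysfs_create_mount_point() creates a directory that rejects ordinary children, reserving it purely as a mount point and returning an errno instead of a kobject. Userspace then mounts efivarfs on it; a sketch of the corresponding call, using the path registered above:

	#include <sys/mount.h>

	/* mount efivarfs on the reserved sysfs mount point */
	int mount_efivarfs(void)
	{
		return mount("efivarfs", "/sys/firmware/efi/efivars",
			     "efivarfs", 0, NULL);
	}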
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 22866d1c3d69..01657830b470 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -425,6 +425,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
unsigned irq_type);
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
struct amdgpu_fence **fence);
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+ uint64_t seq, struct amdgpu_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -435,9 +437,6 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
int amdgpu_fence_wait_any(struct amdgpu_device *adev,
struct amdgpu_fence **fences,
bool intr);
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
- u64 *target_seq, bool intr,
- long timeout);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);
@@ -1622,6 +1621,7 @@ struct amdgpu_vce {
unsigned fb_version;
atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
+ uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
struct delayed_work idle_work;
const struct firmware *fw; /* VCE firmware */
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 36d34e0afbc3..f82a2dd83874 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -30,6 +30,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
+#include "amdgpu_trace.h"
static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
struct amdgpu_bo_list **result,
@@ -124,6 +125,8 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
gws_obj = entry->robj;
if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
oa_obj = entry->robj;
+
+ trace_amdgpu_bo_list_set(list, entry->robj);
}
for (i = 0; i < list->num_entries; ++i)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index f09b2cba40ca..d63135bf29c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -181,8 +181,6 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
p->chunks[i].chunk_id = user_chunk.chunk_id;
p->chunks[i].length_dw = user_chunk.length_dw;
- if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB)
- p->num_ibs++;
size = p->chunks[i].length_dw;
cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -199,7 +197,12 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto out;
}
- if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_FENCE) {
+ switch (p->chunks[i].chunk_id) {
+ case AMDGPU_CHUNK_ID_IB:
+ p->num_ibs++;
+ break;
+
+ case AMDGPU_CHUNK_ID_FENCE:
size = sizeof(struct drm_amdgpu_cs_chunk_fence);
if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
uint32_t handle;
@@ -221,6 +224,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
r = -EINVAL;
goto out;
}
+ break;
+
+ case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ break;
+
+ default:
+ r = -EINVAL;
+ goto out;
}
}
@@ -445,8 +456,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
- for (i = 0; i < parser->num_ibs; i++)
- amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+ if (parser->ibs)
+ for (i = 0; i < parser->num_ibs; i++)
+ amdgpu_ib_free(parser->adev, &parser->ibs[i]);
kfree(parser->ibs);
if (parser->uf.bo)
drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
@@ -654,6 +666,55 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
+ struct amdgpu_cs_parser *p)
+{
+ struct amdgpu_ib *ib;
+ int i, j, r;
+
+ if (!p->num_ibs)
+ return 0;
+
+ /* Add dependencies to first IB */
+ ib = &p->ibs[0];
+ for (i = 0; i < p->nchunks; ++i) {
+ struct drm_amdgpu_cs_chunk_dep *deps;
+ struct amdgpu_cs_chunk *chunk;
+ unsigned num_deps;
+
+ chunk = &p->chunks[i];
+
+ if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
+ continue;
+
+ deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_dep);
+
+ for (j = 0; j < num_deps; ++j) {
+ struct amdgpu_fence *fence;
+ struct amdgpu_ring *ring;
+
+ r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
+ deps[j].ip_instance,
+ deps[j].ring, &ring);
+ if (r)
+ return r;
+
+ r = amdgpu_fence_recreate(ring, p->filp,
+ deps[j].handle,
+ &fence);
+ if (r)
+ return r;
+
+ amdgpu_sync_fence(&ib->sync, fence);
+ amdgpu_fence_unref(&fence);
+ }
+ }
+
+ return 0;
+}
+
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -688,11 +749,16 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
else
DRM_ERROR("Failed to process the buffer list %d!\n", r);
}
- } else {
+ }
+
+ if (!r) {
reserved_buffers = true;
r = amdgpu_cs_ib_fill(adev, &parser);
}
+ if (!r)
+ r = amdgpu_cs_dependencies(adev, &parser);
+
if (r) {
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
up_read(&adev->exclusive_lock);
@@ -730,9 +796,9 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private;
- uint64_t seq[AMDGPU_MAX_RINGS] = {0};
- struct amdgpu_ring *ring = NULL;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
+ struct amdgpu_fence *fence = NULL;
+ struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
long r;
@@ -745,9 +811,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
if (r)
return r;
- seq[ring->idx] = wait->in.handle;
+ r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
+ if (r)
+ return r;
- r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout);
+ r = fence_wait_timeout(&fence->base, true, timeout);
+ amdgpu_fence_unref(&fence);
amdgpu_ctx_put(ctx);
if (r < 0)
return r;
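
fence_wait_timeout() returns a negative errno, 0 on timeout, or the jiffies remaining, so only r < 0 is an error here. The tail of the handler is not part of the hunk; presumably it reports timeout versus completion along these lines:

	/* sketch: r == 0 means the wait timed out, r > 0 means signaled */
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);
	return 0;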
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index fec487d1c870..ba46be361c9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1191,7 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
-
+ adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
+ if (adev->ip_block_enabled == NULL)
+ return -ENOMEM;
if (adev->ip_blocks == NULL) {
DRM_ERROR("No IP blocks found!\n");
@@ -1575,8 +1577,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
- if (adev->ip_block_enabled)
- kfree(adev->ip_block_enabled);
+ kfree(adev->ip_block_enabled);
adev->ip_block_enabled = NULL;
adev->accel_working = false;
/* free i2c buses */
@@ -2000,4 +2001,10 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
void amdgpu_debugfs_cleanup(struct drm_minor *minor)
{
}
+#else
+static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5c9918d01bf9..a7189a1fa6a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -136,6 +136,38 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
}
/**
+ * amdgpu_fence_recreate - recreate a fence from a user fence
+ *
+ * @ring: ring the fence is associated with
+ * @owner: creator of the fence
+ * @seq: user fence sequence number
+ * @fence: resulting amdgpu fence object
+ *
+ * Recreates a fence from the user fence sequence number (all asics).
+ * Returns 0 on success, -EINVAL if seq was never emitted, or -ENOMEM on allocation failure.
+ */
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+ uint64_t seq, struct amdgpu_fence **fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (seq > ring->fence_drv.sync_seq[ring->idx])
+ return -EINVAL;
+
+ *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if ((*fence) == NULL)
+ return -ENOMEM;
+
+ (*fence)->seq = seq;
+ (*fence)->ring = ring;
+ (*fence)->owner = owner;
+ fence_init(&(*fence)->base, &amdgpu_fence_ops,
+ &adev->fence_queue.lock, adev->fence_context + ring->idx,
+ (*fence)->seq);
+ return 0;
+}
+
+/**
* amdgpu_fence_check_signaled - callback from fence_queue
*
* this function is called with fence_queue lock held, which is also used
@@ -517,12 +549,14 @@ static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
* the wait timeout, or an error for all other cases.
* -EDEADLK is returned when a GPU lockup has been detected.
*/
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
- bool intr, long timeout)
+static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
+ u64 *target_seq, bool intr,
+ long timeout)
{
uint64_t last_seq[AMDGPU_MAX_RINGS];
bool signaled;
- int i, r;
+ int i;
+ long r;
if (timeout == 0) {
return amdgpu_fence_any_seq_signaled(adev, target_seq);
@@ -1023,7 +1057,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
amdgpu_fence_process(ring);
- seq_printf(m, "--- ring %d ---\n", i);
+ seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
seq_printf(m, "Last emitted 0x%016llx\n",
@@ -1031,7 +1065,8 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
struct amdgpu_ring *other = adev->rings[j];
- if (i != j && other && other->fence_drv.initialized)
+ if (i != j && other && other->fence_drv.initialized &&
+ ring->fence_drv.sync_seq[j])
seq_printf(m, "Last sync to ring %d 0x%016llx\n",
j, ring->fence_drv.sync_seq[j]);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0ec222295fee..ae43b58c9733 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -352,7 +352,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
if (((int64_t)timeout_ns) < 0)
return MAX_SCHEDULE_TIMEOUT;
- timeout = ktime_sub_ns(ktime_get(), timeout_ns);
+ timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
if (ktime_to_ns(timeout) < 0)
return 0;
@@ -496,7 +496,7 @@ error_unreserve:
error_free:
drm_free_large(vm_bos);
- if (r)
+ if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
@@ -525,8 +525,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE);
+ invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
+ AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
if ((args->flags & invalid_flags)) {
dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
args->flags, invalid_flags);
@@ -579,7 +579,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
break;
}
- if (!r)
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
amdgpu_gem_va_update_vm(adev, bo_va);
drm_gem_object_unreference_unlocked(gobj);
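
The amdgpu_gem_timeout() change fixes the direction of the subtraction: the old code computed now - deadline, which is negative for any future deadline, so waits collapsed to polls. Remaining time is deadline - now; a worked sketch of the conversion, assuming timeout_ns is an absolute deadline:

	ktime_t remaining = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());

	if (ktime_to_ns(remaining) < 0)
		return 0;	/* deadline already passed: poll once */

	return min_t(u64, nsecs_to_jiffies(ktime_to_ns(remaining)),
		     MAX_SCHEDULE_TIMEOUT - 1);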
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index b56dd64bd4ea..961d7265c286 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -30,19 +30,21 @@ TRACE_EVENT(amdgpu_cs,
TP_PROTO(struct amdgpu_cs_parser *p, int i),
TP_ARGS(p, i),
TP_STRUCT__entry(
+ __field(struct amdgpu_bo_list *, bo_list)
__field(u32, ring)
__field(u32, dw)
__field(u32, fences)
),
TP_fast_assign(
+ __entry->bo_list = p->bo_list;
__entry->ring = p->ibs[i].ring->idx;
__entry->dw = p->ibs[i].length_dw;
__entry->fences = amdgpu_fence_count_emitted(
p->ibs[i].ring);
),
- TP_printk("ring=%u, dw=%u, fences=%u",
- __entry->ring, __entry->dw,
+ TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
+ __entry->bo_list, __entry->ring, __entry->dw,
__entry->fences)
);
@@ -61,6 +63,54 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
);
+TRACE_EVENT(amdgpu_vm_bo_map,
+ TP_PROTO(struct amdgpu_bo_va *bo_va,
+ struct amdgpu_bo_va_mapping *mapping),
+ TP_ARGS(bo_va, mapping),
+ TP_STRUCT__entry(
+ __field(struct amdgpu_bo *, bo)
+ __field(long, start)
+ __field(long, last)
+ __field(u64, offset)
+ __field(u32, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo_va->bo;
+ __entry->start = mapping->it.start;
+ __entry->last = mapping->it.last;
+ __entry->offset = mapping->offset;
+ __entry->flags = mapping->flags;
+ ),
+ TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+ __entry->bo, __entry->start, __entry->last,
+ __entry->offset, __entry->flags)
+);
+
+TRACE_EVENT(amdgpu_vm_bo_unmap,
+ TP_PROTO(struct amdgpu_bo_va *bo_va,
+ struct amdgpu_bo_va_mapping *mapping),
+ TP_ARGS(bo_va, mapping),
+ TP_STRUCT__entry(
+ __field(struct amdgpu_bo *, bo)
+ __field(long, start)
+ __field(long, last)
+ __field(u64, offset)
+ __field(u32, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo_va->bo;
+ __entry->start = mapping->it.start;
+ __entry->last = mapping->it.last;
+ __entry->offset = mapping->offset;
+ __entry->flags = mapping->flags;
+ ),
+ TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+ __entry->bo, __entry->start, __entry->last,
+ __entry->offset, __entry->flags)
+);
+
TRACE_EVENT(amdgpu_vm_bo_update,
TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
TP_ARGS(mapping),
@@ -121,6 +171,21 @@ TRACE_EVENT(amdgpu_vm_flush,
__entry->pd_addr, __entry->ring, __entry->id)
);
+TRACE_EVENT(amdgpu_bo_list_set,
+ TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
+ TP_ARGS(list, bo),
+ TP_STRUCT__entry(
+ __field(struct amdgpu_bo_list *, list)
+ __field(struct amdgpu_bo *, bo)
+ ),
+
+ TP_fast_assign(
+ __entry->list = list;
+ __entry->bo = bo;
+ ),
+ TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
+);
+
DECLARE_EVENT_CLASS(amdgpu_fence_request,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d3706a498293..dd3415d2e45d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -674,7 +674,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
if (gtt && gtt->userptr) {
- ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+ ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
return -ENOMEM;
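
kcalloc(1, size, flags) and kzalloc(size, flags) return the same zeroed allocation; the overflow-checked multiply kcalloc performs is pointless for a count of one, so the kzalloc form states the intent directly:

	ttm->sg = kzalloc(sizeof(*ttm->sg), GFP_KERNEL);	/* one zeroed struct sg_table */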
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 1127a504f118..d3ca73090e39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -464,28 +464,42 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
* @p: parser context
* @lo: address of lower dword
* @hi: address of higher dword
+ * @size: minimum size
+ * @index: index into an array of @size-sized buffers (0xffffffff means 0)
*
* Patch relocation inside command stream with real buffer address
*/
-int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi)
+static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
+ int lo, int hi, unsigned size, uint32_t index)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_ib *ib = &p->ibs[ib_idx];
struct amdgpu_bo *bo;
uint64_t addr;
+ if (index == 0xffffffff)
+ index = 0;
+
addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
+ addr += ((uint64_t)size) * ((uint64_t)index);
mapping = amdgpu_cs_find_mapping(p, addr, &bo);
if (mapping == NULL) {
- DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d\n",
+ DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
+ addr, lo, hi, size, index);
+ return -EINVAL;
+ }
+
+ if ((addr + (uint64_t)size) >
+ ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
addr, lo, hi);
return -EINVAL;
}
addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
addr += amdgpu_bo_gpu_offset(bo);
+ addr -= ((uint64_t)size) * ((uint64_t)index);
ib->ptr[lo] = addr & 0xFFFFFFFF;
ib->ptr[hi] = addr >> 32;
@@ -494,6 +508,48 @@ int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int
}
/**
+ * amdgpu_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ * @allocated: allocated a new handle?
+ *
+ * Validates the handle and returns the found session index, or -EINVAL
+ * if we don't have another free session index.
+ */
+static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
+ uint32_t handle, bool *allocated)
+{
+ unsigned i;
+
+ *allocated = false;
+
+ /* validate the handle */
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
+ if (atomic_read(&p->adev->vce.handles[i]) == handle) {
+ if (p->adev->vce.filp[i] != p->filp) {
+ DRM_ERROR("VCE handle collision detected!\n");
+ return -EINVAL;
+ }
+ return i;
+ }
+ }
+
+ /* handle not found try to alloc a new one */
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
+ if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
+ p->adev->vce.filp[i] = p->filp;
+ p->adev->vce.img_size[i] = 0;
+ *allocated = true;
+ return i;
+ }
+ }
+
+ DRM_ERROR("No more free VCE handles!\n");
+ return -EINVAL;
+}
+
+/**
* amdgpu_vce_cs_parse - parse and validate the command stream
*
* @p: parser context
@@ -501,10 +557,15 @@ int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int
*/
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
- uint32_t handle = 0;
- bool destroy = false;
- int i, r, idx = 0;
struct amdgpu_ib *ib = &p->ibs[ib_idx];
+ unsigned fb_idx = 0, bs_idx = 0;
+ int session_idx = -1;
+ bool destroyed = false;
+ bool created = false;
+ bool allocated = false;
+ uint32_t tmp, handle = 0;
+ uint32_t *size = &tmp;
+ int i, r = 0, idx = 0;
amdgpu_vce_note_usage(p->adev);
@@ -514,16 +575,44 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (destroyed) {
+ DRM_ERROR("No other command allowed after destroy!\n");
+ r = -EINVAL;
+ goto out;
}
switch (cmd) {
case 0x00000001: // session
handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+ session_idx = amdgpu_vce_validate_handle(p, handle,
+ &allocated);
+ if (session_idx < 0)
+ return session_idx;
+ size = &p->adev->vce.img_size[session_idx];
break;
case 0x00000002: // task info
+ fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
+ bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
+ break;
+
case 0x01000001: // create
+ created = true;
+ if (!allocated) {
+ DRM_ERROR("Handle already in use!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ *size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
+ amdgpu_get_ib_value(p, ib_idx, idx + 10) *
+ 8 * 3 / 2;
+ break;
+
case 0x04000001: // config extension
case 0x04000002: // pic control
case 0x04000005: // rate control
@@ -534,60 +623,74 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
break;
case 0x03000001: // encode
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9);
+ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
+ *size, 0);
if (r)
- return r;
+ goto out;
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11);
+ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
+ *size / 3, 0);
if (r)
- return r;
+ goto out;
break;
case 0x02000001: // destroy
- destroy = true;
+ destroyed = true;
break;
case 0x05000001: // context buffer
+ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+ *size * 2, 0);
+ if (r)
+ goto out;
+ break;
+
case 0x05000004: // video bitstream buffer
+ tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
+ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+ tmp, bs_idx);
+ if (r)
+ goto out;
+ break;
+
case 0x05000005: // feedback buffer
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2);
+ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+ 4096, fb_idx);
if (r)
- return r;
+ goto out;
break;
default:
DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
- idx += len / 4;
- }
-
- if (destroy) {
- /* IB contains a destroy msg, free the handle */
- for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
- atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
+ if (session_idx == -1) {
+ DRM_ERROR("no session command at start of IB\n");
+ r = -EINVAL;
+ goto out;
+ }
- return 0;
+ idx += len / 4;
}
- /* create or encode, validate the handle */
- for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
- if (atomic_read(&p->adev->vce.handles[i]) == handle)
- return 0;
+ if (allocated && !created) {
+ DRM_ERROR("New session without create command!\n");
+ r = -ENOENT;
}
- /* handle not found try to alloc a new one */
- for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
- if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
- p->adev->vce.filp[i] = p->filp;
- return 0;
- }
+out:
+ if ((!r && destroyed) || (r && allocated)) {
+ /*
+ * IB contains a destroy msg or we allocated a handle
+ * and hit an error; either way, free the handle
+ */
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+ atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
}
- DRM_ERROR("No more free VCE handles!\n");
-
- return -EINVAL;
+ return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index b6a9d0956c60..7ccdb5927da5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -33,7 +33,6 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct amdgpu_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
-int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
struct amdgpu_semaphore *semaphore,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 407882b233c7..9a4e3b63f1cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1001,6 +1001,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
list_add(&mapping->list, &bo_va->mappings);
interval_tree_insert(&mapping->it, &vm->va);
+ trace_amdgpu_vm_bo_map(bo_va, mapping);
bo_va->addr = 0;
@@ -1058,6 +1059,7 @@ error_free:
mutex_lock(&vm->mutex);
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
+ trace_amdgpu_vm_bo_unmap(bo_va, mapping);
kfree(mapping);
error_unlock:
@@ -1099,6 +1101,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
mutex_lock(&vm->mutex);
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
+ trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (bo_va->addr) {
/* clear the old address */
@@ -1139,6 +1142,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
+ trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (bo_va->addr)
list_add(&mapping->list, &vm->freed);
else
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 5dab578d6462..341c56681841 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -2256,10 +2256,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
}
- adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
- if (adev->ip_block_enabled == NULL)
- return -ENOMEM;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 220865a44814..d19085a97064 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -552,4 +552,10 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
+/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
+enum {
+ MTYPE_CACHED = 0,
+ MTYPE_NONCACHED = 3
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index e4936a452bc6..f75a31df30bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->mgcg_cgtt_local1 = 0x0;
pi->clock_slow_down_step = 25000;
pi->skip_clock_slow_down = 1;
- pi->enable_nb_ps_policy = 1;
+ pi->enable_nb_ps_policy = 0;
pi->caps_power_containment = true;
pi->caps_cac = true;
pi->didt_enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
index 782a74107664..99e1afc89629 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
@@ -46,7 +46,7 @@
/* Do not change the following, it is also defined in SMU8.h */
#define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001
-#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00100000
+#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000
#define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000
#define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5cde635978f9..6e77964f1b64 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3403,19 +3403,25 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
dce_v10_0_crtc_vblank_int_ack(adev, crtc);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
dce_v10_0_crtc_vline_int_ack(adev, crtc);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 95efd98b202d..7f7abb0e0be5 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3402,19 +3402,25 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
dce_v11_0_crtc_vblank_int_ack(adev, crtc);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
dce_v11_0_crtc_vline_int_ack(adev, crtc);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 72c27ac915f2..08387dfd98a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -3237,19 +3237,25 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
@@ -3379,7 +3385,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
uint32_t disp_int, mask, int_control, tmp;
unsigned hpd;
- if (entry->src_data > 6) {
+ if (entry->src_data >= adev->mode_info.num_hpd) {
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index cb7907447b81..2c188fb9fd22 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2010,6 +2010,46 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev,
}
/**
+ * gmc_v7_0_init_compute_vmid - init compute vmid sh_mem registers
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize compute vmid sh_mem registers
+ *
+ */
+#define DEFAULT_SH_MEM_BASES (0x6000)
+#define FIRST_COMPUTE_VMID (8)
+#define LAST_COMPUTE_VMID (16)
+static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+ int i;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_bases;
+
+ /*
+ * Configure apertures:
+ * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
+ * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
+ * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
+ */
+ sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
+ sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+ sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
+ mutex_lock(&adev->srbm_mutex);
+ for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ cik_srbm_select(adev, 0, 0, 0, i);
+ /* CP and shaders */
+ WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32(mmSH_MEM_APE1_BASE, 1);
+ WREG32(mmSH_MEM_APE1_LIMIT, 0);
+ WREG32(mmSH_MEM_BASES, sh_mem_bases);
+ }
+ cik_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+/**
* gfx_v7_0_gpu_init - setup the 3D engine
*
* @adev: amdgpu_device pointer
@@ -2230,6 +2270,8 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+ gmc_v7_0_init_compute_vmid(adev);
+
WREG32(mmSX_DEBUG_1, 0x20);
WREG32(mmTA_CNTL_AUX, 0x00010000);
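
DEFAULT_SH_MEM_BASES carries only the top 16 bits of each aperture base. Assuming the usual SH_MEM_BASES layout (PRIVATE_BASE in bits 15:0, SHARED_BASE in bits 31:16, each holding address bits 63:48), 0x6000 expands to the 0x6000000000000000 base quoted in the comment:

	u32 sh_mem_bases = 0x6000 | (0x6000 << 16);	/* == 0x60006000 */
	u64 aperture_base = (u64)0x6000 << 48;		/* == 0x6000000000000000 */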
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 14242bd33363..7b683fb2173c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1894,6 +1894,51 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
mutex_unlock(&adev->grbm_idx_mutex);
}
+/**
+ * gmc_v8_0_init_compute_vmid - init compute vmid sh_mem registers
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize compute vmid sh_mem registers
+ *
+ */
+#define DEFAULT_SH_MEM_BASES (0x6000)
+#define FIRST_COMPUTE_VMID (8)
+#define LAST_COMPUTE_VMID (16)
+static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+ int i;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_bases;
+
+ /*
+ * Configure apertures:
+ * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
+ * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
+ * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
+ */
+ sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
+
+ sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
+ SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+ MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+ SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+ mutex_lock(&adev->srbm_mutex);
+ for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ vi_srbm_select(adev, 0, 0, 0, i);
+ /* CP and shaders */
+ WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32(mmSH_MEM_APE1_BASE, 1);
+ WREG32(mmSH_MEM_APE1_LIMIT, 0);
+ WREG32(mmSH_MEM_BASES, sh_mem_bases);
+ }
+ vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
@@ -2113,6 +2158,8 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+ gmc_v8_0_init_compute_vmid(adev);
+
mutex_lock(&adev->grbm_idx_mutex);
/*
* making sure that the following register writes will be broadcasted
@@ -3081,7 +3128,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
- AMDGPU_DOORBELL_MEC_RING7 << 2);
+ 0x7FFFF << 2);
}
tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3097,6 +3144,12 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
+ /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+ ring->wptr = 0;
+ mqd->cp_hqd_pq_wptr = ring->wptr;
+ WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
+ mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
+
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index e3c1fde75363..7bb37b93993f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -439,6 +439,31 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
}
/**
+ * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs context switch.
+ *
+ * Halt or unhalt the async dma engines context switch (VI).
+ */
+static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 f32_cntl;
+ int i;
+
+ for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+ f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
+ if (enable)
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, 1);
+ else
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, 0);
+ WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
+ }
+}
+
+/**
* sdma_v3_0_enable - stop the async dma engines
*
* @adev: amdgpu_device pointer
@@ -648,6 +673,8 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
/* unhalt the MEs */
sdma_v3_0_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v3_0_ctx_switch_enable(adev, true);
/* start the gfx rings and rlc compute queues */
r = sdma_v3_0_gfx_resume(adev);
@@ -1079,6 +1106,7 @@ static int sdma_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
return 0;
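
REG_SET_FIELD() takes the new field value as its last argument, so the if/else in sdma_v3_0_ctx_switch_enable() can collapse to a single update with identical semantics:

	f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
				 AUTO_CTXSW_ENABLE, enable ? 1 : 0);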
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 90fc93c2c1d0..fa5a4448531d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1189,10 +1189,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
}
- adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
- if (adev->ip_block_enabled == NULL)
- return -ENOMEM;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8a1f999daa24..9be007081b72 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -420,6 +420,12 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
pqm_uninit(&p->pqm);
pdd = kfd_get_process_device_data(dev, p);
+
+ if (!pdd) {
+ mutex_unlock(&p->mutex);
+ return;
+ }
+
if (pdd->reset_wavefronts) {
dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false;
@@ -431,8 +437,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
* We don't call amd_iommu_unbind_pasid() here
* because the IOMMU called us.
*/
- if (pdd)
- pdd->bound = false;
+ pdd->bound = false;
mutex_unlock(&p->mutex);
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b69ed97d447c..b9ba06176eb1 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -4732,7 +4732,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
return 0;
if (edid)
- size = EDID_LENGTH + (1 + edid->extensions);
+ size = EDID_LENGTH * (1 + edid->extensions);
ret = drm_property_replace_global_blob(dev,
&connector->edid_blob_ptr,
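
EDID blocks are EDID_LENGTH (128) bytes each, and edid->extensions counts the blocks following the base one, so the blob size must scale multiplicatively; the old '+' under-sized every EDID that carries extensions:

	/* base block plus one extension block */
	size_t fixed = 128 * (1 + 1);	/* 256 bytes: both blocks kept */
	size_t buggy = 128 + (1 + 1);	/* 130 bytes: extension truncated */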
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8867818b1401..d65cbe6afb92 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -157,9 +157,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
struct drm_i915_gem_object *obj;
int ret;
- obj = i915_gem_object_create_stolen(dev, size);
- if (obj == NULL)
- obj = i915_gem_alloc_object(dev, size);
+ obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 619dad1b2386..dcc6a88c560e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -516,17 +516,17 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
struct page *page_table;
if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
- continue;
+ break;
pd = ppgtt->pdp.page_directory[pdpe];
if (WARN_ON(!pd->page_table[pde]))
- continue;
+ break;
pt = pd->page_table[pde];
if (WARN_ON(!pt->page))
- continue;
+ break;
page_table = pt->page;
@@ -2546,6 +2546,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ struct i915_vma *vma;
+ bool flush;
i915_check_and_clear_faults(dev);
@@ -2555,16 +2557,23 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.total,
true);
+ /* Cache flush objects bound into GGTT and rebind them. */
+ vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- struct i915_vma *vma = i915_gem_obj_to_vma(obj,
- &dev_priv->gtt.base);
- if (!vma)
- continue;
+ flush = false;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (vma->vm != vm)
+ continue;
- i915_gem_clflush_object(obj, obj->pin_display);
- WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
- }
+ WARN_ON(i915_vma_bind(vma, obj->cache_level,
+ PIN_UPDATE));
+ flush = true;
+ }
+
+ if (flush)
+ i915_gem_clflush_object(obj, obj->pin_display);
+ }
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1fcab69..d61e74a08f82 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -183,8 +183,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
if (IS_GEN4(dev)) {
uint32_t ddc2 = I915_READ(DCC2);
- if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+ /* Since the swizzling may vary within an
+ * object, we have no idea what the swizzling
+ * is for any page in particular. Thus we
+ * cannot migrate tiled pages using the GPU,
+ * nor can we tell userspace what the exact
+ * swizzling is for any object.
+ */
dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
}
if (dcc == 0xffffffff) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f5edb3504167..2030f602cbf8 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3491,6 +3491,7 @@ enum skl_disp_power_wells {
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260)
+#define BLM_HISTOGRAM_ENABLE (1 << 31)
/* New registers for PCH-split platforms. Safe where new bits show up, the
* register layout matches gen4 BLC_PWM_CTL[12]. */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dcb1d25d6f05..647b1404c441 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4854,6 +4854,9 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
struct intel_plane *intel_plane;
int pipe = intel_crtc->pipe;
+ if (!intel_crtc->active)
+ return;
+
intel_crtc_wait_for_pending_flips(crtc);
intel_pre_disable_primary(crtc);
@@ -7887,7 +7890,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
int pipe = pipe_config->cpu_transcoder;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
intel_clock_t clock;
- u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
mutex_lock(&dev_priv->sb_lock);
@@ -7895,10 +7898,13 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+ pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
mutex_unlock(&dev_priv->sb_lock);
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
- clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
+ clock.m2 = (pll_dw0 & 0xff) << 22;
+ if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+ clock.m2 |= pll_dw2 & 0x3fffff;
clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
@@ -13303,6 +13309,16 @@ intel_check_primary_plane(struct drm_plane *plane,
intel_crtc->atomic.wait_vblank = true;
}
+ /*
+ * FIXME: if any other plane on the pipe is still
+ * enabled we could leave IPS on, but for now assume
+ * that making the primary plane invisible (DSPCNTR
+ * set to 0 in update_primary_plane) requires IPS
+ * to be disabled.
+ */
+ if (!state->visible || !fb)
+ intel_crtc->atomic.disable_ips = true;
+
intel_crtc->atomic.fb_bits |=
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
@@ -13400,6 +13416,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc)
if (intel_crtc->atomic.disable_fbc)
intel_fbc_disable(dev);
+ if (intel_crtc->atomic.disable_ips)
+ hsw_disable_ips(intel_crtc);
+
if (intel_crtc->atomic.pre_disable_primary)
intel_pre_disable_primary(crtc);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 76afc62373d7..6e8faa253792 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1140,6 +1140,9 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
+ memset(&pipe_config->dpll_hw_state, 0,
+ sizeof(pipe_config->dpll_hw_state));
+
switch (link_bw) {
case DP_LINK_BW_1_62:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2afb31a46275..105928382e21 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -485,6 +485,7 @@ struct intel_crtc_atomic_commit {
/* Sleepable operations to perform before commit */
bool wait_for_flips;
bool disable_fbc;
+ bool disable_ips;
bool pre_disable_primary;
bool update_wm;
unsigned disabled_planes;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 7d83527f95f7..55aad2322e10 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -907,6 +907,14 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+ /*
+ * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
+ * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
+ * that has backlight.
+ */
+ if (IS_GEN2(dev))
+ I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
static void i965_enable_backlight(struct intel_connector *connector)
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f2daad8c3d96..7841970de48d 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -285,7 +285,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
if (wait) {
if (!wait_for_completion_timeout(&engine->compl,
- msecs_to_jiffies(1))) {
+ msecs_to_jiffies(100))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae2df41f216f..12081e61d45a 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -177,7 +177,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
-int omap_framebuffer_unpin(struct drm_framebuffer *fb);
+void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
@@ -211,7 +211,7 @@ void omap_gem_dma_sync(struct drm_gem_object *obj,
enum dma_data_direction dir);
int omap_gem_get_paddr(struct drm_gem_object *obj,
dma_addr_t *paddr, bool remap);
-int omap_gem_put_paddr(struct drm_gem_object *obj);
+void omap_gem_put_paddr(struct drm_gem_object *obj);
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap);
int omap_gem_put_pages(struct drm_gem_object *obj);
@@ -236,7 +236,7 @@ static inline int align_pitch(int pitch, int width, int bpp)
/* PVR needs alignment to 8 pixels.. right now that is the most
* restrictive stride requirement..
*/
- return ALIGN(pitch, 8 * bytespp);
+ return roundup(pitch, 8 * bytespp);
}
/* map crtc to vblank mask */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 0b967e76df1a..51b1219af87f 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -287,10 +287,10 @@ fail:
}
/* unpin, no longer being scanned out: */
-int omap_framebuffer_unpin(struct drm_framebuffer *fb)
+void omap_framebuffer_unpin(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = drm_format_num_planes(fb->pixel_format);
mutex_lock(&omap_fb->lock);
@@ -298,24 +298,16 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb)
if (omap_fb->pin_count > 0) {
mutex_unlock(&omap_fb->lock);
- return 0;
+ return;
}
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_put_paddr(plane->bo);
- if (ret)
- goto fail;
+ omap_gem_put_paddr(plane->bo);
plane->paddr = 0;
}
mutex_unlock(&omap_fb->lock);
-
- return 0;
-
-fail:
- mutex_unlock(&omap_fb->lock);
- return ret;
}
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 23b5a84389e3..720d16bce7e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -135,7 +135,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
if (fbdev->ywrap_enabled) {
/* need to align pitch to page size if using DMM scrolling */
- mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
+ mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
}
/* allocate backing bo */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 2ab77801cf5f..7ed08fdc4c42 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -808,10 +808,10 @@ fail:
/* Release physical address, when DMA is no longer being performed.. this
* could potentially unpin and unmap buffers from TILER
*/
-int omap_gem_put_paddr(struct drm_gem_object *obj)
+void omap_gem_put_paddr(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
+ int ret;
mutex_lock(&obj->dev->struct_mutex);
if (omap_obj->paddr_cnt > 0) {
@@ -821,7 +821,6 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
if (ret) {
dev_err(obj->dev->dev,
"could not unpin pages: %d\n", ret);
- goto fail;
}
ret = tiler_release(omap_obj->block);
if (ret) {
@@ -832,9 +831,8 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
omap_obj->block = NULL;
}
}
-fail:
+
mutex_unlock(&obj->dev->struct_mutex);
- return ret;
}
/* Get rotated scanout address (only valid if already pinned), at the
@@ -1378,11 +1376,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
if (!omap_obj)
- goto fail;
-
- spin_lock(&priv->list_lock);
- list_add(&omap_obj->mm_list, &priv->obj_list);
- spin_unlock(&priv->list_lock);
+ return NULL;
obj = &omap_obj->base;
@@ -1392,11 +1386,19 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
*/
omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
&omap_obj->paddr, GFP_KERNEL);
- if (omap_obj->vaddr)
- flags |= OMAP_BO_DMA;
+ if (!omap_obj->vaddr) {
+ kfree(omap_obj);
+
+ return NULL;
+ }
+ flags |= OMAP_BO_DMA;
}
+ spin_lock(&priv->list_lock);
+ list_add(&omap_obj->mm_list, &priv->obj_list);
+ spin_unlock(&priv->list_lock);
+
omap_obj->flags = flags;
if (flags & OMAP_BO_TILED) {
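The omap_gem_new() reordering above follows the usual publish-last allocation pattern: the object only goes onto the global list after every allocation has succeeded, so the error paths reduce to kfree(). A standalone sketch under hypothetical demo_dev/demo_obj types (not the omapdrm ones):

/* Hypothetical types for illustration only. */
struct demo_dev {
	spinlock_t list_lock;
	struct list_head obj_list;
	struct device *dev;
};
struct demo_obj {
	struct list_head node;
	void *vaddr;
	dma_addr_t paddr;
};

static struct demo_obj *demo_obj_new(struct demo_dev *ddev, size_t size)
{
	struct demo_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;

	o->vaddr = dma_alloc_writecombine(ddev->dev, size, &o->paddr,
					  GFP_KERNEL);
	if (!o->vaddr) {
		kfree(o);	/* not on any list yet, nothing to unlink */
		return NULL;
	}

	spin_lock(&ddev->list_lock);
	list_add(&o->node, &ddev->obj_list);	/* publish only on success */
	spin_unlock(&ddev->list_lock);

	return o;
}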
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index cfa8276c4deb..098904696a5c 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,6 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
@@ -153,9 +154,34 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
dispc_ovl_enable(omap_plane->id, false);
}
+static int omap_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (!state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (state->crtc_x < 0 || state->crtc_y < 0)
+ return -EINVAL;
+
+ if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
+ return -EINVAL;
+
+ if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+
+ return 0;
+}
+
static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
.prepare_fb = omap_plane_prepare_fb,
.cleanup_fb = omap_plane_cleanup_fb,
+ .atomic_check = omap_plane_atomic_check,
.atomic_update = omap_plane_atomic_update,
.atomic_disable = omap_plane_atomic_disable,
};
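A worked example of the new check, with hypothetical numbers: for adjusted_mode.hdisplay == 1920, a plane at crtc_x == 1800 with crtc_w == 200 ends at 2000 > 1920, so omap_plane_atomic_check() returns -EINVAL and the commit is rejected before any hardware is programmed.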
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b0688b0c8908..248953d2fdb7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4604,6 +4604,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev,
WDOORBELL32(ring->doorbell_index, ring->wptr);
}
+static void cik_compute_stop(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 j, tmp;
+
+ cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
+ /* Disable wptr polling. */
+ tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
+ tmp &= ~WPTR_POLL_EN;
+ WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
+ /* Disable HQD. */
+ if (RREG32(CP_HQD_ACTIVE) & 1) {
+ WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (!(RREG32(CP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
+ WREG32(CP_HQD_PQ_RPTR, 0);
+ WREG32(CP_HQD_PQ_WPTR, 0);
+ }
+ cik_srbm_select(rdev, 0, 0, 0, 0);
+}
+
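cik_compute_stop() above appears to implement the HQD dequeue handshake: request a dequeue, poll CP_HQD_ACTIVE for up to rdev->usec_timeout microseconds, then clear the request and zero the read/write pointers so a stale wptr cannot restart the queue after resume; the srbm_mutex taken by the caller serializes the cik_srbm_select() register banking.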
/**
* cik_cp_compute_enable - enable/disable the compute CP MEs
*
@@ -4617,6 +4642,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_MEC_CNTL, 0);
else {
+ /*
+ * To make hibernation reliable we need to clear the compute ring
+ * configuration before halting the compute rings.
+ */
+ mutex_lock(&rdev->srbm_mutex);
+ cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+ cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+ mutex_unlock(&rdev->srbm_mutex);
+
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
@@ -7930,23 +7964,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
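The same rewrite is applied mechanically to every vblank, vline, HPD and HDMI case that follows: the IH ring entry is treated as authoritative, so the event is handled even when the corresponding status bit is already clear, and the suspicious state is merely reported via DRM_DEBUG instead of the event being silently dropped as before.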
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7956,23 +7994,27 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7982,23 +8024,27 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8008,23 +8054,27 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8034,23 +8084,27 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8060,23 +8114,27 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8096,88 +8154,112 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+
break;
case 1:
- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+
break;
case 2:
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+
break;
case 3:
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+
break;
case 4:
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
case 6:
- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+
break;
case 7:
- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+
break;
case 8:
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+
break;
case 9:
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+
break;
case 10:
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+
break;
case 11:
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index f86eb54e7763..d16f2eebd95e 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -268,6 +268,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
}
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+
+ /* FIXME: use something other than this big hammer, but after a few
+ * days we could not find a good combination, so reset the SDMA
+ * blocks, as it seems we do not shut them down properly. This fixes
+ * hibernation and does not affect suspend to RAM.
+ */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+ (void)RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+ (void)RREG32(SRBM_SOFT_RESET);
}
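The (void)RREG32(SRBM_SOFT_RESET) readbacks in the hunk above look like posting reads: they force the preceding register write out to the hardware before the udelay() begins, a common MMIO ordering idiom in this driver.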
/**
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3a6d483a2c36..0acde1949c18 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4924,7 +4924,7 @@ restart_ih:
return IRQ_NONE;
rptr = rdev->ih.rptr;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
@@ -4942,23 +4942,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4968,23 +4972,27 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4994,23 +5002,27 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5020,23 +5032,27 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5046,23 +5062,27 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5072,23 +5092,27 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5108,88 +5132,100 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
break;
case 6:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
break;
case 7:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
break;
case 8:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
break;
case 9:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
break;
case 10:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
break;
case 11:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5199,46 +5235,52 @@ restart_ih:
case 44: /* hdmi */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI0\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI2\n");
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI3\n");
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI4\n");
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI5\n");
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8e5aeeb058a5..158872eb78e4 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2162,18 +2162,20 @@ static int cayman_startup(struct radeon_device *rdev)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+ if (rdev->family == CHIP_ARUBA) {
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ if (ring->ring_size)
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ if (ring->ring_size)
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
- if (!r)
- r = vce_v1_0_init(rdev);
- else if (r != -ENOENT)
- DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+ if (!r)
+ r = vce_v1_0_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+ }
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -2396,7 +2398,8 @@ void cayman_fini(struct radeon_device *rdev)
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
- radeon_vce_fini(rdev);
+ if (rdev->family == CHIP_ARUBA)
+ radeon_vce_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 35dafd77a639..4ea5b10ff5f4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4086,23 +4086,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4112,23 +4116,27 @@ restart_ih:
case 5: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4148,46 +4156,53 @@ restart_ih:
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
break;
case 1:
- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
break;
case 4:
- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
break;
case 5:
- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
break;
case 10:
- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
break;
case 12:
- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4197,18 +4212,22 @@ restart_ih:
case 21: /* hdmi */
switch (src_data) {
case 4:
- if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI0\n");
- }
+ if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI1\n");
- }
+ if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 09e3f39925fa..98f9adaccc3d 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2483,7 +2483,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
struct drm_buf *buf;
u32 *buffer;
const u8 __user *data;
- int size, pass_size;
+ unsigned int size, pass_size;
u64 src_offset, dst_offset;
if (!radeon_check_offset(dev_priv, tex->offset)) {
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index c89215275053..fa719c53449b 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -469,22 +469,22 @@ void radeon_audio_detect(struct drm_connector *connector,
dig = radeon_encoder->enc_priv;
if (status == connector_status_connected) {
- struct radeon_connector *radeon_connector;
- int sink_type;
-
if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
radeon_encoder->audio = NULL;
return;
}
- radeon_connector = to_radeon_connector(connector);
- sink_type = radeon_dp_getsinktype(radeon_connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
- sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
- radeon_encoder->audio = rdev->audio.dp_funcs;
- else
+ if (radeon_dp_getsinktype(radeon_connector) ==
+ CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ radeon_encoder->audio = rdev->audio.dp_funcs;
+ else
+ radeon_encoder->audio = rdev->audio.hdmi_funcs;
+ } else {
radeon_encoder->audio = rdev->audio.hdmi_funcs;
+ }
dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
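The restructured radeon_audio_detect() now only queries the DP sink type on DisplayPort connectors; HDMI and other connector types go straight to the HDMI audio functions, which avoids poking DP-specific connector state where it may not exist.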
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 45e54060ee97..afaf346bd50e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -91,15 +91,34 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ lower_32_bits(radeon_crtc->cursor_addr));
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ else
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ }
+
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ lower_32_bits(radeon_crtc->cursor_addr));
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
+ /* offset is from DISP(2)_BASE_ADDRESS */
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
@@ -205,8 +224,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
| (x << 16)
| y));
/* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
- (yorigin * 256)));
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
+ yorigin * 256);
}
radeon_crtc->cursor_x = x;
@@ -227,53 +247,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
return ret;
}
-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_device *rdev = crtc->dev->dev_private;
- struct radeon_bo *robj = gem_to_radeon_bo(obj);
- uint64_t gpu_addr;
- int ret;
-
- ret = radeon_bo_reserve(robj, false);
- if (unlikely(ret != 0))
- goto fail;
- /* Only 27 bit offset for legacy cursor */
- ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
- &gpu_addr);
- radeon_bo_unreserve(robj);
- if (ret)
- goto fail;
-
- if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
- upper_32_bits(gpu_addr));
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else if (ASIC_IS_AVIVO(rdev)) {
- if (rdev->family >= CHIP_RV770) {
- if (radeon_crtc->crtc_id)
- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- else
- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- }
- WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else {
- radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
- /* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
- }
-
- return 0;
-
-fail:
- drm_gem_object_unreference_unlocked(obj);
-
- return ret;
-}
-
int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
@@ -283,7 +256,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
struct drm_gem_object *obj;
+ struct radeon_bo *robj;
int ret;
if (!handle) {
@@ -305,6 +280,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
return -ENOENT;
}
+ robj = gem_to_radeon_bo(obj);
+ ret = radeon_bo_reserve(robj, false);
+ if (ret != 0) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+ /* Only 27 bit offset for legacy cursor */
+ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ &radeon_crtc->cursor_addr);
+ radeon_bo_unreserve(robj);
+ if (ret) {
+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
radeon_crtc->cursor_width = width;
radeon_crtc->cursor_height = height;
@@ -323,13 +315,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
radeon_crtc->cursor_hot_y = hot_y;
}
- ret = radeon_set_cursor(crtc, obj);
-
- if (ret)
- DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
- ret);
- else
- radeon_show_cursor(crtc);
+ radeon_show_cursor(crtc);
radeon_lock_cursor(crtc, false);
@@ -341,8 +327,7 @@ unpin:
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
- if (radeon_crtc->cursor_bo != obj)
- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
@@ -360,7 +345,6 @@ unpin:
void radeon_cursor_reset(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int ret;
if (radeon_crtc->cursor_bo) {
radeon_lock_cursor(crtc, true);
@@ -368,12 +352,7 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
radeon_crtc->cursor_y);
- ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
- if (ret)
- DRM_ERROR("radeon_set_cursor returned %d, not showing "
- "cursor\n", ret);
- else
- radeon_show_cursor(crtc);
+ radeon_show_cursor(crtc);
radeon_lock_cursor(crtc, false);
}
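Taken together, the radeon_cursor.c changes move pinning out of radeon_set_cursor() (now deleted) and into radeon_crtc_cursor_set2(), which pins the BO once and caches its GPU address in radeon_crtc->cursor_addr; radeon_show_cursor() then just programs that cached address and can no longer fail, and the suspend/resume hunks below unpin and re-pin the cursor BO around hibernation.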
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2593b1168bd6..d8319dae8358 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1080,6 +1080,22 @@ static bool radeon_check_pot_argument(int arg)
}
/**
+ * radeon_gart_size_auto - determine a sensible default GART size
+ * according to ASIC family
+ *
+ * @family: ASIC family name
+ */
+static int radeon_gart_size_auto(enum radeon_family family)
+{
+ /* default to a larger gart size on newer asics */
+ if (family >= CHIP_TAHITI)
+ return 2048;
+ else if (family >= CHIP_RV770)
+ return 1024;
+ else
+ return 512;
+}
+
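A quick worked example of the helper above: CHIP_TAHITI and newer default to 2048, CHIP_RV770 up to (but not including) CHIP_TAHITI get 1024, and everything older gets 512; radeon_check_arguments() later converts the value to bytes with rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20, i.e. the unit is megabytes.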
+/**
* radeon_check_arguments - validate module params
*
* @rdev: radeon_device pointer
@@ -1097,27 +1113,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}
if (radeon_gart_size == -1) {
- /* default to a larger gart size on newer asics */
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
}
/* gtt size must be power of two and greater or equal to 32M */
if (radeon_gart_size < 32) {
dev_warn(rdev->dev, "gart size (%d) too small\n",
radeon_gart_size);
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
@@ -1572,11 +1578,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
- /* unpin the front buffers */
+ /* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
struct radeon_bo *robj;
+ if (radeon_crtc->cursor_bo) {
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ r = radeon_bo_reserve(robj, false);
+ if (r == 0) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
+ }
+
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
@@ -1639,6 +1655,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_crtc *crtc;
int r;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1678,6 +1695,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
radeon_restore_bios_scratch_regs(rdev);
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ if (radeon_crtc->cursor_bo) {
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ r = radeon_bo_reserve(robj, false);
+ if (r == 0) {
+ /* Only 27 bit offset for legacy cursor */
+ r = radeon_bo_pin_restricted(robj,
+ RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ?
+ 0 : 1 << 27,
+ &radeon_crtc->cursor_addr);
+ if (r != 0)
+ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+ radeon_bo_unreserve(robj);
+ }
+ }
+ }
+
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ac3c1310b953..013ec7106e55 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -428,7 +428,6 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -440,10 +439,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, &cur_placement, true);
+
+ r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+ if (r == 0)
+ r = -EBUSY;
+ else
+ r = 0;
+
+ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_unreference_unlocked(gobj);
- r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -471,6 +476,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
r = ret;
/* Flush HDP cache via MMIO if necessary */
+ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
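The busy ioctl now answers without blocking by testing the BO's reservation object rather than waiting on it. A hedged sketch of that check; example_bo_busy() is hypothetical:

	/* non-blocking "is this BO busy?" test: the 'true' argument checks
	 * shared (read) fences as well as the exclusive one */
	static int example_bo_busy(struct radeon_bo *robj)
	{
		if (reservation_object_test_signaled_rcu(robj->tbo.resv, true))
			return 0;	/* all fences signaled: idle */
		return -EBUSY;		/* still busy, but we did not block */
	}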
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6de5459316b5..07909d817381 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -343,7 +343,6 @@ struct radeon_crtc {
int max_cursor_width;
int max_cursor_height;
uint32_t legacy_display_base_addr;
- uint32_t legacy_cursor_offset;
enum radeon_rmx_type rmx_type;
u8 h_border;
u8 v_border;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index edafd3c2b170..06ac59fe332a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -719,7 +719,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
if (gtt && gtt->userptr) {
- ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+ ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 3662157c2b15..48d97c040f49 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -493,38 +493,35 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
if (bo_va->it.start || bo_va->it.last) {
- spin_lock(&vm->status_lock);
- if (list_empty(&bo_va->vm_status)) {
- /* add a clone of the bo_va to clear the old address */
- struct radeon_bo_va *tmp;
- spin_unlock(&vm->status_lock);
- tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (!tmp) {
- mutex_unlock(&vm->mutex);
- r = -ENOMEM;
- goto error_unreserve;
- }
- tmp->it.start = bo_va->it.start;
- tmp->it.last = bo_va->it.last;
- tmp->vm = vm;
- tmp->bo = radeon_bo_ref(bo_va->bo);
- spin_lock(&vm->status_lock);
- list_add(&tmp->vm_status, &vm->freed);
+ /* add a clone of the bo_va to clear the old address */
+ struct radeon_bo_va *tmp;
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ r = -ENOMEM;
+ goto error_unreserve;
}
- spin_unlock(&vm->status_lock);
+ tmp->it.start = bo_va->it.start;
+ tmp->it.last = bo_va->it.last;
+ tmp->vm = vm;
+ tmp->bo = radeon_bo_ref(bo_va->bo);
interval_tree_remove(&bo_va->it, &vm->va);
+ spin_lock(&vm->status_lock);
bo_va->it.start = 0;
bo_va->it.last = 0;
+ list_del_init(&bo_va->vm_status);
+ list_add(&tmp->vm_status, &vm->freed);
+ spin_unlock(&vm->status_lock);
}
if (soffset || eoffset) {
+ spin_lock(&vm->status_lock);
bo_va->it.start = soffset;
bo_va->it.last = eoffset - 1;
- interval_tree_insert(&bo_va->it, &vm->va);
- spin_lock(&vm->status_lock);
list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
+ interval_tree_insert(&bo_va->it, &vm->va);
}
bo_va->flags = flags;
@@ -1129,12 +1126,12 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
interval_tree_remove(&bo_va->it, &vm->va);
spin_lock(&vm->status_lock);
- if (list_empty(&bo_va->vm_status)) {
+ list_del(&bo_va->vm_status);
+ if (bo_va->it.start || bo_va->it.last) {
bo_va->bo = radeon_bo_ref(bo_va->bo);
list_add(&bo_va->vm_status, &vm->freed);
} else {
radeon_fence_unref(&bo_va->last_pt_update);
- list_del(&bo_va->vm_status);
kfree(bo_va);
}
spin_unlock(&vm->status_lock);
@@ -1158,7 +1155,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
list_for_each_entry(bo_va, &bo->va, bo_list) {
spin_lock(&bo_va->vm->status_lock);
- if (list_empty(&bo_va->vm_status))
+ if (list_empty(&bo_va->vm_status) &&
+ (bo_va->it.start || bo_va->it.last))
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
spin_unlock(&bo_va->vm->status_lock);
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 26388b5dd6ed..07037e32dea3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6466,23 +6466,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6492,23 +6496,27 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6518,23 +6526,27 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6544,23 +6556,27 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6570,23 +6586,27 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6596,23 +6616,27 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6632,88 +6656,112 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
case 6:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+
break;
case 7:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+
break;
case 8:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+
break;
case 9:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+
break;
case 10:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+
break;
case 11:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 2a808822af21..37c16afe007a 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -777,7 +777,7 @@ static int __init i8k_init_hwmon(void)
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
- i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "dell-smm",
+ i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "dell_smm",
NULL, i8k_groups);
if (IS_ERR(i8k_hwmon_dev)) {
err = PTR_ERR(i8k_hwmon_dev);
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index d219c06a857b..972444a14cca 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -31,14 +31,11 @@
/* output format */
#define MCP3021_SAR_SHIFT 2
#define MCP3021_SAR_MASK 0x3ff
-
#define MCP3021_OUTPUT_RES 10 /* 10-bit resolution */
-#define MCP3021_OUTPUT_SCALE 4
#define MCP3221_SAR_SHIFT 0
#define MCP3221_SAR_MASK 0xfff
#define MCP3221_OUTPUT_RES 12 /* 12-bit resolution */
-#define MCP3221_OUTPUT_SCALE 1
enum chips {
mcp3021,
@@ -54,7 +51,6 @@ struct mcp3021_data {
u16 sar_shift;
u16 sar_mask;
u8 output_res;
- u8 output_scale;
};
static int mcp3021_read16(struct i2c_client *client)
@@ -84,13 +80,7 @@ static int mcp3021_read16(struct i2c_client *client)
static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
{
- if (val == 0)
- return 0;
-
- val = val * data->output_scale - data->output_scale / 2;
-
- return val * DIV_ROUND_CLOSEST(data->vdd,
- (1 << data->output_res) * data->output_scale);
+ return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
}
static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
@@ -132,14 +122,12 @@ static int mcp3021_probe(struct i2c_client *client,
data->sar_shift = MCP3021_SAR_SHIFT;
data->sar_mask = MCP3021_SAR_MASK;
data->output_res = MCP3021_OUTPUT_RES;
- data->output_scale = MCP3021_OUTPUT_SCALE;
break;
case mcp3221:
data->sar_shift = MCP3221_SAR_SHIFT;
data->sar_mask = MCP3221_SAR_MASK;
data->output_res = MCP3221_OUTPUT_RES;
- data->output_scale = MCP3221_OUTPUT_SCALE;
break;
}
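The simplified volts_from_reg() drops the half-LSB scale correction and reduces to one rounded division. A standalone sketch of the arithmetic; the kernel's DIV_ROUND_CLOSEST is re-derived here for positive operands:

	#include <stdio.h>

	/* round-to-nearest integer division for positive operands */
	#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

	/* vdd in mV, val is the raw SAR reading, output_res in bits */
	static unsigned int volts_from_reg(unsigned int vdd, unsigned int val,
					   unsigned int output_res)
	{
		return DIV_ROUND_CLOSEST(vdd * val, 1u << output_res);
	}

	int main(void)
	{
		/* e.g. a 10-bit MCP3021 on a 3300 mV rail reading mid-scale */
		printf("%u mV\n", volts_from_reg(3300, 512, 10));	/* -> 1650 */
		return 0;
	}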
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 55765790907b..28fcb2e246d5 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -547,7 +547,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
if (index >= 9 && index < 18 &&
(reg & 0x0c) != 0x04 && (reg & 0x0c) != 0x08) /* RD2 */
return 0;
- if (index >= 18 && index < 27 && (reg & 0x30) != 0x10) /* RD3 */
+ if (index >= 18 && index < 27 && (reg & 0x30) != 0x20) /* RD3 */
return 0;
if (index >= 27 && index < 35) /* local */
return attr->mode;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index b10353b31806..697007afb99c 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1937,27 +1937,11 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data,
static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
int r1, int r2)
{
- u16 tmp;
-
- tmp = data->temp_src[r1];
- data->temp_src[r1] = data->temp_src[r2];
- data->temp_src[r2] = tmp;
-
- tmp = data->reg_temp[r1];
- data->reg_temp[r1] = data->reg_temp[r2];
- data->reg_temp[r2] = tmp;
-
- tmp = data->reg_temp_over[r1];
- data->reg_temp_over[r1] = data->reg_temp_over[r2];
- data->reg_temp_over[r2] = tmp;
-
- tmp = data->reg_temp_hyst[r1];
- data->reg_temp_hyst[r1] = data->reg_temp_hyst[r2];
- data->reg_temp_hyst[r2] = tmp;
-
- tmp = data->reg_temp_config[r1];
- data->reg_temp_config[r1] = data->reg_temp_config[r2];
- data->reg_temp_config[r2] = tmp;
+ swap(data->temp_src[r1], data->temp_src[r2]);
+ swap(data->reg_temp[r1], data->reg_temp[r2]);
+ swap(data->reg_temp_over[r1], data->reg_temp_over[r2]);
+ swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]);
+ swap(data->reg_temp_config[r1], data->reg_temp_config[r2]);
}
static void
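The five hand-rolled temporary-variable exchanges collapse into the kernel's swap() macro; a standalone equivalent for illustration (__typeof__ is the GCC/Clang extension the kernel relies on):

	#include <stdio.h>

	/* standalone equivalent of swap() from <linux/kernel.h> */
	#define swap(a, b) \
		do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

	int main(void)
	{
		unsigned short x = 1, y = 2;

		swap(x, y);	/* replaces one of the tmp dances above */
		printf("x=%u y=%u\n", x, y);	/* -> x=2 y=1 */
		return 0;
	}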
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 4068db4d9580..0a8bce726b4b 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -289,10 +289,7 @@ struct w83792d_data {
u8 temp1[3]; /* current, over, thyst */
u8 temp_add[2][6]; /* Register value */
u8 fan_div[7]; /* Register encoding, shifted right */
- u8 pwm[7]; /*
- * We only consider the first 3 set of pwm,
- * although 792 chip has 7 set of pwm.
- */
+ u8 pwm[7]; /* The 7 PWM outputs */
u8 pwmenable[3];
u32 alarms; /* realtime status register encoding,combined */
u8 chassis; /* Chassis status */
@@ -1075,6 +1072,10 @@ static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2);
+static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3);
+static SENSOR_DEVICE_ATTR(pwm5, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 4);
+static SENSOR_DEVICE_ATTR(pwm6, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 5);
+static SENSOR_DEVICE_ATTR(pwm7, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 6);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
show_pwmenable, store_pwmenable, 1);
static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
@@ -1087,6 +1088,14 @@ static SENSOR_DEVICE_ATTR(pwm2_mode, S_IWUSR | S_IRUGO,
show_pwm_mode, store_pwm_mode, 1);
static SENSOR_DEVICE_ATTR(pwm3_mode, S_IWUSR | S_IRUGO,
show_pwm_mode, store_pwm_mode, 2);
+static SENSOR_DEVICE_ATTR(pwm4_mode, S_IWUSR | S_IRUGO,
+ show_pwm_mode, store_pwm_mode, 3);
+static SENSOR_DEVICE_ATTR(pwm5_mode, S_IWUSR | S_IRUGO,
+ show_pwm_mode, store_pwm_mode, 4);
+static SENSOR_DEVICE_ATTR(pwm6_mode, S_IWUSR | S_IRUGO,
+ show_pwm_mode, store_pwm_mode, 5);
+static SENSOR_DEVICE_ATTR(pwm7_mode, S_IWUSR | S_IRUGO,
+ show_pwm_mode, store_pwm_mode, 6);
static SENSOR_DEVICE_ATTR(tolerance1, S_IWUSR | S_IRUGO,
show_tolerance, store_tolerance, 1);
static SENSOR_DEVICE_ATTR(tolerance2, S_IWUSR | S_IRUGO,
@@ -1177,30 +1186,38 @@ static SENSOR_DEVICE_ATTR(fan6_div, S_IWUSR | S_IRUGO,
static SENSOR_DEVICE_ATTR(fan7_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 7);
-static struct attribute *w83792d_attributes_fan[4][5] = {
+static struct attribute *w83792d_attributes_fan[4][7] = {
{
&sensor_dev_attr_fan4_input.dev_attr.attr,
&sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan4_div.dev_attr.attr,
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
+ &sensor_dev_attr_pwm4.dev_attr.attr,
+ &sensor_dev_attr_pwm4_mode.dev_attr.attr,
NULL
}, {
&sensor_dev_attr_fan5_input.dev_attr.attr,
&sensor_dev_attr_fan5_min.dev_attr.attr,
&sensor_dev_attr_fan5_div.dev_attr.attr,
&sensor_dev_attr_fan5_alarm.dev_attr.attr,
+ &sensor_dev_attr_pwm5.dev_attr.attr,
+ &sensor_dev_attr_pwm5_mode.dev_attr.attr,
NULL
}, {
&sensor_dev_attr_fan6_input.dev_attr.attr,
&sensor_dev_attr_fan6_min.dev_attr.attr,
&sensor_dev_attr_fan6_div.dev_attr.attr,
&sensor_dev_attr_fan6_alarm.dev_attr.attr,
+ &sensor_dev_attr_pwm6.dev_attr.attr,
+ &sensor_dev_attr_pwm6_mode.dev_attr.attr,
NULL
}, {
&sensor_dev_attr_fan7_input.dev_attr.attr,
&sensor_dev_attr_fan7_min.dev_attr.attr,
&sensor_dev_attr_fan7_div.dev_attr.attr,
&sensor_dev_attr_fan7_alarm.dev_attr.attr,
+ &sensor_dev_attr_pwm7.dev_attr.attr,
+ &sensor_dev_attr_pwm7_mode.dev_attr.attr,
NULL
}
};
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 3612cb5b30b2..73a401662853 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -18,6 +18,30 @@ config HWSPINLOCK_OMAP
If unsure, say N.
+config HWSPINLOCK_QCOM
+ tristate "Qualcomm Hardware Spinlock device"
+ depends on ARCH_QCOM
+ select HWSPINLOCK
+ select MFD_SYSCON
+ help
+ Say y here to support the Qualcomm Hardware Mutex functionality, which
+ provides a synchronisation mechanism for the various processors on
+ the SoC.
+
+ If unsure, say N.
+
+config HWSPINLOCK_SIRF
+ tristate "SIRF Hardware Spinlock device"
+ depends on ARCH_SIRF
+ select HWSPINLOCK
+ help
+ Say y here to support the SIRF Hardware Spinlock device, which
+ provides a synchronisation mechanism for the various processors
+ on the SoC.
+
+ It's safe to say n here if you're not interested in the SIRF
+ hardware spinlock or just want a bare minimum kernel.
+
config HSEM_U8500
tristate "STE Hardware Semaphore functionality"
depends on ARCH_U8500
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index 93eb64b66486..6b59cb5a4f3a 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -4,4 +4,6 @@
obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
+obj-$(CONFIG_HWSPINLOCK_QCOM) += qcom_hwspinlock.o
+obj-$(CONFIG_HWSPINLOCK_SIRF) += sirf_hwspinlock.o
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 461a0d739d75..52f708bcf77f 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -27,6 +27,7 @@
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include "hwspinlock_internal.h"
@@ -257,6 +258,84 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
+/**
+ * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
+ * @hwlock_spec: hwlock specifier as found in the device tree
+ *
+ * This is a simple translation function, suitable for hwspinlock platform
+ * drivers that only have a lock specifier length of 1.
+ *
+ * Returns a relative index of the lock within a specified bank on success,
+ * or -EINVAL on invalid specifier cell count.
+ */
+static inline int
+of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
+{
+ if (WARN_ON(hwlock_spec->args_count != 1))
+ return -EINVAL;
+
+ return hwlock_spec->args[0];
+}
+
+/**
+ * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
+ * @np: device node from which to request the specific hwlock
+ * @index: index of the hwlock in the list of values
+ *
+ * This function provides a means for DT users of the hwspinlock module to
+ * get the global lock id of a specific hwspinlock using the phandle of the
+ * hwspinlock device, so that it can be requested using the normal
+ * hwspin_lock_request_specific() API.
+ *
+ * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
+ * device is not yet registered, -EINVAL on an invalid args specifier value,
+ * or an appropriate error as returned from OF parsing of the DT client node.
+ */
+int of_hwspin_lock_get_id(struct device_node *np, int index)
+{
+ struct of_phandle_args args;
+ struct hwspinlock *hwlock;
+ struct radix_tree_iter iter;
+ void **slot;
+ int id;
+ int ret;
+
+ ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
+ &args);
+ if (ret)
+ return ret;
+
+ /* Find the hwspinlock device: we need its base_id */
+ ret = -EPROBE_DEFER;
+ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
+ hwlock = radix_tree_deref_slot(slot);
+ if (unlikely(!hwlock))
+ continue;
+
+ if (hwlock->bank->dev->of_node == args.np) {
+ ret = 0;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (ret < 0)
+ goto out;
+
+ id = of_hwspin_lock_simple_xlate(&args);
+ if (id < 0 || id >= hwlock->bank->num_locks) {
+ ret = -EINVAL;
+ goto out;
+ }
+ id += hwlock->bank->base_id;
+
+out:
+ of_node_put(args.np);
+ return ret ? ret : id;
+}
+EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
+
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
struct hwspinlock *tmp;
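A hedged sketch of how a DT client would consume the new lookup; the "hwlocks" property below and example_get_hwlock() are assumptions for illustration:

	/* assumes a client node carrying something like:
	 *	hwlocks = <&tcsr_mutex 3>;
	 */
	static struct hwspinlock *example_get_hwlock(struct device_node *np)
	{
		int id = of_hwspin_lock_get_id(np, 0);

		if (id < 0)
			return ERR_PTR(id);	/* may be -EPROBE_DEFER */

		return hwspin_lock_request_specific(id);
	}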
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 47a275c6ece1..ad2f8cac8487 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -1,7 +1,7 @@
/*
* OMAP hardware spinlock driver
*
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2010-2015 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Simon Que <sque@ti.com>
* Hari Kanigeri <h-kanigeri2@ti.com>
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include "hwspinlock_internal.h"
@@ -80,14 +81,16 @@ static const struct hwspinlock_ops omap_hwspinlock_ops = {
static int omap_hwspinlock_probe(struct platform_device *pdev)
{
- struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
+ struct device_node *node = pdev->dev.of_node;
struct hwspinlock_device *bank;
struct hwspinlock *hwlock;
struct resource *res;
void __iomem *io_base;
int num_locks, i, ret;
+ /* Only a single hwspinlock block device is supported */
+ int base_id = 0;
- if (!pdata)
+ if (!node)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -141,7 +144,7 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
- pdata->base_id, num_locks);
+ base_id, num_locks);
if (ret)
goto reg_fail;
@@ -174,11 +177,18 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id omap_hwspinlock_of_match[] = {
+ { .compatible = "ti,omap4-hwspinlock", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);
+
static struct platform_driver omap_hwspinlock_driver = {
.probe = omap_hwspinlock_probe,
.remove = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
+ .of_match_table = of_match_ptr(omap_hwspinlock_of_match),
},
};
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
new file mode 100644
index 000000000000..c752447fbac7
--- /dev/null
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, Sony Mobile Communications AB
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "hwspinlock_internal.h"
+
+#define QCOM_MUTEX_APPS_PROC_ID 1
+#define QCOM_MUTEX_NUM_LOCKS 32
+
+static int qcom_hwspinlock_trylock(struct hwspinlock *lock)
+{
+ struct regmap_field *field = lock->priv;
+ u32 lock_owner;
+ int ret;
+
+ ret = regmap_field_write(field, QCOM_MUTEX_APPS_PROC_ID);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_read(field, &lock_owner);
+ if (ret)
+ return ret;
+
+ return lock_owner == QCOM_MUTEX_APPS_PROC_ID;
+}
+
+static void qcom_hwspinlock_unlock(struct hwspinlock *lock)
+{
+ struct regmap_field *field = lock->priv;
+ u32 lock_owner;
+ int ret;
+
+ ret = regmap_field_read(field, &lock_owner);
+ if (ret) {
+ pr_err("%s: unable to query spinlock owner\n", __func__);
+ return;
+ }
+
+ if (lock_owner != QCOM_MUTEX_APPS_PROC_ID) {
+ pr_err("%s: spinlock not owned by us (actual owner is %d)\n",
+ __func__, lock_owner);
+ }
+
+ ret = regmap_field_write(field, 0);
+ if (ret)
+ pr_err("%s: failed to unlock spinlock\n", __func__);
+}
+
+static const struct hwspinlock_ops qcom_hwspinlock_ops = {
+ .trylock = qcom_hwspinlock_trylock,
+ .unlock = qcom_hwspinlock_unlock,
+};
+
+static const struct of_device_id qcom_hwspinlock_of_match[] = {
+ { .compatible = "qcom,sfpb-mutex" },
+ { .compatible = "qcom,tcsr-mutex" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
+
+static int qcom_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct hwspinlock_device *bank;
+ struct device_node *syscon;
+ struct reg_field field;
+ struct regmap *regmap;
+ size_t array_size;
+ u32 stride;
+ u32 base;
+ int ret;
+ int i;
+
+ syscon = of_parse_phandle(pdev->dev.of_node, "syscon", 0);
+ if (!syscon) {
+ dev_err(&pdev->dev, "no syscon property\n");
+ return -ENODEV;
+ }
+
+ regmap = syscon_node_to_regmap(syscon);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, &base);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no offset in syscon\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, &stride);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no stride syscon\n");
+ return -EINVAL;
+ }
+
+ array_size = QCOM_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock);
+ bank = devm_kzalloc(&pdev->dev, sizeof(*bank) + array_size, GFP_KERNEL);
+ if (!bank)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, bank);
+
+ for (i = 0; i < QCOM_MUTEX_NUM_LOCKS; i++) {
+ field.reg = base + i * stride;
+ field.lsb = 0;
+ field.msb = 31;
+
+ bank->lock[i].priv = devm_regmap_field_alloc(&pdev->dev,
+ regmap, field);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = hwspin_lock_register(bank, &pdev->dev, &qcom_hwspinlock_ops,
+ 0, QCOM_MUTEX_NUM_LOCKS);
+ if (ret)
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int qcom_hwspinlock_remove(struct platform_device *pdev)
+{
+ struct hwspinlock_device *bank = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = hwspin_lock_unregister(bank);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver qcom_hwspinlock_driver = {
+ .probe = qcom_hwspinlock_probe,
+ .remove = qcom_hwspinlock_remove,
+ .driver = {
+ .name = "qcom_hwspinlock",
+ .of_match_table = qcom_hwspinlock_of_match,
+ },
+};
+
+static int __init qcom_hwspinlock_init(void)
+{
+ return platform_driver_register(&qcom_hwspinlock_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(qcom_hwspinlock_init);
+
+static void __exit qcom_hwspinlock_exit(void)
+{
+ platform_driver_unregister(&qcom_hwspinlock_driver);
+}
+module_exit(qcom_hwspinlock_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock driver for Qualcomm SoCs");
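A hedged sketch of a client taking one of these locks through the core API; example_touch_shared_resource() is hypothetical, and hwspin_lock_timeout() retries the driver's trylock op above until the millisecond timeout expires:

	static int example_touch_shared_resource(struct hwspinlock *hwlock)
	{
		int ret = hwspin_lock_timeout(hwlock, 100);	/* up to 100 ms */

		if (ret)
			return ret;

		/* ... touch memory shared with the other processors ... */

		hwspin_unlock(hwlock);
		return 0;
	}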
diff --git a/drivers/hwspinlock/sirf_hwspinlock.c b/drivers/hwspinlock/sirf_hwspinlock.c
new file mode 100644
index 000000000000..16018544d431
--- /dev/null
+++ b/drivers/hwspinlock/sirf_hwspinlock.c
@@ -0,0 +1,136 @@
+/*
+ * SIRF hardware spinlock driver
+ *
+ * Copyright (c) 2015 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/hwspinlock.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "hwspinlock_internal.h"
+
+struct sirf_hwspinlock {
+ void __iomem *io_base;
+ struct hwspinlock_device bank;
+};
+
+/* Number of Hardware Spinlocks */
+#define HW_SPINLOCK_NUMBER 30
+
+/* Hardware spinlock register offsets */
+#define HW_SPINLOCK_BASE 0x404
+#define HW_SPINLOCK_OFFSET(x) (HW_SPINLOCK_BASE + 0x4 * (x))
+
+static int sirf_hwspinlock_trylock(struct hwspinlock *lock)
+{
+ void __iomem *lock_addr = lock->priv;
+
+ /* attempt to acquire the lock: the read returns 1 when the lock is granted */
+ return !!readl(lock_addr);
+}
+
+static void sirf_hwspinlock_unlock(struct hwspinlock *lock)
+{
+ void __iomem *lock_addr = lock->priv;
+
+ /* release the lock by writing 0 to it */
+ writel(0, lock_addr);
+}
+
+static const struct hwspinlock_ops sirf_hwspinlock_ops = {
+ .trylock = sirf_hwspinlock_trylock,
+ .unlock = sirf_hwspinlock_unlock,
+};
+
+static int sirf_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct sirf_hwspinlock *hwspin;
+ struct hwspinlock *hwlock;
+ int idx, ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ hwspin = devm_kzalloc(&pdev->dev, sizeof(*hwspin) +
+ sizeof(*hwlock) * HW_SPINLOCK_NUMBER, GFP_KERNEL);
+ if (!hwspin)
+ return -ENOMEM;
+
+ /* retrieve io base */
+ hwspin->io_base = of_iomap(pdev->dev.of_node, 0);
+ if (!hwspin->io_base)
+ return -ENOMEM;
+
+ for (idx = 0; idx < HW_SPINLOCK_NUMBER; idx++) {
+ hwlock = &hwspin->bank.lock[idx];
+ hwlock->priv = hwspin->io_base + HW_SPINLOCK_OFFSET(idx);
+ }
+
+ platform_set_drvdata(pdev, hwspin);
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = hwspin_lock_register(&hwspin->bank, &pdev->dev,
+ &sirf_hwspinlock_ops, 0,
+ HW_SPINLOCK_NUMBER);
+ if (ret)
+ goto reg_failed;
+
+ return 0;
+
+reg_failed:
+ pm_runtime_disable(&pdev->dev);
+ iounmap(hwspin->io_base);
+
+ return ret;
+}
+
+static int sirf_hwspinlock_remove(struct platform_device *pdev)
+{
+ struct sirf_hwspinlock *hwspin = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = hwspin_lock_unregister(&hwspin->bank);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ iounmap(hwspin->io_base);
+
+ return 0;
+}
+
+static const struct of_device_id sirf_hwpinlock_ids[] = {
+ { .compatible = "sirf,hwspinlock", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sirf_hwpinlock_ids);
+
+static struct platform_driver sirf_hwspinlock_driver = {
+ .probe = sirf_hwspinlock_probe,
+ .remove = sirf_hwspinlock_remove,
+ .driver = {
+ .name = "atlas7_hwspinlock",
+ .of_match_table = of_match_ptr(sirf_hwpinlock_ids),
+ },
+};
+
+module_platform_driver(sirf_hwspinlock_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SIRF Hardware spinlock driver");
+MODULE_AUTHOR("Wei Chen <wei.chen@csr.com>");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 35ac23768ce9..577d58d1f1a1 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -633,6 +633,7 @@ config I2C_MPC
config I2C_MT65XX
tristate "MediaTek I2C adapter"
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_DMA
help
This selects the MediaTek(R) Integrated Inter Circuit bus driver
for MT65xx and MT81xx.
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 19b2d689a5ef..f325663c27c5 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -764,12 +764,15 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
if (IS_ERR(i2c->clk))
return PTR_ERR(i2c->clk);
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
- if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &clk_freq)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &clk_freq);
+ if (ret) {
dev_err(&pdev->dev, "clock-frequency not specified in DT");
- return clk_freq;
+ goto err;
}
i2c->speed = clk_freq / 1000;
@@ -790,10 +793,8 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
i2c->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
- if (ret) {
- ret = -ENODEV;
+ if (ret)
goto err;
- }
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
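The jz4780 fixes enforce the usual clock error-path rule: once clk_prepare_enable() has succeeded, every later failure must unwind it. A hedged sketch, assuming the driver's err label undoes the clock enable; setup() stands in for the rest of probe:

	static int example_probe(struct clk *clk, int (*setup)(void))
	{
		int ret;

		ret = clk_prepare_enable(clk);	/* can itself fail: check it */
		if (ret)
			return ret;

		ret = setup();
		if (ret)
			goto err;

		return 0;

	err:
		clk_disable_unprepare(clk);
		return ret;
	}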
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index dcca7076231e..1c9cb65ac4cf 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -419,6 +419,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
rc = i2c_add_adapter(adapter);
if (rc) {
dev_err(&pdev->dev, "Adapter registeration failed\n");
+ mbox_free_channel(ctx->mbox_chan);
return rc;
}
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 069a41f116dd..e6d4935161e4 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1012,6 +1012,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
*/
void i2c_unregister_device(struct i2c_client *client)
{
+ if (client->dev.of_node)
+ of_node_clear_flag(client->dev.of_node, OF_POPULATED);
device_unregister(&client->dev);
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -1320,8 +1322,11 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
- for_each_available_child_of_node(adap->dev.of_node, node)
+ for_each_available_child_of_node(adap->dev.of_node, node) {
+ if (of_node_test_and_set_flag(node, OF_POPULATED))
+ continue;
of_i2c_register_device(adap, node);
+ }
}
static int of_dev_node_match(struct device *dev, void *data)
@@ -1853,6 +1858,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
if (adap == NULL)
return NOTIFY_OK; /* not for us */
+ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
+ put_device(&adap->dev);
+ return NOTIFY_OK;
+ }
+
client = of_i2c_register_device(adap, rd->dn);
put_device(&adap->dev);
@@ -1863,6 +1873,10 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
}
break;
case OF_RECONFIG_CHANGE_REMOVE:
+ /* already depopulated? */
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* find our device by node */
client = of_find_i2c_device_by_node(rd->dn);
if (client == NULL)
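The i2c-core hunks make device creation idempotent by test-and-setting OF_POPULATED (an atomic bit op in the kernel) before registering, so the bulk enumeration and the OF reconfig notifier cannot both create a device for the same node. A standalone model of why that works:

	#include <stdio.h>
	#include <stdbool.h>

	/* non-atomic stand-in for of_node_test_and_set_flag() */
	struct node { bool populated; };

	static bool test_and_set(struct node *n)
	{
		bool old = n->populated;
		n->populated = true;
		return old;
	}

	static void register_node(struct node *n, const char *who)
	{
		if (test_and_set(n)) {
			printf("%s: already populated, skipping\n", who);
			return;
		}
		printf("%s: registering device\n", who);
	}

	int main(void)
	{
		struct node n = { false };
		register_node(&n, "bulk enumeration");	/* registers */
		register_node(&n, "reconfig notifier");	/* skips */
		return 0;
	}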
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 1ca8e32a9592..25422a3a7238 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -277,7 +277,7 @@ static int remove_file(struct dentry *parent, char *name)
}
spin_lock(&tmp->d_lock);
- if (!d_unhashed(tmp) && d_really_is_positive(tmp)) {
+ if (simple_positive(tmp)) {
dget_dlock(tmp);
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index bdd5d3857203..13ef22bd9459 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name)
}
spin_lock(&tmp->d_lock);
- if (!d_unhashed(tmp) && d_really_is_positive(tmp)) {
+ if (simple_positive(tmp)) {
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
simple_unlink(d_inode(parent), tmp);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index f3b7a34e10d8..771700963127 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1356,7 +1356,7 @@ sequence_cmd:
if (!rc && dump_payload == false && unsol_data)
iscsit_set_unsoliticed_dataout(cmd);
else if (dump_payload && imm_data)
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
@@ -1781,7 +1781,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
struct se_cmd *se_cmd = &cmd->se_cmd;
- target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ target_put_sess_cmd(se_cmd);
}
}
@@ -1954,7 +1954,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
spin_unlock_bh(&cmd->istate_lock);
if (ret) {
- target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ target_put_sess_cmd(se_cmd);
transport_send_check_condition_and_sense(se_cmd,
se_cmd->pi_err, 0);
} else {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 4556cd11288e..82897ca17f32 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -47,7 +47,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "ib_srpt.h"
/* Name of this kernel module. */
@@ -94,7 +93,6 @@ MODULE_PARM_DESC(srpt_service_guid,
" instead of using the node_guid of the first HCA.");
static struct ib_client srpt_client;
-static const struct target_core_fabric_ops srpt_template;
static void srpt_release_channel(struct srpt_rdma_ch *ch);
static int srpt_queue_status(struct se_cmd *cmd);
@@ -1336,12 +1334,12 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
BUG_ON(ch->sess == NULL);
- target_put_sess_cmd(ch->sess, &ioctx->cmd);
+ target_put_sess_cmd(&ioctx->cmd);
goto out;
}
pr_debug("Aborting cmd with state %d and tag %lld\n", state,
- ioctx->tag);
+ ioctx->cmd.tag);
switch (state) {
case SRPT_STATE_NEW:
@@ -1367,11 +1365,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* not been received in time.
*/
srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
- target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
+ target_put_sess_cmd(&ioctx->cmd);
break;
case SRPT_STATE_MGMT_RSP_SENT:
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
+ target_put_sess_cmd(&ioctx->cmd);
break;
default:
WARN(1, "Unexpected command state (%d)", state);
@@ -1389,7 +1387,6 @@ static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
{
struct srpt_send_ioctx *ioctx;
enum srpt_command_state state;
- struct se_cmd *cmd;
u32 index;
atomic_inc(&ch->sq_wr_avail);
@@ -1397,7 +1394,6 @@ static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
index = idx_from_wr_id(wr_id);
ioctx = ch->ioctx_ring[index];
state = srpt_get_cmd_state(ioctx);
- cmd = &ioctx->cmd;
WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
&& state != SRPT_STATE_MGMT_RSP_SENT
@@ -1474,10 +1470,8 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx,
enum srpt_opcode opcode)
{
- struct se_cmd *cmd;
enum srpt_command_state state;
- cmd = &ioctx->cmd;
state = srpt_get_cmd_state(ioctx);
switch (opcode) {
case SRPT_RDMA_READ_LAST:
@@ -1681,7 +1675,7 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
struct srpt_send_ioctx *ioctx = container_of(cmd,
struct srpt_send_ioctx, cmd);
- return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
+ return target_put_sess_cmd(&ioctx->cmd);
}
/**
@@ -1703,7 +1697,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
srp_cmd = recv_ioctx->ioctx.buf;
cmd = &send_ioctx->cmd;
- send_ioctx->tag = srp_cmd->tag;
+ cmd->tag = srp_cmd->tag;
switch (srp_cmd->task_attr) {
case SRP_CMD_SIMPLE_Q:
@@ -1774,7 +1768,7 @@ static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
for (i = 0; i < ch->rq_size; ++i) {
target = ch->ioctx_ring[i];
if (target->cmd.se_lun == ioctx->cmd.se_lun &&
- target->tag == tag &&
+ target->cmd.tag == tag &&
srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
ret = 0;
/* now let the target core abort &target->cmd; */
@@ -1833,7 +1827,7 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
- send_ioctx->tag = srp_tsk->tag;
+ send_ioctx->cmd.tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
if (tcm_tmr < 0) {
send_ioctx->cmd.se_tmr_req->response =
@@ -2180,12 +2174,9 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
*/
static void __srpt_close_ch(struct srpt_rdma_ch *ch)
{
- struct srpt_device *sdev;
enum rdma_ch_state prev_state;
unsigned long flags;
- sdev = ch->sport->sdev;
-
spin_lock_irqsave(&ch->spinlock, flags);
prev_state = ch->state;
switch (prev_state) {
@@ -2983,7 +2974,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
case CH_DRAINING:
case CH_RELEASING:
pr_debug("cmd with tag %lld: channel disconnecting\n",
- ioctx->tag);
+ ioctx->cmd.tag);
srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
ret = -EINVAL;
goto out;
@@ -3058,27 +3049,27 @@ static void srpt_queue_response(struct se_cmd *cmd)
ret = srpt_xfer_data(ch, ioctx);
if (ret) {
pr_err("xfer_data failed for tag %llu\n",
- ioctx->tag);
+ ioctx->cmd.tag);
return;
}
}
if (state != SRPT_STATE_MGMT)
- resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
+ resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
cmd->scsi_status);
else {
srp_tm_status
= tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
- ioctx->tag);
+ ioctx->cmd.tag);
}
ret = srpt_post_send(ch, ioctx, resp_len);
if (ret) {
pr_err("sending cmd response failed for tag %llu\n",
- ioctx->tag);
+ ioctx->cmd.tag);
srpt_unmap_sg_to_ib_sge(ch, ioctx);
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
+ target_put_sess_cmd(&ioctx->cmd);
}
}
@@ -3398,11 +3389,6 @@ static char *srpt_get_fabric_name(void)
return "srpt";
}
-static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- return SCSI_TRANSPORTID_PROTOCOLID_SRP;
-}
-
static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
{
struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
@@ -3415,69 +3401,6 @@ static u16 srpt_get_tag(struct se_portal_group *tpg)
return 1;
}
-static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code, unsigned char *buf)
-{
- struct srpt_node_acl *nacl;
- struct spc_rdma_transport_id *tr_id;
-
- nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
- tr_id = (void *)buf;
- tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
- memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
- return sizeof(*tr_id);
-}
-
-static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- *format_code = 0;
- return sizeof(struct spc_rdma_transport_id);
-}
-
-static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
- const char *buf, u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- struct spc_rdma_transport_id *tr_id;
-
- *port_nexus_ptr = NULL;
- *out_tid_len = sizeof(struct spc_rdma_transport_id);
- tr_id = (void *)buf;
- return (char *)tr_id->i_port_id;
-}
-
-static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
- struct srpt_node_acl *nacl;
-
- nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
- if (!nacl) {
- pr_err("Unable to allocate struct srpt_node_acl\n");
- return NULL;
- }
-
- return &nacl->nacl;
-}
-
-static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
-{
- struct srpt_node_acl *nacl;
-
- nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
- kfree(nacl);
-}
-
static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
return 1;
@@ -3551,14 +3474,6 @@ static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
{
}
-static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
-{
- struct srpt_send_ioctx *ioctx;
-
- ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return ioctx->tag;
-}
-
/* Note: only used from inside debug printk's by the TCM core. */
static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
{
@@ -3601,40 +3516,19 @@ out:
* configfs callback function invoked for
* mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
*/
-static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
- struct config_group *group,
- const char *name)
+static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
- struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
- struct se_node_acl *se_nacl, *se_nacl_new;
- struct srpt_node_acl *nacl;
- int ret = 0;
- u32 nexus_depth = 1;
+ struct srpt_port *sport =
+ container_of(se_nacl->se_tpg, struct srpt_port, port_tpg_1);
+ struct srpt_node_acl *nacl =
+ container_of(se_nacl, struct srpt_node_acl, nacl);
u8 i_port_id[16];
if (srpt_parse_i_port_id(i_port_id, name) < 0) {
pr_err("invalid initiator port ID %s\n", name);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
- se_nacl_new = srpt_alloc_fabric_acl(tpg);
- if (!se_nacl_new) {
- ret = -ENOMEM;
- goto err;
- }
- /*
- * nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a node ACL from demo mode to explict
- */
- se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
- nexus_depth);
- if (IS_ERR(se_nacl)) {
- ret = PTR_ERR(se_nacl);
- goto err;
- }
- /* Locate our struct srpt_node_acl and set sdev and i_port_id. */
- nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
nacl->sport = sport;
@@ -3642,29 +3536,22 @@ static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
list_add_tail(&nacl->list, &sport->port_acl_list);
spin_unlock_irq(&sport->port_acl_lock);
- return se_nacl;
-err:
- return ERR_PTR(ret);
+ return 0;
}
/*
* configfs callback function invoked for
* rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
*/
-static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
+static void srpt_cleanup_nodeacl(struct se_node_acl *se_nacl)
{
- struct srpt_node_acl *nacl;
- struct srpt_device *sdev;
- struct srpt_port *sport;
+ struct srpt_node_acl *nacl =
+ container_of(se_nacl, struct srpt_node_acl, nacl);
+ struct srpt_port *sport = nacl->sport;
- nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
- sport = nacl->sport;
- sdev = sport->sdev;
spin_lock_irq(&sport->port_acl_lock);
list_del(&nacl->list);
spin_unlock_irq(&sport->port_acl_lock);
- core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
- srpt_release_fabric_acl(NULL, se_nacl);
}
static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
@@ -3849,8 +3736,7 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
int res;
/* Initialize sport->port_wwn and sport->port_tpg_1 */
- res = core_tpg_register(&srpt_template, &sport->port_wwn,
- &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
+ res = core_tpg_register(&sport->port_wwn, &sport->port_tpg_1, SCSI_PROTOCOL_SRP);
if (res)
return ERR_PTR(res);
@@ -3920,20 +3806,14 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
static const struct target_core_fabric_ops srpt_template = {
.module = THIS_MODULE,
.name = "srpt",
+ .node_acl_size = sizeof(struct srpt_node_acl),
.get_fabric_name = srpt_get_fabric_name,
- .get_fabric_proto_ident = srpt_get_fabric_proto_ident,
.tpg_get_wwn = srpt_get_fabric_wwn,
.tpg_get_tag = srpt_get_tag,
- .tpg_get_default_depth = srpt_get_default_depth,
- .tpg_get_pr_transport_id = srpt_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id,
.tpg_check_demo_mode = srpt_check_false,
.tpg_check_demo_mode_cache = srpt_check_true,
.tpg_check_demo_mode_write_protect = srpt_check_true,
.tpg_check_prod_mode_write_protect = srpt_check_false,
- .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl,
- .tpg_release_fabric_acl = srpt_release_fabric_acl,
.tpg_get_inst_index = srpt_tpg_get_inst_index,
.release_cmd = srpt_release_cmd,
.check_stop_free = srpt_check_stop_free,
@@ -3944,7 +3824,6 @@ static const struct target_core_fabric_ops srpt_template = {
.write_pending = srpt_write_pending,
.write_pending_status = srpt_write_pending_status,
.set_default_node_attributes = srpt_set_default_node_attrs,
- .get_task_tag = srpt_get_task_tag,
.get_cmd_state = srpt_get_tcm_cmd_state,
.queue_data_in = srpt_queue_data_in,
.queue_status = srpt_queue_status,
@@ -3958,12 +3837,8 @@ static const struct target_core_fabric_ops srpt_template = {
.fabric_drop_wwn = srpt_drop_tport,
.fabric_make_tpg = srpt_make_tpg,
.fabric_drop_tpg = srpt_drop_tpg,
- .fabric_post_link = NULL,
- .fabric_pre_unlink = NULL,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
- .fabric_make_nodeacl = srpt_make_nodeacl,
- .fabric_drop_nodeacl = srpt_drop_nodeacl,
+ .fabric_init_nodeacl = srpt_init_nodeacl,
+ .fabric_cleanup_nodeacl = srpt_cleanup_nodeacl,
.tfc_wwn_attrs = srpt_wwn_attrs,
.tfc_tpg_base_attrs = srpt_tpg_attrs,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index d85c0c205625..21f8df67522a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -238,7 +238,6 @@ struct srpt_send_ioctx {
bool rdma_aborted;
struct se_cmd cmd;
struct completion tx_done;
- u64 tag;
int sg_cnt;
int mapped_sg_count;
u16 n_rdma_ius;
@@ -410,34 +409,16 @@ struct srpt_device {
/**
* struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
+ * @nacl: Target core node ACL information.
* @i_port_id: 128-bit SRP initiator port ID.
* @sport: port information.
- * @nacl: Target core node ACL information.
* @list: Element of the per-HCA ACL list.
*/
struct srpt_node_acl {
+ struct se_node_acl nacl;
u8 i_port_id[16];
struct srpt_port *sport;
- struct se_node_acl nacl;
struct list_head list;
};
-/*
- * SRP-releated SCSI persistent reservation definitions.
- *
- * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
- * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
- * SCSI over an RDMA interface).
- */
-
-enum {
- SCSI_TRANSPORTID_PROTOCOLID_SRP = 4,
-};
-
-struct spc_rdma_transport_id {
- uint8_t protocol_identifier;
- uint8_t reserved[7];
- uint8_t i_port_id[16];
-};
-
#endif /* IB_SRPT_H */
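The two srpt hunks above convert the driver to the target core's embedded-ACL scheme: with .node_acl_size set in the fabric template, the core allocates the whole struct srpt_node_acl itself, and the driver only initializes and cleans up the embedded se_node_acl, which is now the first member so the core's allocation and the fabric wrapper coincide. A minimal sketch of the recovery pattern, assuming only container_of() semantics (names below are illustrative, not from the patch):

	/* Sketch: the core allocates node_acl_size bytes and hands the
	 * fabric the embedded se_node_acl; container_of() recovers the
	 * wrapper. Illustrative names only. */
	struct sketch_nacl {
		struct se_node_acl nacl;	/* first: the core's view of the block */
		u8 i_port_id[16];		/* fabric-private data follows */
	};

	static struct sketch_nacl *to_sketch_nacl(struct se_node_acl *se_nacl)
	{
		return container_of(se_nacl, struct sketch_nacl, nacl);
	}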
diff --git a/drivers/input/input.c b/drivers/input/input.c
index f31578423636..78d24990a816 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -677,12 +677,9 @@ static void input_dev_release_keys(struct input_dev *dev)
int code;
if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
- for (code = 0; code <= KEY_MAX; code++) {
- if (is_event_supported(code, dev->keybit, KEY_MAX) &&
- __test_and_clear_bit(code, dev->key)) {
- input_pass_event(dev, EV_KEY, code, 0);
- }
- }
+ for_each_set_bit(code, dev->key, KEY_CNT)
+ input_pass_event(dev, EV_KEY, code, 0);
+ memset(dev->key, 0, sizeof(dev->key));
input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
}
}
@@ -1626,10 +1623,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
if (!test_bit(EV_##type, dev->evbit)) \
break; \
\
- for (i = 0; i < type##_MAX; i++) { \
- if (!test_bit(i, dev->bits##bit)) \
- continue; \
- \
+ for_each_set_bit(i, dev->bits##bit, type##_CNT) { \
active = test_bit(i, dev->bits); \
if (!active && !on) \
continue; \
@@ -1980,22 +1974,12 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
- if (test_bit(EV_ABS, dev->evbit)) {
- for (i = 0; i < ABS_CNT; i++) {
- if (test_bit(i, dev->absbit)) {
- if (input_is_mt_axis(i))
- events += mt_slots;
- else
- events++;
- }
- }
- }
+ if (test_bit(EV_ABS, dev->evbit))
+ for_each_set_bit(i, dev->absbit, ABS_CNT)
+ events += input_is_mt_axis(i) ? mt_slots : 1;
- if (test_bit(EV_REL, dev->evbit)) {
- for (i = 0; i < REL_CNT; i++)
- if (test_bit(i, dev->relbit))
- events++;
- }
+ if (test_bit(EV_REL, dev->evbit))
+ events += bitmap_weight(dev->relbit, REL_CNT);
/* Make room for KEY and MSC events */
events += 7;
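The input.c hunks above replace open-coded bit scans with the generic bitmap helpers. A small sketch of the two helpers as used here, assuming the usual linux/bitops.h semantics (the function name is illustrative):

	#include <linux/bitops.h>
	#include <linux/bitmap.h>
	#include <linux/bug.h>

	/* Visit only the set bits, then cross-check against the
	 * single-call population count. */
	static unsigned int sketch_count_bits(const unsigned long *bits,
					      unsigned int nbits)
	{
		unsigned int i, n = 0;

		for_each_set_bit(i, bits, nbits)
			n++;

		WARN_ON(n != bitmap_weight(bits, nbits));	/* same count */
		return n;
	}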
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 61c761156371..f8850f9cb331 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -344,6 +344,7 @@ struct usb_xpad {
int mapping; /* map d-pad to buttons or to axes */
int xtype; /* type of xbox device */
+ unsigned long led_no; /* LED to light up on xbox360 controllers */
};
/*
@@ -488,6 +489,8 @@ static void xpad360_process_packet(struct usb_xpad *xpad,
input_sync(dev);
}
+static void xpad_identify_controller(struct usb_xpad *xpad);
+
/*
* xpad360w_process_packet
*
@@ -510,6 +513,11 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
if (data[1] & 0x80) {
xpad->pad_present = 1;
usb_submit_urb(xpad->bulk_out, GFP_ATOMIC);
+ /*
+ * Light up the segment corresponding to
+ * controller number.
+ */
+ xpad_identify_controller(xpad);
} else
xpad->pad_present = 0;
}
@@ -881,17 +889,63 @@ struct xpad_led {
struct usb_xpad *xpad;
};
+/**
+ * @param command
+ * 0: off
+ * 1: all blink, then previous setting
+ * 2: 1/top-left blink, then on
+ * 3: 2/top-right blink, then on
+ * 4: 3/bottom-left blink, then on
+ * 5: 4/bottom-right blink, then on
+ * 6: 1/top-left on
+ * 7: 2/top-right on
+ * 8: 3/bottom-left on
+ * 9: 4/bottom-right on
+ * 10: rotate
+ * 11: blink, based on previous setting
+ * 12: slow blink, based on previous setting
+ * 13: rotate with two lights
+ * 14: persistent slow all blink
+ * 15: blink once, then previous setting
+ */
static void xpad_send_led_command(struct usb_xpad *xpad, int command)
{
- if (command >= 0 && command < 14) {
- mutex_lock(&xpad->odata_mutex);
+ command %= 16;
+
+ mutex_lock(&xpad->odata_mutex);
+
+ switch (xpad->xtype) {
+ case XTYPE_XBOX360:
xpad->odata[0] = 0x01;
xpad->odata[1] = 0x03;
xpad->odata[2] = command;
xpad->irq_out->transfer_buffer_length = 3;
- usb_submit_urb(xpad->irq_out, GFP_KERNEL);
- mutex_unlock(&xpad->odata_mutex);
+ break;
+ case XTYPE_XBOX360W:
+ xpad->odata[0] = 0x00;
+ xpad->odata[1] = 0x00;
+ xpad->odata[2] = 0x08;
+ xpad->odata[3] = 0x40 + command;
+ xpad->odata[4] = 0x00;
+ xpad->odata[5] = 0x00;
+ xpad->odata[6] = 0x00;
+ xpad->odata[7] = 0x00;
+ xpad->odata[8] = 0x00;
+ xpad->odata[9] = 0x00;
+ xpad->odata[10] = 0x00;
+ xpad->odata[11] = 0x00;
+ xpad->irq_out->transfer_buffer_length = 12;
+ break;
}
+
+ usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+ mutex_unlock(&xpad->odata_mutex);
+}
+
+static void xpad_identify_controller(struct usb_xpad *xpad)
+{
+ /* Light up the segment corresponding to controller number */
+ xpad_send_led_command(xpad, (xpad->led_no % 4) + 2);
}
static void xpad_led_set(struct led_classdev *led_cdev,
@@ -905,22 +959,21 @@ static void xpad_led_set(struct led_classdev *led_cdev,
static int xpad_led_probe(struct usb_xpad *xpad)
{
- static atomic_t led_seq = ATOMIC_INIT(-1);
- unsigned long led_no;
+ static atomic_t led_seq = ATOMIC_INIT(-1);
struct xpad_led *led;
struct led_classdev *led_cdev;
int error;
- if (xpad->xtype != XTYPE_XBOX360)
+ if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX360W)
return 0;
xpad->led = led = kzalloc(sizeof(struct xpad_led), GFP_KERNEL);
if (!led)
return -ENOMEM;
- led_no = atomic_inc_return(&led_seq);
+ xpad->led_no = atomic_inc_return(&led_seq);
- snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
+ snprintf(led->name, sizeof(led->name), "xpad%lu", xpad->led_no);
led->xpad = xpad;
led_cdev = &led->led_cdev;
@@ -934,10 +987,8 @@ static int xpad_led_probe(struct usb_xpad *xpad)
return error;
}
- /*
- * Light up the segment corresponding to controller number
- */
- xpad_send_led_command(xpad, (led_no % 4) + 2);
+ /* Light up the segment corresponding to controller number */
+ xpad_identify_controller(xpad);
return 0;
}
@@ -954,6 +1005,7 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
#else
static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
static void xpad_led_disconnect(struct usb_xpad *xpad) { }
+static void xpad_identify_controller(struct usb_xpad *xpad) { }
#endif
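For reference, the xpad changes above juggle two LED report layouts: wired Xbox 360 pads take a 3-byte report, wireless pads a 12-byte report with the command offset by 0x40, and per the command table commands 2..5 blink and then light segments 1..4, which is why xpad_identify_controller() sends (led_no % 4) + 2. A condensed sketch of the layouts (fragment; u8, memset and the XTYPE_* constants come from the driver):

	static void sketch_fill_led_report(u8 *odata, int xtype, int command)
	{
		if (xtype == XTYPE_XBOX360) {
			/* wired: 3-byte report */
			odata[0] = 0x01;
			odata[1] = 0x03;
			odata[2] = command;
		} else if (xtype == XTYPE_XBOX360W) {
			/* wireless: 12-byte report, command at offset 3 */
			memset(odata, 0, 12);
			odata[2] = 0x08;
			odata[3] = 0x40 + command;
		}
	}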
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 2e855e6f3565..d2ea863d6a45 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -506,7 +506,9 @@ static int imx_keypad_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, keypad);
/* Ensure that the keypad will stay dormant until opened */
- clk_prepare_enable(keypad->clk);
+ error = clk_prepare_enable(keypad->clk);
+ if (error)
+ return error;
imx_keypad_inhibit(keypad);
clk_disable_unprepare(keypad->clk);
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index f1c844739cd7..10e140af5aac 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -167,9 +167,13 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
struct input_dev *idev = pwr;
struct axp20x_pek *axp20x_pek = input_get_drvdata(idev);
- if (irq == axp20x_pek->irq_dbr)
+ /*
+ * The power button is connected to ground, so a falling edge (dbf)
+ * means it is pressed.
+ */
+ if (irq == axp20x_pek->irq_dbf)
input_report_key(idev, KEY_POWER, true);
- else if (irq == axp20x_pek->irq_dbf)
+ else if (irq == axp20x_pek->irq_dbr)
input_report_key(idev, KEY_POWER, false);
input_sync(idev);
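The axp20x swap above is the entire fix; with the button wired to ground, the falling-edge interrupt is the press. Restated with the hardware assumption spelled out:

	/* Active-low button (assumption stated in the comment above):
	 * dbf = falling edge = pressed, dbr = rising edge = released. */
	if (irq == axp20x_pek->irq_dbf)
		input_report_key(idev, KEY_POWER, true);
	else if (irq == axp20x_pek->irq_dbr)
		input_report_key(idev, KEY_POWER, false);
	input_sync(idev);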
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 62641f2adaf7..5b5f403d8ce6 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -771,7 +771,7 @@ static const struct attribute_group *elan_sysfs_groups[] = {
*/
static void elan_report_contact(struct elan_tp_data *data,
int contact_num, bool contact_valid,
- bool hover_event, u8 *finger_data)
+ u8 *finger_data)
{
struct input_dev *input = data->input;
unsigned int pos_x, pos_y;
@@ -815,9 +815,7 @@ static void elan_report_contact(struct elan_tp_data *data,
input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
input_report_abs(input, ABS_MT_POSITION_X, pos_x);
input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
- input_report_abs(input, ABS_MT_DISTANCE, hover_event);
- input_report_abs(input, ABS_MT_PRESSURE,
- hover_event ? 0 : scaled_pressure);
+ input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure);
input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
@@ -839,14 +837,14 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
hover_event = hover_info & 0x40;
for (i = 0; i < ETP_MAX_FINGERS; i++) {
contact_valid = tp_info & (1U << (3 + i));
- elan_report_contact(data, i, contact_valid, hover_event,
- finger_data);
+ elan_report_contact(data, i, contact_valid, finger_data);
if (contact_valid)
finger_data += ETP_FINGER_DATA_LEN;
}
input_report_key(input, BTN_LEFT, tp_info & 0x01);
+ input_report_abs(input, ABS_DISTANCE, hover_event != 0);
input_mt_report_pointer_emulation(input, true);
input_sync(input);
}
@@ -922,6 +920,7 @@ static int elan_setup_input_device(struct elan_tp_data *data)
input_abs_set_res(input, ABS_Y, data->y_res);
input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
+ input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
/* And MT parameters */
input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0);
@@ -934,7 +933,6 @@ static int elan_setup_input_device(struct elan_tp_data *data)
ETP_FINGER_WIDTH * max_width, 0, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
ETP_FINGER_WIDTH * min_width, 0, 0);
- input_set_abs_params(input, ABS_MT_DISTANCE, 0, 1, 0, 0);
data->input = input;
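The elan hunks above demote hover from a per-slot ABS_MT_DISTANCE to a single per-frame ABS_DISTANCE, consistent with the hardware reporting one hover flag per packet rather than one per contact. The per-frame report shape, sketched (fragment mirroring the hunk):

	/* Hover is frame-wide, so it is reported once per sync rather
	 * than inside the per-contact loop. */
	input_report_key(input, BTN_LEFT, tp_info & 0x01);
	input_report_abs(input, ABS_DISTANCE, hover_event != 0);
	input_mt_report_pointer_emulation(input, true);
	input_sync(input);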
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 35c8d0ceabee..3a32caf06bf1 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1199,7 +1199,7 @@ static void set_input_params(struct psmouse *psmouse,
ABS_MT_POSITION_Y);
/* Image sensors can report per-contact pressure */
input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
- input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK);
+ input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
/* Image sensors can signal 4 and 5 finger clicks */
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 77833d7a004b..200841b77edb 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -244,6 +244,7 @@ config SERIO_PS2MULT
config SERIO_ARC_PS2
tristate "ARC PS/2 support"
+ depends on HAS_IOMEM
help
Say Y here if you have an ARC FPGA platform with a PS/2
controller in it.
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index d20fe1dff403..a854c6e5f09e 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -658,6 +658,18 @@ config TOUCHSCREEN_PIXCIR
To compile this driver as a module, choose M here: the
module will be called pixcir_i2c_ts.
+config TOUCHSCREEN_WDT87XX_I2C
+ tristate "Weida HiTech I2C touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a Weida WDT87XX I2C touchscreen
+ connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wdt87xx_i2c.
+
config TOUCHSCREEN_WM831X
tristate "Support for WM831x touchscreen controllers"
depends on MFD_WM831X
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 44deea743d02..fa3d33bac7fc 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
obj-$(CONFIG_TOUCHSCREEN_WACOM_I2C) += wacom_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_WDT87XX_I2C) += wdt87xx_i2c.o
obj-$(CONFIG_TOUCHSCREEN_WM831X) += wm831x-ts.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX) += wm97xx-ts.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index e6aef3e48bd9..394b1de9a2a3 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1035,20 +1035,15 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
input->id.bustype = BUS_I2C;
input->dev.parent = &client->dev;
- __set_bit(EV_KEY, input->evbit);
- __set_bit(EV_ABS, input->evbit);
- __set_bit(BTN_TOUCH, input->keybit);
- input_set_abs_params(input, ABS_X, 0, tsdata->num_x * 64 - 1, 0, 0);
- input_set_abs_params(input, ABS_Y, 0, tsdata->num_y * 64 - 1, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
0, tsdata->num_x * 64 - 1, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y,
0, tsdata->num_y * 64 - 1, 0, 0);
if (!pdata)
- touchscreen_parse_of_params(input);
+ touchscreen_parse_of_params(input, true);
- error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, 0);
+ error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, INPUT_MT_DIRECT);
if (error) {
dev_err(&client->dev, "Unable to init MT slots.\n");
return error;
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index b82b5207c78b..806cd0ad160f 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -14,14 +14,22 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-static u32 of_get_optional_u32(struct device_node *np,
- const char *property)
+static bool touchscreen_get_prop_u32(struct device_node *np,
+ const char *property,
+ unsigned int default_value,
+ unsigned int *value)
{
- u32 val = 0;
+ u32 val;
+ int error;
- of_property_read_u32(np, property, &val);
+ error = of_property_read_u32(np, property, &val);
+ if (error) {
+ *value = default_value;
+ return false;
+ }
- return val;
+ *value = val;
+ return true;
}
static void touchscreen_set_params(struct input_dev *dev,
@@ -54,34 +62,45 @@ static void touchscreen_set_params(struct input_dev *dev,
 * input device accordingly. The function keeps previously set up default
* values if no value is specified via DT.
*/
-void touchscreen_parse_of_params(struct input_dev *dev)
+void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch)
{
struct device_node *np = dev->dev.parent->of_node;
- u32 maximum, fuzz;
+ unsigned int axis;
+ unsigned int maximum, fuzz;
+ bool data_present;
input_alloc_absinfo(dev);
if (!dev->absinfo)
return;
- maximum = of_get_optional_u32(np, "touchscreen-size-x");
- fuzz = of_get_optional_u32(np, "touchscreen-fuzz-x");
- if (maximum || fuzz) {
- touchscreen_set_params(dev, ABS_X, maximum, fuzz);
- touchscreen_set_params(dev, ABS_MT_POSITION_X, maximum, fuzz);
- }
+ axis = multitouch ? ABS_MT_POSITION_X : ABS_X;
+ data_present = touchscreen_get_prop_u32(np, "touchscreen-size-x",
+ input_abs_get_max(dev, axis),
+ &maximum) |
+ touchscreen_get_prop_u32(np, "touchscreen-fuzz-x",
+ input_abs_get_fuzz(dev, axis),
+ &fuzz);
+ if (data_present)
+ touchscreen_set_params(dev, axis, maximum, fuzz);
- maximum = of_get_optional_u32(np, "touchscreen-size-y");
- fuzz = of_get_optional_u32(np, "touchscreen-fuzz-y");
- if (maximum || fuzz) {
- touchscreen_set_params(dev, ABS_Y, maximum, fuzz);
- touchscreen_set_params(dev, ABS_MT_POSITION_Y, maximum, fuzz);
- }
+ axis = multitouch ? ABS_MT_POSITION_Y : ABS_Y;
+ data_present = touchscreen_get_prop_u32(np, "touchscreen-size-y",
+ input_abs_get_max(dev, axis),
+ &maximum) |
+ touchscreen_get_prop_u32(np, "touchscreen-fuzz-y",
+ input_abs_get_fuzz(dev, axis),
+ &fuzz);
+ if (data_present)
+ touchscreen_set_params(dev, axis, maximum, fuzz);
- maximum = of_get_optional_u32(np, "touchscreen-max-pressure");
- fuzz = of_get_optional_u32(np, "touchscreen-fuzz-pressure");
- if (maximum || fuzz) {
- touchscreen_set_params(dev, ABS_PRESSURE, maximum, fuzz);
- touchscreen_set_params(dev, ABS_MT_PRESSURE, maximum, fuzz);
- }
+ axis = multitouch ? ABS_MT_PRESSURE : ABS_PRESSURE;
+ data_present = touchscreen_get_prop_u32(np, "touchscreen-max-pressure",
+ input_abs_get_max(dev, axis),
+ &maximum) |
+ touchscreen_get_prop_u32(np, "touchscreen-fuzz-pressure",
+ input_abs_get_fuzz(dev, axis),
+ &fuzz);
+ if (data_present)
+ touchscreen_set_params(dev, axis, maximum, fuzz);
}
EXPORT_SYMBOL(touchscreen_parse_of_params);
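One subtlety in the rewritten parser above deserves emphasis: the paired touchscreen_get_prop_u32() calls are combined with bitwise |, not logical ||, so the fuzz property is still read when the size property is present and each value independently falls back to the device's current absinfo. Condensed (fragment from the X-axis case above):

	/* '|' evaluates both reads; '||' would skip the fuzz lookup
	 * whenever the size property was found first. */
	data_present = touchscreen_get_prop_u32(np, "touchscreen-size-x",
						input_abs_get_max(dev, axis),
						&maximum) |
		       touchscreen_get_prop_u32(np, "touchscreen-fuzz-x",
						input_abs_get_fuzz(dev, axis),
						&fuzz);
	if (data_present)
		touchscreen_set_params(dev, axis, maximum, fuzz);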
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 72657c579430..d8c025b0f88c 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -709,7 +709,7 @@ static int tsc2005_probe(struct spi_device *spi)
input_set_abs_params(input_dev, ABS_PRESSURE, 0, max_p, fudge_p, 0);
if (np)
- touchscreen_parse_of_params(input_dev);
+ touchscreen_parse_of_params(input_dev, false);
input_dev->open = tsc2005_open;
input_dev->close = tsc2005_close;
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
new file mode 100644
index 000000000000..fb92ae1c5fae
--- /dev/null
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -0,0 +1,1149 @@
+/*
+ * Weida HiTech WDT87xx TouchScreen I2C driver
+ *
+ * Copyright (c) 2015 Weida Hi-Tech Co., Ltd.
+ * HN Chen <hn.chen@weidahitech.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <linux/input/mt.h>
+#include <linux/acpi.h>
+#include <asm/unaligned.h>
+
+#define WDT87XX_NAME "wdt87xx_i2c"
+#define WDT87XX_DRV_VER "0.9.6"
+#define WDT87XX_FW_NAME "wdt87xx_fw.bin"
+#define WDT87XX_CFG_NAME "wdt87xx_cfg.bin"
+
+#define MODE_ACTIVE 0x01
+#define MODE_READY 0x02
+#define MODE_IDLE 0x03
+#define MODE_SLEEP 0x04
+#define MODE_STOP 0xFF
+
+#define WDT_MAX_FINGER 10
+#define WDT_RAW_BUF_COUNT 54
+#define WDT_V1_RAW_BUF_COUNT 74
+#define WDT_FIRMWARE_ID 0xa9e368f5
+
+#define PG_SIZE 0x1000
+#define MAX_RETRIES 3
+
+#define MAX_UNIT_AXIS 0x7FFF
+
+#define PKT_READ_SIZE 72
+#define PKT_WRITE_SIZE 80
+
+/* The definition of a finger event within a report packet */
+#define FINGER_EV_OFFSET_ID 0
+#define FINGER_EV_OFFSET_X 1
+#define FINGER_EV_OFFSET_Y 3
+#define FINGER_EV_SIZE 5
+
+#define FINGER_EV_V1_OFFSET_ID 0
+#define FINGER_EV_V1_OFFSET_W 1
+#define FINGER_EV_V1_OFFSET_P 2
+#define FINGER_EV_V1_OFFSET_X 3
+#define FINGER_EV_V1_OFFSET_Y 5
+#define FINGER_EV_V1_SIZE 7
+
+/* The definition of a report packet */
+#define TOUCH_PK_OFFSET_REPORT_ID 0
+#define TOUCH_PK_OFFSET_EVENT 1
+#define TOUCH_PK_OFFSET_SCAN_TIME 51
+#define TOUCH_PK_OFFSET_FNGR_NUM 53
+
+#define TOUCH_PK_V1_OFFSET_REPORT_ID 0
+#define TOUCH_PK_V1_OFFSET_EVENT 1
+#define TOUCH_PK_V1_OFFSET_SCAN_TIME 71
+#define TOUCH_PK_V1_OFFSET_FNGR_NUM 73
+
+/* The definition of the controller parameters */
+#define CTL_PARAM_OFFSET_FW_ID 0
+#define CTL_PARAM_OFFSET_PLAT_ID 2
+#define CTL_PARAM_OFFSET_XMLS_ID1 4
+#define CTL_PARAM_OFFSET_XMLS_ID2 6
+#define CTL_PARAM_OFFSET_PHY_CH_X 8
+#define CTL_PARAM_OFFSET_PHY_CH_Y 10
+#define CTL_PARAM_OFFSET_PHY_X0 12
+#define CTL_PARAM_OFFSET_PHY_X1 14
+#define CTL_PARAM_OFFSET_PHY_Y0 16
+#define CTL_PARAM_OFFSET_PHY_Y1 18
+#define CTL_PARAM_OFFSET_PHY_W 22
+#define CTL_PARAM_OFFSET_PHY_H 24
+#define CTL_PARAM_OFFSET_FACTOR 32
+
+/* Communication commands */
+#define PACKET_SIZE 56
+#define VND_REQ_READ 0x06
+#define VND_READ_DATA 0x07
+#define VND_REQ_WRITE 0x08
+
+#define VND_CMD_START 0x00
+#define VND_CMD_STOP 0x01
+#define VND_CMD_RESET 0x09
+
+#define VND_CMD_ERASE 0x1A
+
+#define VND_GET_CHECKSUM 0x66
+
+#define VND_SET_DATA 0x83
+#define VND_SET_COMMAND_DATA 0x84
+#define VND_SET_CHECKSUM_CALC 0x86
+#define VND_SET_CHECKSUM_LENGTH 0x87
+
+#define VND_CMD_SFLCK 0xFC
+#define VND_CMD_SFUNL 0xFD
+
+#define CMD_SFLCK_KEY 0xC39B
+#define CMD_SFUNL_KEY 0x95DA
+
+#define STRIDX_PLATFORM_ID 0x80
+#define STRIDX_PARAMETERS 0x81
+
+#define CMD_BUF_SIZE 8
+#define PKT_BUF_SIZE 64
+
+/* The definition of the command packet */
+#define CMD_REPORT_ID_OFFSET 0x0
+#define CMD_TYPE_OFFSET 0x1
+#define CMD_INDEX_OFFSET 0x2
+#define CMD_KEY_OFFSET 0x3
+#define CMD_LENGTH_OFFSET 0x4
+#define CMD_DATA_OFFSET 0x8
+
+/* The definition of firmware chunk tags */
+#define FOURCC_ID_RIFF 0x46464952
+#define FOURCC_ID_WHIF 0x46494857
+#define FOURCC_ID_FRMT 0x544D5246
+#define FOURCC_ID_FRWR 0x52575246
+#define FOURCC_ID_CNFG 0x47464E43
+
+#define CHUNK_ID_FRMT FOURCC_ID_FRMT
+#define CHUNK_ID_FRWR FOURCC_ID_FRWR
+#define CHUNK_ID_CNFG FOURCC_ID_CNFG
+
+#define FW_FOURCC1_OFFSET 0
+#define FW_SIZE_OFFSET 4
+#define FW_FOURCC2_OFFSET 8
+#define FW_PAYLOAD_OFFSET 40
+
+#define FW_CHUNK_ID_OFFSET 0
+#define FW_CHUNK_SIZE_OFFSET 4
+#define FW_CHUNK_TGT_START_OFFSET 8
+#define FW_CHUNK_PAYLOAD_LEN_OFFSET 12
+#define FW_CHUNK_SRC_START_OFFSET 16
+#define FW_CHUNK_VERSION_OFFSET 20
+#define FW_CHUNK_ATTR_OFFSET 24
+#define FW_CHUNK_PAYLOAD_OFFSET 32
+
+/* Controller requires minimum 300us between commands */
+#define WDT_COMMAND_DELAY_MS 2
+#define WDT_FLASH_WRITE_DELAY_MS 4
+
+struct wdt87xx_sys_param {
+ u16 fw_id;
+ u16 plat_id;
+ u16 xmls_id1;
+ u16 xmls_id2;
+ u16 phy_ch_x;
+ u16 phy_ch_y;
+ u16 phy_w;
+ u16 phy_h;
+ u16 scaling_factor;
+ u32 max_x;
+ u32 max_y;
+};
+
+struct wdt87xx_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ /* Mutex for fw update to prevent concurrent access */
+ struct mutex fw_mutex;
+ struct wdt87xx_sys_param param;
+ u8 phys[32];
+};
+
+static int wdt87xx_i2c_xfer(struct i2c_client *client,
+ void *txdata, size_t txlen,
+ void *rxdata, size_t rxlen)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = txlen,
+ .buf = txdata,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = rxlen,
+ .buf = rxdata,
+ },
+ };
+ int error;
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev, "%s: i2c transfer failed: %d\n",
+ __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int wdt87xx_get_string(struct i2c_client *client, u8 str_idx,
+ u8 *buf, size_t len)
+{
+ u8 tx_buf[] = { 0x22, 0x00, 0x13, 0x0E, str_idx, 0x23, 0x00 };
+ u8 rx_buf[PKT_WRITE_SIZE];
+ size_t rx_len = len + 2;
+ int error;
+
+ if (rx_len > sizeof(rx_buf))
+ return -EINVAL;
+
+ error = wdt87xx_i2c_xfer(client, tx_buf, sizeof(tx_buf),
+ rx_buf, rx_len);
+ if (error) {
+ dev_err(&client->dev, "get string failed: %d\n", error);
+ return error;
+ }
+
+ if (rx_buf[1] != 0x03) {
+ dev_err(&client->dev, "unexpected response to get string: %d\n",
+ rx_buf[1]);
+ return -EINVAL;
+ }
+
+ rx_len = min_t(size_t, len, rx_buf[0]);
+ memcpy(buf, &rx_buf[2], rx_len);
+
+ mdelay(WDT_COMMAND_DELAY_MS);
+
+ return 0;
+}
+
+static int wdt87xx_get_feature(struct i2c_client *client,
+ u8 *buf, size_t buf_size)
+{
+ u8 tx_buf[8];
+ u8 rx_buf[PKT_WRITE_SIZE];
+ size_t tx_len = 0;
+ size_t rx_len = buf_size + 2;
+ int error;
+
+ if (rx_len > sizeof(rx_buf))
+ return -EINVAL;
+
+ /* Get feature command packet */
+ tx_buf[tx_len++] = 0x22;
+ tx_buf[tx_len++] = 0x00;
+ if (buf[CMD_REPORT_ID_OFFSET] > 0xF) {
+ tx_buf[tx_len++] = 0x30;
+ tx_buf[tx_len++] = 0x02;
+ tx_buf[tx_len++] = buf[CMD_REPORT_ID_OFFSET];
+ } else {
+ tx_buf[tx_len++] = 0x30 | buf[CMD_REPORT_ID_OFFSET];
+ tx_buf[tx_len++] = 0x02;
+ }
+ tx_buf[tx_len++] = 0x23;
+ tx_buf[tx_len++] = 0x00;
+
+ error = wdt87xx_i2c_xfer(client, tx_buf, tx_len, rx_buf, rx_len);
+ if (error) {
+ dev_err(&client->dev, "get feature failed: %d\n", error);
+ return error;
+ }
+
+ rx_len = min_t(size_t, buf_size, get_unaligned_le16(rx_buf));
+ memcpy(buf, &rx_buf[2], rx_len);
+
+ mdelay(WDT_COMMAND_DELAY_MS);
+
+ return 0;
+}
+
+static int wdt87xx_set_feature(struct i2c_client *client,
+ const u8 *buf, size_t buf_size)
+{
+ u8 tx_buf[PKT_WRITE_SIZE];
+ int tx_len = 0;
+ int error;
+
+ /* Set feature command packet */
+ tx_buf[tx_len++] = 0x22;
+ tx_buf[tx_len++] = 0x00;
+ if (buf[CMD_REPORT_ID_OFFSET] > 0xF) {
+ tx_buf[tx_len++] = 0x30;
+ tx_buf[tx_len++] = 0x03;
+ tx_buf[tx_len++] = buf[CMD_REPORT_ID_OFFSET];
+ } else {
+ tx_buf[tx_len++] = 0x30 | buf[CMD_REPORT_ID_OFFSET];
+ tx_buf[tx_len++] = 0x03;
+ }
+ tx_buf[tx_len++] = 0x23;
+ tx_buf[tx_len++] = 0x00;
+ tx_buf[tx_len++] = (buf_size & 0xFF);
+ tx_buf[tx_len++] = ((buf_size & 0xFF00) >> 8);
+
+ if (tx_len + buf_size > sizeof(tx_buf))
+ return -EINVAL;
+
+ memcpy(&tx_buf[tx_len], buf, buf_size);
+ tx_len += buf_size;
+
+ error = i2c_master_send(client, tx_buf, tx_len);
+ if (error < 0) {
+ dev_err(&client->dev, "set feature failed: %d\n", error);
+ return error;
+ }
+
+ mdelay(WDT_COMMAND_DELAY_MS);
+
+ return 0;
+}
+
+static int wdt87xx_send_command(struct i2c_client *client, int cmd, int value)
+{
+ u8 cmd_buf[CMD_BUF_SIZE];
+
+ /* Set the command packet */
+ cmd_buf[CMD_REPORT_ID_OFFSET] = VND_REQ_WRITE;
+ cmd_buf[CMD_TYPE_OFFSET] = VND_SET_COMMAND_DATA;
+ put_unaligned_le16((u16)cmd, &cmd_buf[CMD_INDEX_OFFSET]);
+
+ switch (cmd) {
+ case VND_CMD_START:
+ case VND_CMD_STOP:
+ case VND_CMD_RESET:
+ /* Mode selector */
+ put_unaligned_le32((value & 0xFF), &cmd_buf[CMD_LENGTH_OFFSET]);
+ break;
+
+ case VND_CMD_SFLCK:
+ put_unaligned_le16(CMD_SFLCK_KEY, &cmd_buf[CMD_KEY_OFFSET]);
+ break;
+
+ case VND_CMD_SFUNL:
+ put_unaligned_le16(CMD_SFUNL_KEY, &cmd_buf[CMD_KEY_OFFSET]);
+ break;
+
+ case VND_CMD_ERASE:
+ case VND_SET_CHECKSUM_CALC:
+ case VND_SET_CHECKSUM_LENGTH:
+ put_unaligned_le32(value, &cmd_buf[CMD_KEY_OFFSET]);
+ break;
+
+ default:
+ cmd_buf[CMD_REPORT_ID_OFFSET] = 0;
+ dev_err(&client->dev, "Invalid command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ return wdt87xx_set_feature(client, cmd_buf, sizeof(cmd_buf));
+}
+
+static int wdt87xx_sw_reset(struct i2c_client *client)
+{
+ int error;
+
+ dev_dbg(&client->dev, "resetting device now\n");
+
+ error = wdt87xx_send_command(client, VND_CMD_RESET, 0);
+ if (error) {
+ dev_err(&client->dev, "reset failed\n");
+ return error;
+ }
+
+ /* Wait for the device to be ready */
+ msleep(200);
+
+ return 0;
+}
+
+static const void *wdt87xx_get_fw_chunk(const struct firmware *fw, u32 id)
+{
+ size_t pos = FW_PAYLOAD_OFFSET;
+ u32 chunk_id, chunk_size;
+
+ while (pos < fw->size) {
+ chunk_id = get_unaligned_le32(fw->data +
+ pos + FW_CHUNK_ID_OFFSET);
+ if (chunk_id == id)
+ return fw->data + pos;
+
+ chunk_size = get_unaligned_le32(fw->data +
+ pos + FW_CHUNK_SIZE_OFFSET);
+ pos += chunk_size + 2 * sizeof(u32); /* chunk ID + size */
+ }
+
+ return NULL;
+}
+
+static int wdt87xx_get_sysparam(struct i2c_client *client,
+ struct wdt87xx_sys_param *param)
+{
+ u8 buf[PKT_READ_SIZE];
+ int error;
+
+ error = wdt87xx_get_string(client, STRIDX_PARAMETERS, buf, 34);
+ if (error) {
+ dev_err(&client->dev, "failed to get parameters\n");
+ return error;
+ }
+
+ param->xmls_id1 = get_unaligned_le16(buf + CTL_PARAM_OFFSET_XMLS_ID1);
+ param->xmls_id2 = get_unaligned_le16(buf + CTL_PARAM_OFFSET_XMLS_ID2);
+ param->phy_ch_x = get_unaligned_le16(buf + CTL_PARAM_OFFSET_PHY_CH_X);
+ param->phy_ch_y = get_unaligned_le16(buf + CTL_PARAM_OFFSET_PHY_CH_Y);
+ param->phy_w = get_unaligned_le16(buf + CTL_PARAM_OFFSET_PHY_W) / 10;
+ param->phy_h = get_unaligned_le16(buf + CTL_PARAM_OFFSET_PHY_H) / 10;
+
+ /* Get the scaling factor of pixel to logical coordinate */
+ param->scaling_factor =
+ get_unaligned_le16(buf + CTL_PARAM_OFFSET_FACTOR);
+
+ param->max_x = MAX_UNIT_AXIS;
+ param->max_y = DIV_ROUND_CLOSEST(MAX_UNIT_AXIS * param->phy_h,
+ param->phy_w);
+
+ error = wdt87xx_get_string(client, STRIDX_PLATFORM_ID, buf, 8);
+ if (error) {
+ dev_err(&client->dev, "failed to get platform id\n");
+ return error;
+ }
+
+ param->plat_id = buf[1];
+
+ buf[0] = 0xf2;
+ error = wdt87xx_get_feature(client, buf, 16);
+ if (error) {
+ dev_err(&client->dev, "failed to get firmware id\n");
+ return error;
+ }
+
+ if (buf[0] != 0xf2) {
+ dev_err(&client->dev, "wrong id of fw response: 0x%x\n",
+ buf[0]);
+ return -EINVAL;
+ }
+
+ param->fw_id = get_unaligned_le16(&buf[1]);
+
+ dev_info(&client->dev,
+ "fw_id: 0x%x, plat_id: 0x%x, xml_id1: %04x, xml_id2: %04x\n",
+ param->fw_id, param->plat_id,
+ param->xmls_id1, param->xmls_id2);
+
+ return 0;
+}
+
+static int wdt87xx_validate_firmware(struct wdt87xx_data *wdt,
+ const struct firmware *fw)
+{
+ const void *fw_chunk;
+ u32 data1, data2;
+ u32 size;
+ u8 fw_chip_id;
+ u8 chip_id;
+
+ data1 = get_unaligned_le32(fw->data + FW_FOURCC1_OFFSET);
+ data2 = get_unaligned_le32(fw->data + FW_FOURCC2_OFFSET);
+ if (data1 != FOURCC_ID_RIFF || data2 != FOURCC_ID_WHIF) {
+ dev_err(&wdt->client->dev, "check fw tag failed\n");
+ return -EINVAL;
+ }
+
+ size = get_unaligned_le32(fw->data + FW_SIZE_OFFSET);
+ if (size != fw->size) {
+ dev_err(&wdt->client->dev,
+ "fw size mismatch: expected %d, actual %zu\n",
+ size, fw->size);
+ return -EINVAL;
+ }
+
+ /*
+ * Get the chip_id from the firmware. Make sure that it is the
+ * right controller to do the firmware and config update.
+ */
+ fw_chunk = wdt87xx_get_fw_chunk(fw, CHUNK_ID_FRWR);
+ if (!fw_chunk) {
+ dev_err(&wdt->client->dev,
+ "unable to locate firmware chunk\n");
+ return -EINVAL;
+ }
+
+ fw_chip_id = (get_unaligned_le32(fw_chunk +
+ FW_CHUNK_VERSION_OFFSET) >> 12) & 0xF;
+ chip_id = (wdt->param.fw_id >> 12) & 0xF;
+
+ if (fw_chip_id != chip_id) {
+ dev_err(&wdt->client->dev,
+ "fw version mismatch: fw %d vs. chip %d\n",
+ fw_chip_id, chip_id);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int wdt87xx_validate_fw_chunk(const void *data, int id)
+{
+ if (id == CHUNK_ID_FRWR) {
+ u32 fw_id;
+
+ fw_id = get_unaligned_le32(data + FW_CHUNK_PAYLOAD_OFFSET);
+ if (fw_id != WDT_FIRMWARE_ID)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wdt87xx_write_data(struct i2c_client *client, const char *data,
+ u32 address, int length)
+{
+ u16 packet_size;
+ int count = 0;
+ int error;
+ u8 pkt_buf[PKT_BUF_SIZE];
+
+ /* Address and length should be 4 bytes aligned */
+ if ((address & 0x3) != 0 || (length & 0x3) != 0) {
+ dev_err(&client->dev,
+ "addr & len must be 4 bytes aligned %x, %x\n",
+ address, length);
+ return -EINVAL;
+ }
+
+ while (length) {
+ packet_size = min(length, PACKET_SIZE);
+
+ pkt_buf[CMD_REPORT_ID_OFFSET] = VND_REQ_WRITE;
+ pkt_buf[CMD_TYPE_OFFSET] = VND_SET_DATA;
+ put_unaligned_le16(packet_size, &pkt_buf[CMD_INDEX_OFFSET]);
+ put_unaligned_le32(address, &pkt_buf[CMD_LENGTH_OFFSET]);
+ memcpy(&pkt_buf[CMD_DATA_OFFSET], data, packet_size);
+
+ error = wdt87xx_set_feature(client, pkt_buf, sizeof(pkt_buf));
+ if (error)
+ return error;
+
+ length -= packet_size;
+ data += packet_size;
+ address += packet_size;
+
+ /* Wait for the controller to finish the write */
+ mdelay(WDT_FLASH_WRITE_DELAY_MS);
+
+ if ((++count % 32) == 0) {
+ /* Delay for fw to clear the watchdog */
+ msleep(20);
+ }
+ }
+
+ return 0;
+}
+
+static u16 misr(u16 cur_value, u8 new_value)
+{
+ u32 a, b;
+ u32 bit0;
+ u32 y;
+
+ a = cur_value;
+ b = new_value;
+ bit0 = a ^ (b & 1);
+ bit0 ^= a >> 1;
+ bit0 ^= a >> 2;
+ bit0 ^= a >> 4;
+ bit0 ^= a >> 5;
+ bit0 ^= a >> 7;
+ bit0 ^= a >> 11;
+ bit0 ^= a >> 15;
+ y = (a << 1) ^ b;
+ y = (y & ~1) | (bit0 & 1);
+
+ return (u16)y;
+}
+
+static u16 wdt87xx_calculate_checksum(const u8 *data, size_t length)
+{
+ u16 checksum = 0;
+ size_t i;
+
+ for (i = 0; i < length; i++)
+ checksum = misr(checksum, data[i]);
+
+ return checksum;
+}
+
+static int wdt87xx_get_checksum(struct i2c_client *client, u16 *checksum,
+ u32 address, int length)
+{
+ int error;
+ int time_delay;
+ u8 pkt_buf[PKT_BUF_SIZE];
+ u8 cmd_buf[CMD_BUF_SIZE];
+
+ error = wdt87xx_send_command(client, VND_SET_CHECKSUM_LENGTH, length);
+ if (error) {
+ dev_err(&client->dev, "failed to set checksum length\n");
+ return error;
+ }
+
+ error = wdt87xx_send_command(client, VND_SET_CHECKSUM_CALC, address);
+ if (error) {
+ dev_err(&client->dev, "failed to set checksum address\n");
+ return error;
+ }
+
+ /* Wait for the operation to complete */
+ time_delay = DIV_ROUND_UP(length, 1024);
+ msleep(time_delay * 30);
+
+ memset(cmd_buf, 0, sizeof(cmd_buf));
+ cmd_buf[CMD_REPORT_ID_OFFSET] = VND_REQ_READ;
+ cmd_buf[CMD_TYPE_OFFSET] = VND_GET_CHECKSUM;
+ error = wdt87xx_set_feature(client, cmd_buf, sizeof(cmd_buf));
+ if (error) {
+ dev_err(&client->dev, "failed to request checksum\n");
+ return error;
+ }
+
+ memset(pkt_buf, 0, sizeof(pkt_buf));
+ pkt_buf[CMD_REPORT_ID_OFFSET] = VND_READ_DATA;
+ error = wdt87xx_get_feature(client, pkt_buf, sizeof(pkt_buf));
+ if (error) {
+ dev_err(&client->dev, "failed to read checksum\n");
+ return error;
+ }
+
+ *checksum = get_unaligned_le16(&pkt_buf[CMD_DATA_OFFSET]);
+ return 0;
+}
+
+static int wdt87xx_write_firmware(struct i2c_client *client, const void *chunk)
+{
+ u32 start_addr = get_unaligned_le32(chunk + FW_CHUNK_TGT_START_OFFSET);
+ u32 size = get_unaligned_le32(chunk + FW_CHUNK_PAYLOAD_LEN_OFFSET);
+ const void *data = chunk + FW_CHUNK_PAYLOAD_OFFSET;
+ int error;
+ int err1;
+ int page_size;
+ int retry = 0;
+ u16 device_checksum, firmware_checksum;
+
+ dev_dbg(&client->dev, "start 4k page program\n");
+
+ error = wdt87xx_send_command(client, VND_CMD_STOP, MODE_STOP);
+ if (error) {
+ dev_err(&client->dev, "stop report mode failed\n");
+ return error;
+ }
+
+ error = wdt87xx_send_command(client, VND_CMD_SFUNL, 0);
+ if (error) {
+ dev_err(&client->dev, "unlock failed\n");
+ goto out_enable_reporting;
+ }
+
+ mdelay(10);
+
+ while (size) {
+ dev_dbg(&client->dev, "%s: %x, %x\n", __func__,
+ start_addr, size);
+
+ page_size = min_t(u32, size, PG_SIZE);
+ size -= page_size;
+
+ for (retry = 0; retry < MAX_RETRIES; retry++) {
+ error = wdt87xx_send_command(client, VND_CMD_ERASE,
+ start_addr);
+ if (error) {
+ dev_err(&client->dev,
+ "erase failed at %#08x\n", start_addr);
+ break;
+ }
+
+ msleep(50);
+
+ error = wdt87xx_write_data(client, data, start_addr,
+ page_size);
+ if (error) {
+ dev_err(&client->dev,
+ "write failed at %#08x (%d bytes)\n",
+ start_addr, page_size);
+ break;
+ }
+
+ error = wdt87xx_get_checksum(client, &device_checksum,
+ start_addr, page_size);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to retrieve checksum for %#08x (len: %d)\n",
+ start_addr, page_size);
+ break;
+ }
+
+ firmware_checksum =
+ wdt87xx_calculate_checksum(data, page_size);
+
+ if (device_checksum == firmware_checksum)
+ break;
+
+ dev_err(&client->dev,
+ "checksum fail: %d vs %d, retry %d\n",
+ device_checksum, firmware_checksum, retry);
+ }
+
+ if (retry == MAX_RETRIES) {
+ dev_err(&client->dev, "page write failed\n");
+ error = -EIO;
+ goto out_lock_device;
+ }
+
+ start_addr = start_addr + page_size;
+ data = data + page_size;
+ }
+
+out_lock_device:
+ err1 = wdt87xx_send_command(client, VND_CMD_SFLCK, 0);
+ if (err1)
+ dev_err(&client->dev, "lock failed\n");
+
+ mdelay(10);
+
+out_enable_reporting:
+ err1 = wdt87xx_send_command(client, VND_CMD_START, 0);
+ if (err1)
+ dev_err(&client->dev, "start to report failed\n");
+
+ return error ? error : err1;
+}
+
+static int wdt87xx_load_chunk(struct i2c_client *client,
+ const struct firmware *fw, u32 ck_id)
+{
+ const void *chunk;
+ int error;
+
+ chunk = wdt87xx_get_fw_chunk(fw, ck_id);
+ if (!chunk) {
+ dev_err(&client->dev, "unable to locate chunk (type %d)\n",
+ ck_id);
+ return -EINVAL;
+ }
+
+ error = wdt87xx_validate_fw_chunk(chunk, ck_id);
+ if (error) {
+ dev_err(&client->dev, "invalid chunk (type %d): %d\n",
+ ck_id, error);
+ return error;
+ }
+
+ error = wdt87xx_write_firmware(client, chunk);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to write fw chunk (type %d): %d\n",
+ ck_id, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int wdt87xx_do_update_firmware(struct i2c_client *client,
+ const struct firmware *fw,
+ unsigned int chunk_id)
+{
+ struct wdt87xx_data *wdt = i2c_get_clientdata(client);
+ int error;
+
+ error = wdt87xx_validate_firmware(wdt, fw);
+ if (error)
+ return error;
+
+ error = mutex_lock_interruptible(&wdt->fw_mutex);
+ if (error)
+ return error;
+
+ disable_irq(client->irq);
+
+ error = wdt87xx_load_chunk(client, fw, chunk_id);
+ if (error) {
+ dev_err(&client->dev,
+ "firmware load failed (type: %d): %d\n",
+ chunk_id, error);
+ goto out;
+ }
+
+ error = wdt87xx_sw_reset(client);
+ if (error) {
+ dev_err(&client->dev, "soft reset failed: %d\n", error);
+ goto out;
+ }
+
+ /* Refresh the parameters */
+ error = wdt87xx_get_sysparam(client, &wdt->param);
+ if (error)
+ dev_err(&client->dev,
+ "failed to refresh system paramaters: %d\n", error);
+out:
+ enable_irq(client->irq);
+ mutex_unlock(&wdt->fw_mutex);
+
+ return error ? error : 0;
+}
+
+static int wdt87xx_update_firmware(struct device *dev,
+ const char *fw_name, unsigned int chunk_id)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ const struct firmware *fw;
+ int error;
+
+ error = request_firmware(&fw, fw_name, dev);
+ if (error) {
+ dev_err(&client->dev, "unable to retrieve firmware %s: %d\n",
+ fw_name, error);
+ return error;
+ }
+
+ error = wdt87xx_do_update_firmware(client, fw, chunk_id);
+
+ release_firmware(fw);
+
+ return error ? error : 0;
+}
+
+static ssize_t config_csum_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct wdt87xx_data *wdt = i2c_get_clientdata(client);
+ u32 cfg_csum;
+
+ cfg_csum = wdt->param.xmls_id1;
+ cfg_csum = (cfg_csum << 16) | wdt->param.xmls_id2;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", cfg_csum);
+}
+
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct wdt87xx_data *wdt = i2c_get_clientdata(client);
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", wdt->param.fw_id);
+}
+
+static ssize_t plat_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct wdt87xx_data *wdt = i2c_get_clientdata(client);
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", wdt->param.plat_id);
+}
+
+static ssize_t update_config_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int error;
+
+ error = wdt87xx_update_firmware(dev, WDT87XX_CFG_NAME, CHUNK_ID_CNFG);
+
+ return error ? error : count;
+}
+
+static ssize_t update_fw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int error;
+
+ error = wdt87xx_update_firmware(dev, WDT87XX_FW_NAME, CHUNK_ID_FRWR);
+
+ return error ? error : count;
+}
+
+static DEVICE_ATTR_RO(config_csum);
+static DEVICE_ATTR_RO(fw_version);
+static DEVICE_ATTR_RO(plat_id);
+static DEVICE_ATTR_WO(update_config);
+static DEVICE_ATTR_WO(update_fw);
+
+static struct attribute *wdt87xx_attrs[] = {
+ &dev_attr_config_csum.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_plat_id.attr,
+ &dev_attr_update_config.attr,
+ &dev_attr_update_fw.attr,
+ NULL
+};
+
+static const struct attribute_group wdt87xx_attr_group = {
+ .attrs = wdt87xx_attrs,
+};
+
+static void wdt87xx_report_contact(struct input_dev *input,
+ struct wdt87xx_sys_param *param,
+ u8 *buf)
+{
+ int finger_id;
+ u32 x, y, w;
+ u8 p;
+
+ finger_id = (buf[FINGER_EV_V1_OFFSET_ID] >> 3) - 1;
+ if (finger_id < 0)
+ return;
+
+ /* Check if this is an active contact */
+ if (!(buf[FINGER_EV_V1_OFFSET_ID] & 0x1))
+ return;
+
+ w = buf[FINGER_EV_V1_OFFSET_W];
+ w *= param->scaling_factor;
+
+ p = buf[FINGER_EV_V1_OFFSET_P];
+
+ x = get_unaligned_le16(buf + FINGER_EV_V1_OFFSET_X);
+
+ y = get_unaligned_le16(buf + FINGER_EV_V1_OFFSET_Y);
+ y = DIV_ROUND_CLOSEST(y * param->phy_h, param->phy_w);
+
+ /* Refuse incorrect coordinates */
+ if (x > param->max_x || y > param->max_y)
+ return;
+
+ dev_dbg(input->dev.parent, "tip on (%d), x(%d), y(%d)\n",
+ finger_id, x, y);
+
+ input_mt_slot(input, finger_id);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, 1);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, w);
+ input_report_abs(input, ABS_MT_PRESSURE, p);
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+}
+
+static irqreturn_t wdt87xx_ts_interrupt(int irq, void *dev_id)
+{
+ struct wdt87xx_data *wdt = dev_id;
+ struct i2c_client *client = wdt->client;
+ int i, fingers;
+ int error;
+ u8 raw_buf[WDT_V1_RAW_BUF_COUNT] = {0};
+
+ error = i2c_master_recv(client, raw_buf, WDT_V1_RAW_BUF_COUNT);
+ if (error < 0) {
+ dev_err(&client->dev, "read v1 raw data failed: %d\n", error);
+ goto irq_exit;
+ }
+
+ fingers = raw_buf[TOUCH_PK_V1_OFFSET_FNGR_NUM];
+ if (!fingers)
+ goto irq_exit;
+
+ for (i = 0; i < WDT_MAX_FINGER; i++)
+ wdt87xx_report_contact(wdt->input,
+ &wdt->param,
+ &raw_buf[TOUCH_PK_V1_OFFSET_EVENT +
+ i * FINGER_EV_V1_SIZE]);
+
+ input_mt_sync_frame(wdt->input);
+ input_sync(wdt->input);
+
+irq_exit:
+ return IRQ_HANDLED;
+}
+
+static int wdt87xx_ts_create_input_device(struct wdt87xx_data *wdt)
+{
+ struct device *dev = &wdt->client->dev;
+ struct input_dev *input;
+ unsigned int res = DIV_ROUND_CLOSEST(MAX_UNIT_AXIS, wdt->param.phy_w);
+ int error;
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+ wdt->input = input;
+
+ input->name = "WDT87xx Touchscreen";
+ input->id.bustype = BUS_I2C;
+ input->phys = wdt->phys;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0,
+ wdt->param.max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
+ wdt->param.max_y, 0, 0);
+ input_abs_set_res(input, ABS_MT_POSITION_X, res);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, res);
+
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR,
+ 0, wdt->param.max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xFF, 0, 0);
+
+ input_mt_init_slots(input, WDT_MAX_FINGER,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "failed to register input device: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int wdt87xx_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct wdt87xx_data *wdt;
+ int error;
+
+ dev_dbg(&client->dev, "adapter=%d, client irq: %d\n",
+ client->adapter->nr, client->irq);
+
+ /* Check if the I2C function is ok in this adaptor */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENXIO;
+
+ wdt = devm_kzalloc(&client->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->client = client;
+ mutex_init(&wdt->fw_mutex);
+ i2c_set_clientdata(client, wdt);
+
+ snprintf(wdt->phys, sizeof(wdt->phys), "i2c-%u-%04x/input0",
+ client->adapter->nr, client->addr);
+
+ error = wdt87xx_get_sysparam(client, &wdt->param);
+ if (error)
+ return error;
+
+ error = wdt87xx_ts_create_input_device(wdt);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, wdt87xx_ts_interrupt,
+ IRQF_ONESHOT,
+ client->name, wdt);
+ if (error) {
+ dev_err(&client->dev, "request irq failed: %d\n", error);
+ return error;
+ }
+
+ error = sysfs_create_group(&client->dev.kobj, &wdt87xx_attr_group);
+ if (error) {
+ dev_err(&client->dev, "create sysfs failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int wdt87xx_ts_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &wdt87xx_attr_group);
+
+ return 0;
+}
+
+static int __maybe_unused wdt87xx_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int error;
+
+ disable_irq(client->irq);
+
+ error = wdt87xx_send_command(client, VND_CMD_STOP, MODE_IDLE);
+ if (error) {
+ enable_irq(client->irq);
+ dev_err(&client->dev,
+ "failed to stop device when suspending: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused wdt87xx_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int error;
+
+ /*
+ * The chip may have been reset while the system is resuming,
+ * give it some time to settle.
+ */
+ mdelay(100);
+
+ error = wdt87xx_send_command(client, VND_CMD_START, 0);
+ if (error)
+ dev_err(&client->dev,
+ "failed to start device when resuming: %d\n",
+ error);
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(wdt87xx_pm_ops, wdt87xx_suspend, wdt87xx_resume);
+
+static const struct i2c_device_id wdt87xx_dev_id[] = {
+ { WDT87XX_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wdt87xx_dev_id);
+
+static const struct acpi_device_id wdt87xx_acpi_id[] = {
+ { "WDHT0001", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, wdt87xx_acpi_id);
+
+static struct i2c_driver wdt87xx_driver = {
+ .probe = wdt87xx_ts_probe,
+ .remove = wdt87xx_ts_remove,
+ .id_table = wdt87xx_dev_id,
+ .driver = {
+ .name = WDT87XX_NAME,
+ .pm = &wdt87xx_pm_ops,
+ .acpi_match_table = ACPI_PTR(wdt87xx_acpi_id),
+ },
+};
+module_i2c_driver(wdt87xx_driver);
+
+MODULE_AUTHOR("HN Chen <hn.chen@weidahitech.com>");
+MODULE_DESCRIPTION("WeidaHiTech WDT87XX Touchscreen driver");
+MODULE_VERSION(WDT87XX_DRV_VER);
+MODULE_LICENSE("GPL");
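In the new wdt87xx driver above, the flash path programs one 4 KiB page at a time and accepts a page only when the checksum the controller reports (VND_GET_CHECKSUM) matches one computed host-side, retrying up to MAX_RETRIES times. The checksum is a 16-bit MISR, a linear-feedback fold of each payload byte into the running state. A self-contained userspace sketch, transcribed from misr() and wdt87xx_calculate_checksum() above:

	#include <stdint.h>
	#include <stddef.h>

	/* Fold one byte into the 16-bit MISR state; feedback taps at
	 * bits 0, 1, 2, 4, 5, 7, 11 and 15 of the current state. */
	static uint16_t sketch_misr(uint16_t cur, uint8_t in)
	{
		uint32_t a = cur, b = in;
		uint32_t bit0, y;

		bit0 = a ^ (b & 1);
		bit0 ^= a >> 1;
		bit0 ^= a >> 2;
		bit0 ^= a >> 4;
		bit0 ^= a >> 5;
		bit0 ^= a >> 7;
		bit0 ^= a >> 11;
		bit0 ^= a >> 15;
		y = (a << 1) ^ b;
		y = (y & ~1u) | (bit0 & 1);
		return (uint16_t)y;
	}

	static uint16_t sketch_checksum(const uint8_t *data, size_t len)
	{
		uint16_t c = 0;

		while (len--)
			c = sketch_misr(c, *data++);
		return c;
	}

In the driver, a page write is retried until this value matches the one the controller returns for the same flash range.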
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 8d7e1c8b6d56..4dd88264dff5 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1055,7 +1055,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
processor = (struct acpi_madt_generic_interrupt *)header;
- if (BAD_MADT_ENTRY(processor, end))
+ if (BAD_MADT_GICC_ENTRY(processor, end))
return -EINVAL;
/*
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 4400edd1a6c7..b7d54d428b5e 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -257,16 +257,6 @@ int gic_get_c0_fdc_int(void)
return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
}
- /*
- * Some cores claim the FDC is routable but it doesn't actually seem to
- * be connected.
- */
- switch (current_cpu_type()) {
- case CPU_INTERAPTIV:
- case CPU_PROAPTIV:
- return -1;
- }
-
return irq_create_mapping(gic_irq_domain,
GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
diff --git a/drivers/irqchip/irqchip.h b/drivers/irqchip/irqchip.h
index 0f6486d4f1b0..0f67ae32464f 100644
--- a/drivers/irqchip/irqchip.h
+++ b/drivers/irqchip/irqchip.h
@@ -8,21 +8,4 @@
* warranty of any kind, whether express or implied.
*/
-#ifndef _IRQCHIP_H
-#define _IRQCHIP_H
-
-#include <linux/of.h>
-
-/*
- * This macro must be used by the different irqchip drivers to declare
- * the association between their DT compatible string and their
- * initialization function.
- *
- * @name: name that must be unique accross all IRQCHIP_DECLARE of the
- * same file.
- * @compstr: compatible string of the irqchip driver
- * @fn: initialization function
- */
-#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
-
-#endif
+#include <linux/irqchip.h>
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 135a0907e9de..ed2346ddf4c9 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -839,7 +839,7 @@ static void bitmap_file_kick(struct bitmap *bitmap)
if (bitmap->storage.file) {
path = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (path)
- ptr = d_path(&bitmap->storage.file->f_path,
+ ptr = file_path(bitmap->storage.file,
path, PAGE_SIZE);
printk(KERN_ALERT
@@ -1927,7 +1927,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
chunk_kb ? "KB" : "B");
if (bitmap->storage.file) {
seq_printf(seq, ", file: ");
- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
+ seq_file_path(seq, bitmap->storage.file, " \t\n");
}
seq_printf(seq, "\n");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index df92d30ca054..d429c30cd514 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5766,7 +5766,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
/* bitmap disabled, zero the first byte and copy out */
if (!mddev->bitmap_info.file)
file->pathname[0] = '\0';
- else if ((ptr = d_path(&mddev->bitmap_info.file->f_path,
+ else if ((ptr = file_path(mddev->bitmap_info.file,
file->pathname, sizeof(file->pathname))),
IS_ERR(ptr))
err = PTR_ERR(ptr);
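The md hunks above are a mechanical conversion; the assumed equivalences are file_path(f, buf, n) == d_path(&f->f_path, buf, n) and seq_file_path(seq, f, esc) == seq_path(seq, &f->f_path, esc), the file-based helpers being preferred when the caller already holds a struct file. Usage sketch (fragment):

	/* Print the path backing a struct file. */
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	char *p;

	if (buf) {
		p = file_path(file, buf, PAGE_SIZE);
		if (!IS_ERR(p))
			pr_info("backing file: %s\n", p);
		kfree(buf);
	}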
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 8911e51d410a..3a27a84ad3ec 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2074,14 +2074,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
ret = gpmc_probe_nand_child(pdev, child);
else if (of_node_cmp(child->name, "onenand") == 0)
ret = gpmc_probe_onenand_child(pdev, child);
- else if (of_node_cmp(child->name, "ethernet") == 0 ||
- of_node_cmp(child->name, "nor") == 0 ||
- of_node_cmp(child->name, "uart") == 0)
+ else
ret = gpmc_probe_generic_child(pdev, child);
-
- if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
- __func__, child->full_name))
- of_node_put(child);
}
return 0;
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 0c77240ae2fc..729e0851167d 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -23,6 +23,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
afu = cxl_pci_to_afu(dev);
+ get_device(&afu->dev);
ctx = cxl_context_alloc();
if (IS_ERR(ctx))
return ctx;
@@ -31,6 +32,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
rc = cxl_context_init(ctx, afu, false, NULL);
if (rc) {
kfree(ctx);
+ put_device(&afu->dev);
return ERR_PTR(-ENOMEM);
}
cxl_assign_psn_space(ctx);
@@ -60,6 +62,8 @@ int cxl_release_context(struct cxl_context *ctx)
if (ctx->status != CLOSED)
return -EBUSY;
+ put_device(&ctx->afu->dev);
+
cxl_context_free(ctx);
return 0;
@@ -159,7 +163,6 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
}
ctx->status = STARTED;
- get_device(&ctx->afu->dev);
out:
mutex_unlock(&ctx->status_mutex);
return rc;
@@ -175,12 +178,7 @@ EXPORT_SYMBOL_GPL(cxl_process_element);
/* Stop a context. Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
- int rc;
-
- rc = __detach_context(ctx);
- if (!rc)
- put_device(&ctx->afu->dev);
- return rc;
+ return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);
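The cxl/api.c hunks above move the AFU reference from the start/stop window to the init/release window: cxl_dev_context_init() now takes the get_device() and cxl_release_context() drops it, so the AFU cannot go away while an initialized-but-unstarted context still points at it. The intended pairing, sketched (error handling elided):

	/* Reference lifetime now equals context lifetime. */
	ctx = cxl_dev_context_init(dev);	/* get_device(&afu->dev) inside */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	/* cxl_start_context()/cxl_stop_context() may run any number of
	 * times in between without touching the refcount. */
	cxl_release_context(ctx);		/* put_device(&afu->dev) inside */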
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 2a4c80ac322a..1287148629c0 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
area = ctx->afu->psn_phys;
- if (offset > ctx->afu->adapter->ps_size)
+ if (offset >= ctx->afu->adapter->ps_size)
return VM_FAULT_SIGBUS;
} else {
area = ctx->psn_phys;
- if (offset > ctx->psn_size)
+ if (offset >= ctx->psn_size)
return VM_FAULT_SIGBUS;
}
@@ -145,8 +145,16 @@ static const struct vm_operations_struct cxl_mmap_vmops = {
*/
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
+ u64 start = vma->vm_pgoff << PAGE_SHIFT;
u64 len = vma->vm_end - vma->vm_start;
- len = min(len, ctx->psn_size);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ if (start + len > ctx->afu->adapter->ps_size)
+ return -EINVAL;
+ } else {
+ if (start + len > ctx->psn_size)
+ return -EINVAL;
+ }
if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
/* make sure there is a valid per process space for this AFU */
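The cxl/context.c hunks above tighten the mmap window twice over: the fault handler now rejects offset == size (>= instead of >, since the last valid byte sits at size - 1), and cxl_context_iomap() refuses a vma extending past the problem-space area instead of silently clamping its length. The shape of the checks, assuming the sizes are in bytes (fragment):

	/* Reject oversized mappings up front; fault on any offset at or
	 * past the end of the area. */
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (start + len > size)
		return -EINVAL;		/* was: len = min(len, size) */

	/* later, in the fault handler: */
	if (offset >= size)
		return VM_FAULT_SIGBUS;	/* was: offset > size */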
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 833348e2c9cb..4a164ab8b35a 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
spin_lock(&adapter->afu_list_lock);
for (slice = 0; slice < adapter->slices; slice++) {
afu = adapter->afu[slice];
- if (!afu->enabled)
+ if (!afu || !afu->enabled)
continue;
rcu_read_lock();
idr_for_each_entry(&afu->contexts_idr, ctx, id)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index c68ef5806dbe..32ad09705949 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -539,7 +539,7 @@ err:
static void cxl_unmap_slice_regs(struct cxl_afu *afu)
{
- if (afu->p1n_mmio)
+ if (afu->p2n_mmio)
iounmap(afu->p2n_mmio);
if (afu->p1n_mmio)
iounmap(afu->p1n_mmio);
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index b1d1983a84a5..2eba002b580b 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -112,9 +112,10 @@ static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
unsigned long addr;
phb = pci_bus_to_host(bus);
- afu = (struct cxl_afu *)phb->private_data;
if (phb == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
+ afu = (struct cxl_afu *)phb->private_data;
+
if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= (unsigned long)phb->cfg_data)
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 357b6ae4d207..458aa5a09c52 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -552,22 +552,6 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
schedule_work(&device->event_work);
}
-void mei_cl_bus_remove_devices(struct mei_device *dev)
-{
- struct mei_cl *cl, *next;
-
- mutex_lock(&dev->device_lock);
- list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
- if (cl->device)
- mei_cl_remove_device(cl->device);
-
- list_del(&cl->device_link);
- mei_cl_unlink(cl);
- kfree(cl);
- }
- mutex_unlock(&dev->device_lock);
-}
-
int __init mei_cl_bus_init(void)
{
return bus_register(&mei_cl_bus_type);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 94514b2c7a50..00c3865ca3b1 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -333,8 +333,6 @@ void mei_stop(struct mei_device *dev)
mei_nfc_host_exit(dev);
- mei_cl_bus_remove_devices(dev);
-
mutex_lock(&dev->device_lock);
mei_wd_stop(dev);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index b983c4ecad38..290ef3037437 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -402,11 +402,12 @@ void mei_nfc_host_exit(struct mei_device *dev)
cldev->priv_data = NULL;
- mutex_lock(&dev->device_lock);
/* Need to remove the device here
* since mei_nfc_free will unlink the clients
*/
mei_cl_remove_device(cldev);
+
+ mutex_lock(&dev->device_lock);
mei_nfc_free(ndev);
mutex_unlock(&dev->device_lock);
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 9df2b6801f76..b2b411da297b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -43,6 +43,7 @@
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/platform_data/hsmmc-omap.h>
/* OMAP HSMMC Host Controller Registers */
@@ -218,7 +219,6 @@ struct omap_hsmmc_host {
unsigned int flags;
#define AUTO_CMD23 (1 << 0) /* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */
-#define HSMMC_WAKE_IRQ_ENABLED (1 << 2)
struct omap_hsmmc_next next_data;
struct omap_hsmmc_platform_data *pdata;
@@ -1117,22 +1117,6 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static irqreturn_t omap_hsmmc_wake_irq(int irq, void *dev_id)
-{
- struct omap_hsmmc_host *host = dev_id;
-
- /* cirq is level triggered, disable to avoid infinite loop */
- spin_lock(&host->irq_lock);
- if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
- disable_irq_nosync(host->wake_irq);
- host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
- }
- spin_unlock(&host->irq_lock);
- pm_request_resume(host->dev); /* no use counter */
-
- return IRQ_HANDLED;
-}
-
static void set_sd_bus_power(struct omap_hsmmc_host *host)
{
unsigned long i;
@@ -1665,7 +1649,6 @@ static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
{
- struct mmc_host *mmc = host->mmc;
int ret;
/*
@@ -1677,11 +1660,7 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
if (!host->dev->of_node || !host->wake_irq)
return -ENODEV;
- /* Prevent auto-enabling of IRQ */
- irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN);
- ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- mmc_hostname(mmc), host);
+ ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq);
if (ret) {
dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
goto err;
@@ -1718,7 +1697,7 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
return 0;
err_free_irq:
- devm_free_irq(host->dev, host->wake_irq, host);
+ dev_pm_clear_wake_irq(host->dev);
err:
dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
host->wake_irq = 0;
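Note: the driver-private wake IRQ handler and the HSMMC_WAKE_IRQ_ENABLED flag are replaced by the generic dedicated wake-IRQ support from linux/pm_wakeirq.h, which arms and disarms the IRQ around runtime suspend on the driver's behalf. A sketch of the pattern for a hypothetical driver, error handling trimmed:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

static int foo_probe(struct platform_device *pdev)	/* hypothetical */
{
	int wake_irq = platform_get_irq(pdev, 1);	/* hypothetical resource index */

	if (wake_irq < 0)
		return wake_irq;

	device_init_wakeup(&pdev->dev, true);
	/* The PM core now enables this IRQ on runtime suspend and disables
	 * it on resume; no driver handler or enabled-flag is needed.
	 */
	return dev_pm_set_dedicated_wake_irq(&pdev->dev, wake_irq);
}

static int foo_remove(struct platform_device *pdev)
{
	dev_pm_clear_wake_irq(&pdev->dev);
	device_init_wakeup(&pdev->dev, false);
	return 0;
}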
@@ -2007,6 +1986,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
}
+ device_init_wakeup(&pdev->dev, true);
pm_runtime_enable(host->dev);
pm_runtime_get_sync(host->dev);
pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
@@ -2147,6 +2127,7 @@ err_slot_name:
if (host->use_reg)
omap_hsmmc_reg_put(host);
err_irq:
+ device_init_wakeup(&pdev->dev, false);
if (host->tx_chan)
dma_release_channel(host->tx_chan);
if (host->rx_chan)
@@ -2178,6 +2159,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
+ device_init_wakeup(&pdev->dev, false);
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
@@ -2204,11 +2186,6 @@ static int omap_hsmmc_suspend(struct device *dev)
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
}
- /* do not wake up due to sdio irq */
- if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
- !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
- disable_irq(host->wake_irq);
-
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
@@ -2233,11 +2210,6 @@ static int omap_hsmmc_resume(struct device *dev)
omap_hsmmc_conf_bus_power(host);
omap_hsmmc_protect_card(host);
-
- if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
- !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
- enable_irq(host->wake_irq);
-
pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
return 0;
@@ -2277,10 +2249,6 @@ static int omap_hsmmc_runtime_suspend(struct device *dev)
}
pinctrl_pm_select_idle_state(dev);
-
- WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED);
- enable_irq(host->wake_irq);
- host->flags |= HSMMC_WAKE_IRQ_ENABLED;
} else {
pinctrl_pm_select_idle_state(dev);
}
@@ -2302,11 +2270,6 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
spin_lock_irqsave(&host->irq_lock, flags);
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
(host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
- /* sdio irq flag can't change while in runtime suspend */
- if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
- disable_irq_nosync(host->wake_irq);
- host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
- }
pinctrl_pm_select_default_state(host->dev);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 019fceffc9e5..c18f9e62a9fa 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -217,8 +217,8 @@ config NET_POLL_CONTROLLER
def_bool NETPOLL
config NTB_NETDEV
- tristate "Virtual Ethernet over NTB"
- depends on NTB
+ tristate "Virtual Ethernet over NTB Transport"
+ depends on NTB_TRANSPORT
config RIONET
tristate "RapidIO Ethernet over messaging driver support"
@@ -258,6 +258,20 @@ config TUN
If you don't know what to use this for, you don't need it.
+config TUN_VNET_CROSS_LE
+ bool "Support for cross-endian vnet headers on little-endian kernels"
+ default n
+ ---help---
+ This option allows TUN/TAP and MACVTAP device drivers in a
+ little-endian kernel to parse vnet headers that come from a
+ big-endian legacy virtio device.
+
+ Userspace programs can control the feature using the TUNSETVNETBE
+ and TUNGETVNETBE ioctls.
+
+ Unless you have a little-endian system hosting a big-endian virtual
+ machine with a legacy virtio NIC, you should say N.
+
config VETH
tristate "Virtual ethernet pair device"
---help---
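Note: a sketch of the userspace side of the two ioctls named in the help text, assuming a uapi header that defines TUNSETVNETBE/TUNGETVNETBE. On kernels built without TUN_VNET_CROSS_LE both return -EINVAL, matching the tun and macvtap changes later in this diff:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>

int main(void)
{
	int fd = open("/dev/net/tun", O_RDWR);
	int be = 1, cur = 0;

	if (fd < 0) {
		perror("open /dev/net/tun");
		return 1;
	}
	/* Tell the kernel the peer writes vnet headers big-endian. */
	if (ioctl(fd, TUNSETVNETBE, &be) < 0)
		perror("TUNSETVNETBE");	/* -EINVAL without TUN_VNET_CROSS_LE */
	if (ioctl(fd, TUNGETVNETBE, &cur) == 0)
		printf("vnet headers big-endian: %d\n", cur);
	return 0;
}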
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 19eb990d398c..317a49480475 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -689,40 +689,57 @@ out:
}
-static bool bond_should_change_active(struct bonding *bond)
+static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
struct slave *prim = rtnl_dereference(bond->primary_slave);
struct slave *curr = rtnl_dereference(bond->curr_active_slave);
- if (!prim || !curr || curr->link != BOND_LINK_UP)
- return true;
+ if (!prim || prim->link != BOND_LINK_UP) {
+ if (!curr || curr->link != BOND_LINK_UP)
+ return NULL;
+ return curr;
+ }
+
if (bond->force_primary) {
bond->force_primary = false;
- return true;
+ return prim;
+ }
+
+ if (!curr || curr->link != BOND_LINK_UP)
+ return prim;
+
+ /* At this point, prim and curr are both up */
+ switch (bond->params.primary_reselect) {
+ case BOND_PRI_RESELECT_ALWAYS:
+ return prim;
+ case BOND_PRI_RESELECT_BETTER:
+ if (prim->speed < curr->speed)
+ return curr;
+ if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
+ return curr;
+ return prim;
+ case BOND_PRI_RESELECT_FAILURE:
+ return curr;
+ default:
+ netdev_err(bond->dev, "impossible primary_reselect %d\n",
+ bond->params.primary_reselect);
+ return curr;
}
- if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
- (prim->speed < curr->speed ||
- (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
- return false;
- if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
- return false;
- return true;
}
/**
- * find_best_interface - select the best available slave to be the active one
+ * bond_find_best_slave - select the best available slave to be the active one
* @bond: our bonding struct
*/
static struct slave *bond_find_best_slave(struct bonding *bond)
{
- struct slave *slave, *bestslave = NULL, *primary;
+ struct slave *slave, *bestslave = NULL;
struct list_head *iter;
int mintime = bond->params.updelay;
- primary = rtnl_dereference(bond->primary_slave);
- if (primary && primary->link == BOND_LINK_UP &&
- bond_should_change_active(bond))
- return primary;
+ slave = bond_choose_primary_or_current(bond);
+ if (slave)
+ return slave;
bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP)
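Note: the refactor replaces a boolean "should we change?" test with a helper that returns the slave to use directly, putting the whole reselect policy in one auditable place. Its decision order, summarized (when the primary is up, a set force_primary short-circuits to the primary and clears itself):

primary up  current up  primary_reselect  chosen slave
----------  ----------  ----------------  ------------------------------------
no          no          any               none; fall through to best-slave scan
no          yes         any               current
yes         no          any               primary
yes         yes         ALWAYS            primary
yes         yes         BETTER            current if strictly faster, or same
                                          speed with duplex >= primary's;
                                          otherwise primary
yes         yes         FAILURE           current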
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 041525d2595c..5d214d135332 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -592,6 +592,7 @@ static int c_can_start(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
int err;
+ struct pinctrl *p;
/* basic c_can configuration */
err = c_can_chip_config(dev);
@@ -604,8 +605,13 @@ static int c_can_start(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- /* activate pins */
- pinctrl_pm_select_default_state(dev->dev.parent);
+ /* Attempt to use "active" if available else use "default" */
+ p = pinctrl_get_select(priv->device, "active");
+ if (!IS_ERR(p))
+ pinctrl_put(p);
+ else
+ pinctrl_pm_select_default_state(priv->device);
+
return 0;
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index e9b1810d319f..aede704605c6 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
struct can_frame *cf = (struct can_frame *)skb->data;
u8 dlc = cf->can_dlc;
- if (!(skb->tstamp.tv64))
- __net_timestamp(skb);
-
netif_rx(priv->echo_skb[idx]);
priv->echo_skb[idx] = NULL;
@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
if (unlikely(!skb))
return NULL;
- __net_timestamp(skb);
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
memset(*cf, 0, sizeof(struct can_frame));
@@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
if (unlikely(!skb))
return NULL;
- __net_timestamp(skb);
skb->protocol = htons(ETH_P_CANFD);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
*cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
memset(*cfd, 0, sizeof(struct canfd_frame));
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 7deb80dcbe8c..7bd54191f962 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -508,7 +508,8 @@ static int rcar_can_open(struct net_device *ndev)
err = clk_prepare_enable(priv->clk);
if (err) {
- netdev_err(ndev, "failed to enable periperal clock, error %d\n",
+ netdev_err(ndev,
+ "failed to enable peripheral clock, error %d\n",
err);
goto out;
}
@@ -526,7 +527,8 @@ static int rcar_can_open(struct net_device *ndev)
napi_enable(&priv->napi);
err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
if (err) {
- netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
+ netdev_err(ndev, "request_irq(%d) failed, error %d\n",
+ ndev->irq, err);
goto out_close;
}
can_led_event(ndev, CAN_LED_EVENT_OPEN);
@@ -758,8 +760,9 @@ static int rcar_can_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
+ if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
+ err = irq;
goto fail;
}
@@ -782,7 +785,8 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(&pdev->dev, "clkp1");
if (IS_ERR(priv->clk)) {
err = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err);
+ dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
+ err);
goto fail_clk;
}
@@ -794,7 +798,7 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
if (IS_ERR(priv->can_clk)) {
err = PTR_ERR(priv->can_clk);
- dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err);
+ dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
goto fail_clk;
}
@@ -823,7 +827,7 @@ static int rcar_can_probe(struct platform_device *pdev)
devm_can_led_init(ndev);
- dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+ dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
priv->regs, ndev->irq);
return 0;
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f64f5290d6f8..a23a7af8eb9a 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
if (!skb)
return;
- __net_timestamp(skb);
skb->dev = sl->dev;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
@@ -215,6 +214,7 @@ static void slc_bump(struct slcan *sl)
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = sl->dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
memcpy(skb_put(skb, sizeof(struct can_frame)),
&cf, sizeof(struct can_frame));
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0ce868de855d..674f367087c5 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (!(skb->tstamp.tv64))
- __net_timestamp(skb);
-
netif_rx_ni(skb);
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 41095ebad97f..2d1ce3c5d0dd 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2382,6 +2382,7 @@ boomerang_interrupt(int irq, void *dev_id)
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
+ int handled = 0;
ioaddr = vp->ioaddr;
@@ -2400,6 +2401,7 @@ boomerang_interrupt(int irq, void *dev_id)
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs can cause this */
+ handled = 1;
if (status == 0xffff) { /* h/w no longer present (hotplug)? */
if (vortex_debug > 1)
@@ -2501,7 +2503,7 @@ boomerang_interrupt(int irq, void *dev_id)
handler_exit:
vp->handling_irq = 0;
spin_unlock(&vp->lock);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(handled);
}
static int vortex_rx(struct net_device *dev)
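Note: unconditionally returning IRQ_HANDLED from a shared-line handler hides spurious interrupts from the kernel's screaming-IRQ detection; the fix above records whether the device actually asserted status. The shape of the pattern, with hypothetical register names:

#include <linux/interrupt.h>
#include <linux/io.h>

#define FOO_REG_STATUS	0x0e	/* hypothetical status register offset */
#define FOO_INT_LATCH	0x0001	/* hypothetical "interrupt pending" bit */

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	void __iomem *ioaddr = dev_id;
	int handled = 0;

	if (ioread16(ioaddr + FOO_REG_STATUS) & FOO_INT_LATCH) {
		handled = 1;
		/* ... acknowledge and service the device ... */
	}
	/* IRQ_HANDLED only if the status bit was ours; IRQ_NONE lets the
	 * core disable a stuck shared line after enough misses.
	 */
	return IRQ_RETVAL(handled);
}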
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 661cdaa7ea96..b3bc87fe3764 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -303,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
get_page(pa->pages);
bd->pa = *pa;
- bd->dma = pa->pages_dma + pa->pages_offset;
+ bd->dma_base = pa->pages_dma;
+ bd->dma_off = pa->pages_offset;
bd->dma_len = len;
pa->pages_offset += len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 506e832c9e9a..a4473d8ff4fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1110,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
unsigned int rx_usecs = pdata->rx_usecs;
unsigned int rx_frames = pdata->rx_frames;
unsigned int inte;
+ dma_addr_t hdr_dma, buf_dma;
if (!rx_usecs && !rx_frames) {
/* No coalescing, interrupt for every descriptor */
@@ -1129,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
* Set buffer 2 (hi) address to buffer dma address (hi) and
* set control bits OWN and INTE
*/
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
- rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
- rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+ hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
+ buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 1e9c28d19ef8..aae9d5ecd182 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
/* Start with the header buffer which may contain just the header
* or the header plus data
*/
- dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
- rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
+ rdata->rx.hdr.dma_off,
+ rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset;
@@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
len -= copy_len;
if (len) {
/* Add the remaining data as a frag */
- dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
- rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
+ rdata->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
@@ -1945,8 +1949,9 @@ read_again:
if (!skb)
error = 1;
} else if (rdesc_len) {
- dma_sync_single_for_cpu(pdata->dev,
- rdata->rx.buf.dma,
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
rdata->rx.buf.dma_len,
DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 63d72a140053..717ce21b6077 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -337,7 +337,8 @@ struct xgbe_buffer_data {
struct xgbe_page_alloc pa;
struct xgbe_page_alloc pa_unmap;
- dma_addr_t dma;
+ dma_addr_t dma_base;
+ unsigned long dma_off;
unsigned int dma_len;
};
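Note: the dma field is split into dma_base + dma_off so the sync calls can use dma_sync_single_range_for_cpu(), which expects the handle originally returned by the mapping call plus a separate offset; passing a pre-summed address as the handle, as the old code effectively did, is not guaranteed to be valid and trips DMA API debugging. A sketch assuming the xgbe.h declarations above:

#include <linux/dma-mapping.h>

static void foo_sync_rx_buffer(struct device *dev,
			       struct xgbe_buffer_data *bd)	/* hypothetical */
{
	dma_sync_single_range_for_cpu(dev,
				      bd->dma_base,	/* handle from dma_map_page() */
				      bd->dma_off,	/* buffer's offset in the mapping */
				      bd->dma_len,	/* bytes to sync */
				      DMA_FROM_DEVICE);
}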
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 909ad7a0d480..4566cdf0bc39 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1793,7 +1793,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
macaddr = of_get_mac_address(dn);
if (!macaddr || !is_valid_ether_addr(macaddr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
} else {
ether_addr_copy(dev->dev_addr, macaddr);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b43b2cb9b830..64c1e9db6b0b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1230,7 +1230,6 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
new_skb = skb_realloc_headroom(skb, sizeof(*status));
dev_kfree_skb(skb);
if (!new_skb) {
- dev->stats.tx_errors++;
dev->stats.tx_dropped++;
return NULL;
}
@@ -1465,7 +1464,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
- dev->stats.rx_errors++;
goto next;
}
@@ -1493,7 +1491,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- dev->stats.rx_dropped++;
dev->stats.rx_errors++;
dev_kfree_skb_any(skb);
goto next;
@@ -1515,7 +1512,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dev->stats.rx_frame_errors++;
if (dma_flag & DMA_RX_LG)
dev->stats.rx_length_errors++;
- dev->stats.rx_dropped++;
dev->stats.rx_errors++;
dev_kfree_skb_any(skb);
goto next;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1f89c59b4353..42e20e5385ac 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define FEC_ENET_RAEM_V 0x8
#define FEC_ENET_RAFL_V 0x8
#define FEC_ENET_OPD_V 0xFFF0
+#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
static struct platform_device_id fec_devtype[] = {
{
@@ -1767,7 +1769,13 @@ static void fec_enet_adjust_link(struct net_device *ndev)
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
unsigned long time_left;
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(dev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
@@ -1783,18 +1791,30 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (time_left == 0) {
fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO read timeout\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
- /* return value */
- return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+ ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
}
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
unsigned long time_left;
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(dev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
@@ -1811,10 +1831,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (time_left == 0) {
fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO write timeout\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
}
- return 0;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
}
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
@@ -1826,9 +1849,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
ret = clk_prepare_enable(fep->clk_ahb);
if (ret)
return ret;
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
if (fep->clk_enet_out) {
ret = clk_prepare_enable(fep->clk_enet_out);
if (ret)
@@ -1852,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
}
} else {
clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
@@ -1874,8 +1893,6 @@ failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
clk_disable_unprepare(fep->clk_ahb);
return ret;
@@ -2847,10 +2864,14 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ ret = pm_runtime_get_sync(&fep->pdev->dev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
pinctrl_pm_select_default_state(&fep->pdev->dev);
ret = fec_enet_clk_enable(ndev, true);
if (ret)
- return ret;
+ goto clk_enable;
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
@@ -2881,6 +2902,9 @@ err_enet_mii_probe:
fec_enet_free_buffers(ndev);
err_enet_alloc:
fec_enet_clk_enable(ndev, false);
+clk_enable:
+ pm_runtime_mark_last_busy(&fep->pdev->dev);
+ pm_runtime_put_autosuspend(&fep->pdev->dev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
return ret;
}
@@ -2903,6 +2927,9 @@ fec_enet_close(struct net_device *ndev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ pm_runtime_mark_last_busy(&fep->pdev->dev);
+ pm_runtime_put_autosuspend(&fep->pdev->dev);
+
fec_enet_free_buffers(ndev);
return 0;
@@ -3388,6 +3415,10 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_clk;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
ret = regulator_enable(fep->reg_phy);
@@ -3434,6 +3465,8 @@ fec_probe(struct platform_device *pdev)
netif_carrier_off(ndev);
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
ret = register_netdev(ndev);
if (ret)
@@ -3447,6 +3480,12 @@ fec_probe(struct platform_device *pdev)
fep->rx_copybreak = COPYBREAK_DEFAULT;
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
failed_register:
@@ -3457,6 +3496,8 @@ failed_init:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
failed_phy:
@@ -3568,7 +3609,28 @@ failed_clk:
return ret;
}
-static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
+static int __maybe_unused fec_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ clk_disable_unprepare(fep->clk_ipg);
+
+ return 0;
+}
+
+static int __maybe_unused fec_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return clk_prepare_enable(fep->clk_ipg);
+}
+
+static const struct dev_pm_ops fec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+};
static struct platform_driver fec_driver = {
.driver = {
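Note: the MDIO accessors now bracket register access with a runtime-PM reference so clk_ipg is guaranteed on, and drop it with autosuspend so bursts of MDIO traffic do not toggle the clock. The conventional caller-side pattern, sketched below; the driver above uses IS_ERR_VALUE() on the return value rather than a plain negative test, to the same effect:

#include <linux/pm_runtime.h>

static int foo_hw_access(struct device *dev)	/* hypothetical */
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* rebalance the usage count */
		return ret;
	}

	/* ... registers are clocked here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}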
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 847643455468..605cc8948594 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -101,6 +101,11 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
return resource_size(&efx->pci_dev->resource[bar]);
}
+static bool efx_ef10_is_vf(struct efx_nic *efx)
+{
+ return efx->type->is_vf;
+}
+
static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
@@ -677,6 +682,48 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
return efx_ef10_probe(efx);
}
+int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
@@ -3804,6 +3851,72 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
WARN_ON(remove_failed);
}
+static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 mac_old[ETH_ALEN];
+ int rc, rc2;
+
+ /* Only reconfigure a PF-created vport */
+ if (is_zero_ether_addr(nic_data->vport_mac))
+ return 0;
+
+ efx_device_detach_sync(efx);
+ efx_net_stop(efx->net_dev);
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+
+ rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ if (rc)
+ goto restore_filters;
+
+ ether_addr_copy(mac_old, nic_data->vport_mac);
+ rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ nic_data->vport_mac);
+ if (rc)
+ goto restore_vadaptor;
+
+ rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
+ efx->net_dev->dev_addr);
+ if (!rc) {
+ ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
+ } else {
+ rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
+ if (rc2) {
+ /* Failed to add original MAC, so clear vport_mac */
+ eth_zero_addr(nic_data->vport_mac);
+ goto reset_nic;
+ }
+ }
+
+restore_vadaptor:
+ rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc2)
+ goto reset_nic;
+restore_filters:
+ down_write(&efx->filter_sem);
+ rc2 = efx_ef10_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (rc2)
+ goto reset_nic;
+
+ rc2 = efx_net_open(efx->net_dev);
+ if (rc2)
+ goto reset_nic;
+
+ netif_device_attach(efx->net_dev);
+
+ return rc;
+
+reset_nic:
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore when changing MAC address - scheduling reset\n");
+ efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
+
+ return rc ? rc : rc2;
+}
+
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -3820,8 +3933,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx->net_dev->dev_addr);
MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
nic_data->vport_id);
- rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
@@ -3829,38 +3942,27 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_net_open(efx->net_dev);
netif_device_attach(efx->net_dev);
-#if !defined(CONFIG_SFC_SRIOV)
- if (rc == -EPERM)
- netif_err(efx, drv, efx->net_dev,
- "Cannot change MAC address; use sfboot to enable mac-spoofing"
- " on this interface\n");
-#else
- if (rc == -EPERM) {
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
- /* Switch to PF and change MAC address on vport */
- if (efx->pci_dev->is_virtfn && pci_dev_pf) {
- struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ if (rc == -EPERM) {
+ struct efx_nic *efx_pf;
- if (!efx_ef10_sriov_set_vf_mac(efx_pf,
- nic_data->vf_index,
- efx->net_dev->dev_addr))
- return 0;
- }
- netif_err(efx, drv, efx->net_dev,
- "Cannot change MAC address; use sfboot to enable mac-spoofing"
- " on this interface\n");
- } else if (efx->pci_dev->is_virtfn) {
- /* Successfully changed by VF (with MAC spoofing), so update the
- * parent PF if possible.
- */
- struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+ /* Switch to PF and change MAC address on vport */
+ efx_pf = pci_get_drvdata(pci_dev_pf);
- if (pci_dev_pf) {
+ rc = efx_ef10_sriov_set_vf_mac(efx_pf,
+ nic_data->vf_index,
+ efx->net_dev->dev_addr);
+ } else if (!rc) {
struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
unsigned int i;
+ /* MAC address successfully changed by VF (with MAC
+ * spoofing) so update the parent PF if possible.
+ */
for (i = 0; i < efx_pf->vf_count; ++i) {
struct ef10_vf *vf = nic_data->vf + i;
@@ -3871,8 +3973,24 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
}
}
}
- }
+ } else
#endif
+ if (rc == -EPERM) {
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable"
+ " mac-spoofing on this interface\n");
+ } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
+ /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
+ * fall-back to the method of changing the MAC address on the
+ * vport. This only applies to PFs because such versions of
+ * MCFW do not support VFs.
+ */
+ rc = efx_ef10_vport_set_mac_address(efx);
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
+ sizeof(inbuf), NULL, 0, rc);
+ }
+
return rc;
}
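Note: the vport fallback added above is taken only when the firmware rejects MC_CMD_VADAPTOR_SET_MAC with -ENOSYS on a PF; such firmware predates VF support, hence the efx_ef10_is_vf() guard. Changing the MAC on the vport itself requires unwinding everything stacked on it first:

1. detach the net device, stop it, and remove the filter table
2. free the vadaptor bound to the vport
3. delete the old MAC from the vport and add the new one (on failure,
   re-add the old MAC so the vport remains usable)
4. reallocate the vadaptor, re-probe filters, reopen, and reattach

Any failure once teardown has begun escalates to a RESET_TYPE_DATAPATH reset, mirroring the ef10_sriov.c error paths changed below.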
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 6c9b6e45509a..3c17f274e802 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -29,30 +29,6 @@ static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
NULL, 0, NULL);
}
-static int efx_ef10_vport_add_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
- ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
-
- return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
-}
-
-static int efx_ef10_vport_del_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
- ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
-
- return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
-}
-
static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
unsigned int vswitch_type)
{
@@ -136,24 +112,6 @@ static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
NULL, 0, NULL);
}
-static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
- return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
-}
-
-static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
- return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
-}
-
static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -640,21 +598,21 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
vf->vlan, &vf->vport_id);
if (rc)
- goto reset_nic;
+ goto reset_nic_up_write;
restore_mac:
if (!is_zero_ether_addr(vf->mac)) {
rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
if (rc2) {
eth_zero_addr(vf->mac);
- goto reset_nic;
+ goto reset_nic_up_write;
}
}
restore_evb_port:
rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
if (rc2)
- goto reset_nic;
+ goto reset_nic_up_write;
else
vf->vport_assigned = 1;
@@ -662,14 +620,16 @@ restore_vadaptor:
if (vf->efx) {
rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
if (rc2)
- goto reset_nic;
+ goto reset_nic_up_write;
}
restore_filters:
if (vf->efx) {
rc2 = vf->efx->type->filter_table_probe(vf->efx);
if (rc2)
- goto reset_nic;
+ goto reset_nic_up_write;
+
+ up_write(&vf->efx->filter_sem);
up_write(&vf->efx->filter_sem);
@@ -681,9 +641,12 @@ restore_filters:
}
return rc;
+reset_nic_up_write:
+ if (vf->efx)
+ up_write(&vf->efx->filter_sem);
+
reset_nic:
if (vf->efx) {
- up_write(&vf->efx->filter_sem);
netif_err(efx, drv, efx->net_dev,
"Failed to restore VF - scheduling reset.\n");
efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index db4ef537c610..6d25b92cb45e 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -65,5 +65,11 @@ int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
+int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac);
+int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac);
+int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
+int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 804b9ad553d3..03bc03b67f08 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -245,11 +245,17 @@ static int efx_check_disabled(struct efx_nic *efx)
*/
static int efx_process_channel(struct efx_channel *channel, int budget)
{
+ struct efx_tx_queue *tx_queue;
int spent;
if (unlikely(!channel->enabled))
return 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->pkts_compl = 0;
+ tx_queue->bytes_compl = 0;
+ }
+
spent = efx_nic_process_eventq(channel, budget);
if (spent && efx_channel_has_rx_queue(channel)) {
struct efx_rx_queue *rx_queue =
@@ -259,6 +265,14 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_fast_push_rx_descriptors(rx_queue, true);
}
+ /* Update BQL */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->bytes_compl) {
+ netdev_tx_completed_queue(tx_queue->core_txq,
+ tx_queue->pkts_compl, tx_queue->bytes_compl);
+ }
+ }
+
return spent;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index d72f522bf9c3..47d1e3a96522 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -241,6 +241,8 @@ struct efx_tx_queue {
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
unsigned int merge_events;
+ unsigned int bytes_compl;
+ unsigned int pkts_compl;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index aaf2987512b5..1833a0146571 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -617,7 +617,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
- netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
++tx_queue->merge_events;
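Note: with completions accumulated per NAPI poll, BQL sees one netdev_tx_completed_queue() per queue per poll instead of one per TX event, amortizing the update across merged completions. The BQL contract being maintained, in sketch form; sent and completed byte counts must pair up:

#include <linux/netdevice.h>

/* xmit path: account bytes as they are queued to hardware. */
static void foo_tx_account(struct netdev_queue *txq, struct sk_buff *skb)
{
	netdev_tx_sent_queue(txq, skb->len);
}

/* completion path, once per NAPI poll: report the whole batch. */
static void foo_tx_complete(struct netdev_queue *txq,
			    unsigned int pkts, unsigned int bytes)
{
	if (bytes)
		netdev_tx_completed_queue(txq, pkts, bytes);
}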
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 462820514fae..f335bf119ab5 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -138,19 +138,6 @@ do { \
#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
-#define cpsw_enable_irq(priv) \
- do { \
- u32 i; \
- for (i = 0; i < priv->num_irqs; i++) \
- enable_irq(priv->irqs_table[i]); \
- } while (0)
-#define cpsw_disable_irq(priv) \
- do { \
- u32 i; \
- for (i = 0; i < priv->num_irqs; i++) \
- disable_irq_nosync(priv->irqs_table[i]); \
- } while (0)
-
#define cpsw_slave_index(priv) \
((priv->data.dual_emac) ? priv->emac_port : \
priv->data.active_slave)
@@ -509,9 +496,11 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
(func)(slave++, ##arg); \
} while (0)
#define cpsw_get_slave_ndev(priv, __slave_no__) \
- (priv->slaves[__slave_no__].ndev)
+ ((__slave_no__ < priv->data.slaves) ? \
+ priv->slaves[__slave_no__].ndev : NULL)
#define cpsw_get_slave_priv(priv, __slave_no__) \
- ((priv->slaves[__slave_no__].ndev) ? \
+ (((__slave_no__ < priv->data.slaves) && \
+ (priv->slaves[__slave_no__].ndev)) ? \
netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
@@ -781,7 +770,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
cpsw_intr_disable(priv);
if (priv->irq_enabled == true) {
- cpsw_disable_irq(priv);
+ disable_irq_nosync(priv->irqs_table[0]);
priv->irq_enabled = false;
}
@@ -817,7 +806,7 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
prim_cpsw->irq_enabled = true;
- cpsw_enable_irq(priv);
+ enable_irq(priv->irqs_table[0]);
}
}
@@ -1333,7 +1322,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (prim_cpsw->irq_enabled == false) {
if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
prim_cpsw->irq_enabled = true;
- cpsw_enable_irq(prim_cpsw);
+ enable_irq(prim_cpsw->irqs_table[0]);
}
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4208dd7ef101..d95f9aae95e7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1530,9 +1530,9 @@ static int axienet_probe(struct platform_device *pdev)
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
- if (!lp->regs) {
+ if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(lp->regs);
goto free_netdev;
}
@@ -1599,9 +1599,9 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
- if (!lp->dma_regs) {
+ if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(lp->dma_regs);
goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);
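Note: devm_ioremap_resource() never returns NULL; failures come back as ERR_PTR() values, so the old NULL checks could never fire. The idiomatic check, which also propagates the precise errno instead of a blanket -ENOMEM:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>

static void __iomem *foo_map(struct device *dev,
			     struct resource *res)	/* hypothetical */
{
	void __iomem *base = devm_ioremap_resource(dev, res);

	if (IS_ERR(base))
		dev_err(dev, "ioremap failed: %ld\n", PTR_ERR(base));
	return base;	/* caller tests IS_ERR() and uses PTR_ERR() */
}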
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 7856b6ccf5c5..d95a50ae996d 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -482,6 +482,7 @@ static void bpq_setup(struct net_device *dev)
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
dev->flags = 0;
+ dev->features = NETIF_F_LLTX; /* Allow recursion */
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6a64197f5bce..3b933bb5a8d5 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -48,15 +48,70 @@ struct macvtap_queue {
#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
#define MACVTAP_VNET_LE 0x80000000
+#define MACVTAP_VNET_BE 0x40000000
+
+#ifdef CONFIG_TUN_VNET_CROSS_LE
+static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
+{
+ return q->flags & MACVTAP_VNET_BE ? false :
+ virtio_legacy_is_little_endian();
+}
+
+static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp)
+{
+ int s = !!(q->flags & MACVTAP_VNET_BE);
+
+ if (put_user(s, sp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp)
+{
+ int s;
+
+ if (get_user(s, sp))
+ return -EFAULT;
+
+ if (s)
+ q->flags |= MACVTAP_VNET_BE;
+ else
+ q->flags &= ~MACVTAP_VNET_BE;
+
+ return 0;
+}
+#else
+static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
+{
+ return virtio_legacy_is_little_endian();
+}
+
+static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp)
+{
+ return -EINVAL;
+}
+
+static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_TUN_VNET_CROSS_LE */
+
+static inline bool macvtap_is_little_endian(struct macvtap_queue *q)
+{
+ return q->flags & MACVTAP_VNET_LE ||
+ macvtap_legacy_is_little_endian(q);
+}
static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
- return __virtio16_to_cpu(q->flags & MACVTAP_VNET_LE, val);
+ return __virtio16_to_cpu(macvtap_is_little_endian(q), val);
}
static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
- return __cpu_to_virtio16(q->flags & MACVTAP_VNET_LE, val);
+ return __cpu_to_virtio16(macvtap_is_little_endian(q), val);
}
static struct proto macvtap_proto = {
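Note: how the new helpers resolve the vnet header byte order for a queue, from highest to lowest precedence. Without CONFIG_TUN_VNET_CROSS_LE the BE flag cannot be set at all, so only the first and last rows apply:

MACVTAP_VNET_LE  MACVTAP_VNET_BE  header byte order
---------------  ---------------  -----------------------------------------
set              any              little endian
clear            set              big endian (cross-endian legacy guest)
clear            clear            virtio_legacy_is_little_endian(), i.e.
                                  the host's native legacy-virtio order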
@@ -1085,6 +1140,12 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
q->flags &= ~MACVTAP_VNET_LE;
return 0;
+ case TUNGETVNETBE:
+ return macvtap_get_vnet_be(q, sp);
+
+ case TUNSETVNETBE:
+ return macvtap_set_vnet_be(q, sp);
+
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
@@ -1294,6 +1355,7 @@ static void macvtap_exit(void)
class_unregister(macvtap_class);
cdev_del(&macvtap_cdev);
unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+ idr_destroy(&minor_idr);
}
module_exit(macvtap_exit);
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 5a7e6397440a..3cc316cb7e6b 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -5,6 +5,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -13,6 +14,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +42,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Intel PCIe NTB Network Linux driver
+ * PCIe NTB Network Linux driver
*
* Contact Information:
* Jon Mason <jon.mason@intel.com>
@@ -50,6 +52,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
+#include <linux/ntb_transport.h>
#define NTB_NETDEV_VER "0.7"
@@ -70,26 +73,19 @@ struct ntb_netdev {
static LIST_HEAD(dev_list);
-static void ntb_netdev_event_handler(void *data, int status)
+static void ntb_netdev_event_handler(void *data, int link_is_up)
{
struct net_device *ndev = data;
struct ntb_netdev *dev = netdev_priv(ndev);
- netdev_dbg(ndev, "Event %x, Link %x\n", status,
+ netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
ntb_transport_link_query(dev->qp));
- switch (status) {
- case NTB_LINK_DOWN:
+ if (link_is_up) {
+ if (ntb_transport_link_query(dev->qp))
+ netif_carrier_on(ndev);
+ } else {
netif_carrier_off(ndev);
- break;
- case NTB_LINK_UP:
- if (!ntb_transport_link_query(dev->qp))
- return;
-
- netif_carrier_on(ndev);
- break;
- default:
- netdev_warn(ndev, "Unsupported event type %d\n", status);
}
}
@@ -160,8 +156,6 @@ static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
struct ntb_netdev *dev = netdev_priv(ndev);
int rc;
- netdev_dbg(ndev, "%s: skb len %d\n", __func__, skb->len);
-
rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
if (rc)
goto err;
@@ -322,20 +316,26 @@ static const struct ntb_queue_handlers ntb_netdev_handlers = {
.event_handler = ntb_netdev_event_handler,
};
-static int ntb_netdev_probe(struct pci_dev *pdev)
+static int ntb_netdev_probe(struct device *client_dev)
{
+ struct ntb_dev *ntb;
struct net_device *ndev;
+ struct pci_dev *pdev;
struct ntb_netdev *dev;
int rc;
- ndev = alloc_etherdev(sizeof(struct ntb_netdev));
+ ntb = dev_ntb(client_dev->parent);
+ pdev = ntb->pdev;
+ if (!pdev)
+ return -ENODEV;
+
+ ndev = alloc_etherdev(sizeof(*dev));
if (!ndev)
return -ENOMEM;
dev = netdev_priv(ndev);
dev->ndev = ndev;
dev->pdev = pdev;
- BUG_ON(!dev->pdev);
ndev->features = NETIF_F_HIGHDMA;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -349,7 +349,8 @@ static int ntb_netdev_probe(struct pci_dev *pdev)
ndev->netdev_ops = &ntb_netdev_ops;
ndev->ethtool_ops = &ntb_ethtool_ops;
- dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
+ dev->qp = ntb_transport_create_queue(ndev, client_dev,
+ &ntb_netdev_handlers);
if (!dev->qp) {
rc = -EIO;
goto err;
@@ -372,12 +373,17 @@ err:
return rc;
}
-static void ntb_netdev_remove(struct pci_dev *pdev)
+static void ntb_netdev_remove(struct device *client_dev)
{
+ struct ntb_dev *ntb;
struct net_device *ndev;
+ struct pci_dev *pdev;
struct ntb_netdev *dev;
bool found = false;
+ ntb = dev_ntb(client_dev->parent);
+ pdev = ntb->pdev;
+
list_for_each_entry(dev, &dev_list, list) {
if (dev->pdev == pdev) {
found = true;
@@ -396,7 +402,7 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
free_netdev(ndev);
}
-static struct ntb_client ntb_netdev_client = {
+static struct ntb_transport_client ntb_netdev_client = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.probe = ntb_netdev_probe,
@@ -407,16 +413,16 @@ static int __init ntb_netdev_init_module(void)
{
int rc;
- rc = ntb_register_client_dev(KBUILD_MODNAME);
+ rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
if (rc)
return rc;
- return ntb_register_client(&ntb_netdev_client);
+ return ntb_transport_register_client(&ntb_netdev_client);
}
module_init(ntb_netdev_init_module);
static void __exit ntb_netdev_exit_module(void)
{
- ntb_unregister_client(&ntb_netdev_client);
- ntb_unregister_client_dev(KBUILD_MODNAME);
+ ntb_transport_unregister_client(&ntb_netdev_client);
+ ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cf18940f4e84..cb86d7a01542 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -191,7 +191,7 @@ config MDIO_BUS_MUX_GPIO
config MDIO_BUS_MUX_MMIOREG
tristate "Support for MMIO device-controlled MDIO bus multiplexers"
- depends on OF_MDIO
+ depends on OF_MDIO && HAS_IOMEM
select MDIO_BUS_MUX
help
This module provides a driver for MDIO bus multiplexers that
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1a1c4f7b3ec5..06a039414628 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -111,6 +111,7 @@ do { \
#define TUN_FASYNC IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE 0x80000000
+#define TUN_VNET_BE 0x40000000
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
IFF_MULTI_QUEUE)
@@ -205,14 +206,68 @@ struct tun_struct {
u32 flow_count;
};
+#ifdef CONFIG_TUN_VNET_CROSS_LE
+static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
+{
+ return tun->flags & TUN_VNET_BE ? false :
+ virtio_legacy_is_little_endian();
+}
+
+static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+ int be = !!(tun->flags & TUN_VNET_BE);
+
+ if (put_user(be, argp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+ int be;
+
+ if (get_user(be, argp))
+ return -EFAULT;
+
+ if (be)
+ tun->flags |= TUN_VNET_BE;
+ else
+ tun->flags &= ~TUN_VNET_BE;
+
+ return 0;
+}
+#else
+static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
+{
+ return virtio_legacy_is_little_endian();
+}
+
+static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+ return -EINVAL;
+}
+
+static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_TUN_VNET_CROSS_LE */
+
+static inline bool tun_is_little_endian(struct tun_struct *tun)
+{
+ return tun->flags & TUN_VNET_LE ||
+ tun_legacy_is_little_endian(tun);
+}
+
static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
- return __virtio16_to_cpu(tun->flags & TUN_VNET_LE, val);
+ return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}
static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
- return __cpu_to_virtio16(tun->flags & TUN_VNET_LE, val);
+ return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
static inline u32 tun_hashfn(u32 rxhash)
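Note: the tun helpers mirror the macvtap ones exactly. Collapsed into a single hypothetical function (flag bits as #defined in tun.c above), the precedence reads:

#include <linux/kconfig.h>
#include <linux/virtio_byteorder.h>

static bool vnet_hdr_is_little_endian(u32 flags)	/* hypothetical */
{
	if (flags & TUN_VNET_LE)			/* explicit LE wins */
		return true;
	if (IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE) &&
	    (flags & TUN_VNET_BE))			/* then explicit BE */
		return false;
	return virtio_legacy_is_little_endian();	/* legacy default */
}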
@@ -2044,6 +2099,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun->flags &= ~TUN_VNET_LE;
break;
+ case TUNGETVNETBE:
+ ret = tun_get_vnet_be(tun, argp);
+ break;
+
+ case TUNSETVNETBE:
+ ret = tun_set_vnet_be(tun, argp);
+ break;
+
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4545e78840b0..35a2bffe848a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -523,6 +523,7 @@ static const struct driver_info wwan_info = {
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define NVIDIA_VENDOR_ID 0x0955
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -710,6 +711,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index e4b7a47a825c..efc18e05af0a 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
goto err;
- ret = cdc_ncm_bind_common(dev, intf, data_altsetting);
+ ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0);
if (ret)
goto err;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8067b8fbb0ee..db40175b1a0b 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -6,7 +6,7 @@
* Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
*
* USB Host Driver for Network Control Model (NCM)
- * http://www.usb.org/developers/devclass_docs/NCM10.zip
+ * http://www.usb.org/developers/docs/devclass_docs/NCM10_012011.zip
*
* The NCM encoding, decoding and initialization logic
* derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
@@ -684,10 +684,12 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
ctx->tx_curr_skb = NULL;
}
+ kfree(ctx->delayed_ndp16);
+
kfree(ctx);
}
-int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
{
const struct usb_cdc_union_desc *union_desc = NULL;
struct cdc_ncm_ctx *ctx;
@@ -855,6 +857,17 @@ advance:
/* finish setting up the device specific data */
cdc_ncm_setup(dev);
+ /* Device-specific flags */
+ ctx->drvflags = drvflags;
+
+ /* Allocate the delayed NDP if needed. */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ goto error2;
+ dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
+ }
+
/* override ethtool_ops */
dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
@@ -954,8 +967,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
return -ENODEV;
- /* The NCM data altsetting is fixed */
- ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
+ /* The NCM data altsetting is fixed, so we hard-coded it.
+ * Additionally, generic NCM devices are assumed to accept arbitrarily
+ * placed NDP.
+ */
+ ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
/*
* We should get an event when network connection is "connected" or
@@ -986,6 +1002,14 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
+ /* If the NDP should be moved to the end of the NCM package, we can't
+ * follow the NTH16 header as we normally would: the NDP isn't written
+ * to the skb yet, so the wNdpIndex field in the header does not point
+ * at it yet. It is fixed up when the frame is finalized.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+ if (ctx->delayed_ndp16->dwSignature == sign)
+ return ctx->delayed_ndp16;
+
/* follow the chain of NDPs, looking for a match */
while (ndpoffset) {
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
@@ -995,7 +1019,8 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
}
/* align new NDP */
- cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
/* verify that there is room for the NDP and the datagram (reserve) */
if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
@@ -1008,7 +1033,11 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
nth16->wNdpIndex = cpu_to_le16(skb->len);
/* push a new empty NDP */
- ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
+ else
+ ndp16 = ctx->delayed_ndp16;
+
ndp16->dwSignature = sign;
ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
return ndp16;
@@ -1023,6 +1052,15 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
struct sk_buff *skb_out;
u16 n = 0, index, ndplen;
u8 ready2send = 0;
+ u32 delayed_ndp_size;
+
+ /* If cdc_ncm_ndp() writes the NDP into the skb, skb_out->len already
+ * accounts for its size. With a delayed NDP it does not, so reserve
+ * the NDP size here when checking for room.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+ delayed_ndp_size = ctx->max_ndp_size;
+ else
+ delayed_ndp_size = 0;
/* if there is a remaining skb, it gets priority */
if (skb != NULL) {
@@ -1077,7 +1115,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);
/* check if we had enough room left for both NDP and frame */
- if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) {
+ if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_max) {
if (n == 0) {
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
@@ -1150,6 +1188,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* variables will be reset at next call */
}
+ /* If requested, put NDP at end of frame. */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+ nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);
+
+ /* Zero out the delayed NDP so the next frame's signature check fails
+ * and a fresh NDP is built. */
+ ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ }
+
/* If collected data size is less or equal ctx->min_tx_pkt
* bytes, we send buffers as it is. If we get more data, it
* would be more efficient for USB HS mobile device with DMA
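Editorial note on the CDC_NCM_FLAG_NDP_TO_END handling added above: an NCM frame normally interleaves the NDP16 right after the NTH16, but some devices (the Huawei ones in the next hunk) only accept it at the tail of the frame. A sketch of the two layouts, reconstructed from this diff:

	default:      [ NTH16 | NDP16 | datagram | datagram | ... ]
	NDP_TO_END:   [ NTH16 | datagram | datagram | ... | NDP16 ]

In both cases nth16->wNdpIndex must point at the NDP16. In the delayed case the NDP is staged in ctx->delayed_ndp16, copied into the skb only when the frame is finalized in cdc_ncm_fill_tx_frame(), and wNdpIndex is patched at that moment; zeroing the staging buffer afterwards makes the next cdc_ncm_ndp() signature lookup miss, so a fresh NDP is started for the next frame.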
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 735f7dadb9a0..2680a65cd5e4 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -73,11 +73,14 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
struct usb_driver *subdriver = ERR_PTR(-ENODEV);
int ret = -ENODEV;
struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ int drvflags = 0;
/* altsetting should always be 1 for NCM devices - so we hard-coded
- * it here
+ * it here. Some Huawei devices need the NDP part of the NCM package to
+ * be at the end of the frame.
*/
- ret = cdc_ncm_bind_common(usbnet_dev, intf, 1);
+ drvflags |= CDC_NCM_FLAG_NDP_TO_END;
+ ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
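Editorial note: CDC_NCM_FLAG_NDP_TO_END is the only drvflags bit used in this series. A sketch of the assumed header side of the change (the flag's numeric value and the exact member types in include/linux/usb/cdc_ncm.h are assumptions, not verified against the tree):

	/* include/linux/usb/cdc_ncm.h, sketch -- flag value assumed */
	#define CDC_NCM_FLAG_NDP_TO_END	BIT(0)	/* place NDP16 at frame end */

	struct cdc_ncm_ctx {
		/* ... existing members ... */
		struct usb_cdc_ncm_ndp16 *delayed_ndp16;	/* staged tail NDP */
		int drvflags;		/* CDC_NCM_FLAG_*, per bind_common() */
	};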
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index aafa1a1898e4..7f6419ebb5e1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -494,6 +494,7 @@ enum rtl8152_flags {
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_NVIDIA 0x0955
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@@ -4117,6 +4118,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index da11bb5e9c7f..46f4caddccbe 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1216,7 +1216,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
static const u32 rxprod_reg[2] = {
VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
};
- u32 num_rxd = 0;
+ u32 num_pkts = 0;
bool skip_page_frags = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
@@ -1235,13 +1235,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;
struct vmxnet3_cmd_ring *ring = NULL;
- if (num_rxd >= quota) {
+ if (num_pkts >= quota) {
/* we may stop even before we see the EOP desc of
* the current pkt
*/
break;
}
- num_rxd++;
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
@@ -1413,6 +1412,7 @@ not_lro:
napi_gro_receive(&rq->napi, skb);
ctx->skb = NULL;
+ num_pkts++;
}
rcd_done:
@@ -1443,7 +1443,7 @@ rcd_done:
&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
}
- return num_rxd;
+ return num_pkts;
}
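Editorial note: the rename matters because the value returned here is compared against the NAPI budget, which is denominated in packets; counting completion descriptors over-charged packets that span several descriptors. A sketch of the consumer, paraphrased from the driver's poll routine (shape only, not verbatim):

	/* vmxnet3_poll(), sketch: budget and the returned count must be in
	 * the same unit (packets) for napi_complete() to trigger correctly.
	 */
	static int vmxnet3_poll(struct napi_struct *napi, int budget)
	{
		struct vmxnet3_rx_queue *rq =
			container_of(napi, struct vmxnet3_rx_queue, napi);
		int pkts_done = vmxnet3_rq_rx_complete(rq, rq->adapter, budget);

		if (pkts_done < budget) {
			napi_complete(napi);
			/* re-enable device interrupts here */
		}
		return pkts_done;
	}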
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index feacc3b994b7..2f0bd6955f33 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1044,7 +1044,7 @@ EXPORT_SYMBOL(z8530_sync_dma_close);
* @dev: The network device to attach
* @c: The Z8530 channel to configure in sync DMA mode.
*
- * Set up a Z85x30 device for synchronous DMA tranmission. One
+ * Set up a Z85x30 device for synchronous DMA transmission. One
* ISA DMA channel must be available for this to work. The receive
* side is run in PIO mode, but then it has the bigger FIFO.
*/
diff --git a/drivers/ntb/Kconfig b/drivers/ntb/Kconfig
index f69df793dbe2..95944e52fa36 100644
--- a/drivers/ntb/Kconfig
+++ b/drivers/ntb/Kconfig
@@ -1,13 +1,28 @@
-config NTB
- tristate "Intel Non-Transparent Bridge support"
- depends on PCI
- depends on X86
- help
- The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
- connecting 2 systems. When configured, writes to the device's PCI
- mapped memory will be mirrored to a buffer on the remote system. The
- ntb Linux driver uses this point-to-point communication as a method to
- transfer data from one system to the other.
-
- If unsure, say N.
+menuconfig NTB
+ tristate "Non-Transparent Bridge support"
+ depends on PCI
+ help
+ The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
+ connecting 2 systems. When configured, writes to the device's PCI
+ mapped memory will be mirrored to a buffer on the remote system. The
+ ntb Linux driver uses this point-to-point communication as a method to
+ transfer data from one system to the other.
+
+ If unsure, say N.
+
+if NTB
+
+source "drivers/ntb/hw/Kconfig"
+
+source "drivers/ntb/test/Kconfig"
+
+config NTB_TRANSPORT
+ tristate "NTB Transport Client"
+ help
+ This is a transport driver that enables connected systems to exchange
+ messages over the ntb hardware. The transport exposes a queue pair API
+ to client drivers.
+
+ If unsure, say N.
+
+endif # NTB
diff --git a/drivers/ntb/Makefile b/drivers/ntb/Makefile
index 15cb59fd354e..1921dec1949d 100644
--- a/drivers/ntb/Makefile
+++ b/drivers/ntb/Makefile
@@ -1,3 +1,2 @@
-obj-$(CONFIG_NTB) += ntb.o
-
-ntb-objs := ntb_hw.o ntb_transport.o
+obj-$(CONFIG_NTB) += ntb.o hw/ test/
+obj-$(CONFIG_NTB_TRANSPORT) += ntb_transport.o
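Editorial note: in kbuild, a directory listed in an obj-$(...) entry is descended into whenever the option is y or m; the objects themselves are then selected by the sub-Makefiles added in the following hunks:

	# drivers/ntb/hw/Makefile (added below)
	obj-$(CONFIG_NTB_INTEL) += intel/
	# drivers/ntb/hw/intel/Makefile (added below)
	obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o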
diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig
new file mode 100644
index 000000000000..4d5535c4cddf
--- /dev/null
+++ b/drivers/ntb/hw/Kconfig
@@ -0,0 +1 @@
+source "drivers/ntb/hw/intel/Kconfig"
diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile
new file mode 100644
index 000000000000..175d7c92a569
--- /dev/null
+++ b/drivers/ntb/hw/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_INTEL) += intel/
diff --git a/drivers/ntb/hw/intel/Kconfig b/drivers/ntb/hw/intel/Kconfig
new file mode 100644
index 000000000000..91f995e33ac6
--- /dev/null
+++ b/drivers/ntb/hw/intel/Kconfig
@@ -0,0 +1,7 @@
+config NTB_INTEL
+ tristate "Intel Non-Transparent Bridge support"
+ depends on X86_64
+ help
+ This driver supports Intel NTB on capable Xeon and Atom hardware.
+
+ If unsure, say N.
diff --git a/drivers/ntb/hw/intel/Makefile b/drivers/ntb/hw/intel/Makefile
new file mode 100644
index 000000000000..1b434568d2ad
--- /dev/null
+++ b/drivers/ntb/hw/intel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
new file mode 100644
index 000000000000..87751cfd6f4f
--- /dev/null
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -0,0 +1,2274 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copy
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/ntb.h>
+
+#include "ntb_hw_intel.h"
+
+#define NTB_NAME "ntb_hw_intel"
+#define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver"
+#define NTB_VER "2.0"
+
+MODULE_DESCRIPTION(NTB_DESC);
+MODULE_VERSION(NTB_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+#define bar0_off(base, bar) ((base) + ((bar) << 2))
+#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
+
+static const struct intel_ntb_reg atom_reg;
+static const struct intel_ntb_alt_reg atom_pri_reg;
+static const struct intel_ntb_alt_reg atom_sec_reg;
+static const struct intel_ntb_alt_reg atom_b2b_reg;
+static const struct intel_ntb_xlat_reg atom_pri_xlat;
+static const struct intel_ntb_xlat_reg atom_sec_xlat;
+static const struct intel_ntb_reg xeon_reg;
+static const struct intel_ntb_alt_reg xeon_pri_reg;
+static const struct intel_ntb_alt_reg xeon_sec_reg;
+static const struct intel_ntb_alt_reg xeon_b2b_reg;
+static const struct intel_ntb_xlat_reg xeon_pri_xlat;
+static const struct intel_ntb_xlat_reg xeon_sec_xlat;
+static struct intel_b2b_addr xeon_b2b_usd_addr;
+static struct intel_b2b_addr xeon_b2b_dsd_addr;
+static const struct ntb_dev_ops intel_ntb_ops;
+
+static const struct file_operations intel_ntb_debugfs_info;
+static struct dentry *debugfs_dir;
+
+static int b2b_mw_idx = -1;
+module_param(b2b_mw_idx, int, 0644);
+MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
+ "value of zero or positive starts from first mw idx, and a "
+ "negative value starts from last mw idx. Both sides MUST "
+ "set the same value here!");
+
+static unsigned int b2b_mw_share;
+module_param(b2b_mw_share, uint, 0644);
+MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
+ "ntb so that the peer ntb only occupies the first half of "
+ "the mw, so the second half can still be used as a mw. Both "
+ "sides MUST set the same value here!");
+
+module_param_named(xeon_b2b_usd_bar2_addr64,
+ xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+ "XEON B2B USD BAR 2 64-bit address");
+
+module_param_named(xeon_b2b_usd_bar4_addr64,
+ xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
+ "XEON B2B USD BAR 4 64-bit address");
+
+module_param_named(xeon_b2b_usd_bar4_addr32,
+ xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
+ "XEON B2B USD split-BAR 4 32-bit address");
+
+module_param_named(xeon_b2b_usd_bar5_addr32,
+ xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
+ "XEON B2B USD split-BAR 5 32-bit address");
+
+module_param_named(xeon_b2b_dsd_bar2_addr64,
+ xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+ "XEON B2B DSD BAR 2 64-bit address");
+
+module_param_named(xeon_b2b_dsd_bar4_addr64,
+ xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
+ "XEON B2B DSD BAR 4 64-bit address");
+
+module_param_named(xeon_b2b_dsd_bar4_addr32,
+ xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
+ "XEON B2B DSD split-BAR 4 32-bit address");
+
+module_param_named(xeon_b2b_dsd_bar5_addr32,
+ xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
+ "XEON B2B DSD split-BAR 5 32-bit address");
+
+#ifndef ioread64
+#ifdef readq
+#define ioread64 readq
+#else
+#define ioread64 _ioread64
+static inline u64 _ioread64(void __iomem *mmio)
+{
+ u64 low, high;
+
+ low = ioread32(mmio);
+ high = ioread32(mmio + sizeof(u32));
+ return low | (high << 32);
+}
+#endif
+#endif
+
+#ifndef iowrite64
+#ifdef writeq
+#define iowrite64 writeq
+#else
+#define iowrite64 _iowrite64
+static inline void _iowrite64(u64 val, void __iomem *mmio)
+{
+ iowrite32(val, mmio);
+ iowrite32(val >> 32, mmio + sizeof(u32));
+}
+#endif
+#endif
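+
+/* Editor's note: the open-coded fallbacks above split each access into
+ * two 32-bit MMIO operations (low word first), so they are not atomic
+ * and a concurrently updated register could be observed torn. NTB_INTEL
+ * depends on X86_64 (see hw/intel/Kconfig in this patch), where readq
+ * and writeq exist, so in practice the fallback is portability
+ * scaffolding rather than a hot path.
+ */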
+
+static inline int pdev_is_atom(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pdev_is_xeon(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+ return 1;
+ }
+ return 0;
+}
+
+static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
+{
+ ndev->unsafe_flags = 0;
+ ndev->unsafe_flags_ignore = 0;
+
+ /* Only B2B has a workaround to avoid SDOORBELL */
+ if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
+ if (!ntb_topo_is_b2b(ndev->ntb.topo))
+ ndev->unsafe_flags |= NTB_UNSAFE_DB;
+
+ /* No low level workaround to avoid SB01BASE */
+ if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
+ ndev->unsafe_flags |= NTB_UNSAFE_DB;
+ ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
+ }
+}
+
+static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
+ unsigned long flag)
+{
+ return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
+}
+
+static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
+ unsigned long flag)
+{
+ flag &= ndev->unsafe_flags;
+ ndev->unsafe_flags_ignore |= flag;
+
+ return !!flag;
+}
+
+static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
+{
+ if (idx < 0 || idx > ndev->mw_count)
+ return -EINVAL;
+ return ndev->reg->mw_bar[idx];
+}
+
+static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
+ phys_addr_t *db_addr, resource_size_t *db_size,
+ phys_addr_t reg_addr, unsigned long reg)
+{
+ if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+ pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+ if (db_addr) {
+ *db_addr = reg_addr + reg;
+ dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
+ }
+
+ if (db_size) {
+ *db_size = ndev->reg->db_size;
+ dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
+ }
+
+ return 0;
+}
+
+static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
+ void __iomem *mmio)
+{
+ if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+ pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+ return ndev->reg->db_ioread(mmio);
+}
+
+static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
+ void __iomem *mmio)
+{
+ if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+ pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+ if (db_bits & ~ndev->db_valid_mask)
+ return -EINVAL;
+
+ ndev->reg->db_iowrite(db_bits, mmio);
+
+ return 0;
+}
+
+static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
+ void __iomem *mmio)
+{
+ unsigned long irqflags;
+
+ if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+ pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+ if (db_bits & ~ndev->db_valid_mask)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
+ {
+ ndev->db_mask |= db_bits;
+ ndev->reg->db_iowrite(ndev->db_mask, mmio);
+ }
+ spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
+
+ return 0;
+}
+
+static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
+ void __iomem *mmio)
+{
+ unsigned long irqflags;
+
+ if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+ pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+ if (db_bits & ~ndev->db_valid_mask)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
+ {
+ ndev->db_mask &= ~db_bits;
+ ndev->reg->db_iowrite(ndev->db_mask, mmio);
+ }
+ spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
+
+ return 0;
+}
+
+static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
+{
+ u64 shift, mask;
+
+ shift = ndev->db_vec_shift;
+ mask = BIT_ULL(shift) - 1;
+
+ return mask << (shift * db_vector);
+}
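+
+/* Editor's worked example: with db_vec_shift = 2, each vector owns a
+ * contiguous 2-bit slice of the doorbell register, so
+ * ndev_vec_mask(ndev, 1) = (BIT_ULL(2) - 1) << (2 * 1) = 0x3 << 2 = 0xc.
+ */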
+
+static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
+ phys_addr_t *spad_addr, phys_addr_t reg_addr,
+ unsigned long reg)
+{
+ if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
+ pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
+
+ if (idx < 0 || idx >= ndev->spad_count)
+ return -EINVAL;
+
+ if (spad_addr) {
+ *spad_addr = reg_addr + reg + (idx << 2);
+ dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
+ }
+
+ return 0;
+}
+
+static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
+ void __iomem *mmio)
+{
+ if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
+ pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
+
+ if (idx < 0 || idx >= ndev->spad_count)
+ return 0;
+
+ return ioread32(mmio + (idx << 2));
+}
+
+static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
+ void __iomem *mmio)
+{
+ if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
+ pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
+
+ if (idx < 0 || idx >= ndev->spad_count)
+ return -EINVAL;
+
+ iowrite32(val, mmio + (idx << 2));
+
+ return 0;
+}
+
+static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
+{
+ u64 vec_mask;
+
+ vec_mask = ndev_vec_mask(ndev, vec);
+
+ dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);
+
+ ndev->last_ts = jiffies;
+
+ if (vec_mask & ndev->db_link_mask) {
+ if (ndev->reg->poll_link(ndev))
+ ntb_link_event(&ndev->ntb);
+ }
+
+ if (vec_mask & ndev->db_valid_mask)
+ ntb_db_event(&ndev->ntb, vec);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ndev_vec_isr(int irq, void *dev)
+{
+ struct intel_ntb_vec *nvec = dev;
+
+ return ndev_interrupt(nvec->ndev, nvec->num);
+}
+
+static irqreturn_t ndev_irq_isr(int irq, void *dev)
+{
+ struct intel_ntb_dev *ndev = dev;
+
+ return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
+}
+
+static int ndev_init_isr(struct intel_ntb_dev *ndev,
+ int msix_min, int msix_max,
+ int msix_shift, int total_shift)
+{
+ struct pci_dev *pdev;
+ int rc, i, msix_count, node;
+
+ pdev = ndev_pdev(ndev);
+
+ node = dev_to_node(&pdev->dev);
+
+ /* Mask all doorbell interrupts */
+ ndev->db_mask = ndev->db_valid_mask;
+ ndev->reg->db_iowrite(ndev->db_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_mask);
+
+ /* Try to set up msix irq */
+
+ ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
+ GFP_KERNEL, node);
+ if (!ndev->vec)
+ goto err_msix_vec_alloc;
+
+ ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
+ GFP_KERNEL, node);
+ if (!ndev->msix)
+ goto err_msix_alloc;
+
+ for (i = 0; i < msix_max; ++i)
+ ndev->msix[i].entry = i;
+
+ msix_count = pci_enable_msix_range(pdev, ndev->msix,
+ msix_min, msix_max);
+ if (msix_count < 0)
+ goto err_msix_enable;
+
+ for (i = 0; i < msix_count; ++i) {
+ ndev->vec[i].ndev = ndev;
+ ndev->vec[i].num = i;
+ rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
+ "ndev_vec_isr", &ndev->vec[i]);
+ if (rc)
+ goto err_msix_request;
+ }
+
+ dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
+ ndev->db_vec_count = msix_count;
+ ndev->db_vec_shift = msix_shift;
+ return 0;
+
+err_msix_request:
+ while (i-- > 0)
+ free_irq(ndev->msix[i].vector, &ndev->vec[i]);
+ pci_disable_msix(pdev);
+err_msix_enable:
+ kfree(ndev->msix);
+err_msix_alloc:
+ kfree(ndev->vec);
+err_msix_vec_alloc:
+ ndev->msix = NULL;
+ ndev->vec = NULL;
+
+ /* Try to set up msi irq */
+
+ rc = pci_enable_msi(pdev);
+ if (rc)
+ goto err_msi_enable;
+
+ rc = request_irq(pdev->irq, ndev_irq_isr, 0,
+ "ndev_irq_isr", ndev);
+ if (rc)
+ goto err_msi_request;
+
+ dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
+ ndev->db_vec_count = 1;
+ ndev->db_vec_shift = total_shift;
+ return 0;
+
+err_msi_request:
+ pci_disable_msi(pdev);
+err_msi_enable:
+
+ /* Try to set up intx irq */
+
+ pci_intx(pdev, 1);
+
+ rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
+ "ndev_irq_isr", ndev);
+ if (rc)
+ goto err_intx_request;
+
+ dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
+ ndev->db_vec_count = 1;
+ ndev->db_vec_shift = total_shift;
+ return 0;
+
+err_intx_request:
+ return rc;
+}
+
+static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
+{
+ struct pci_dev *pdev;
+ int i;
+
+ pdev = ndev_pdev(ndev);
+
+ /* Mask all doorbell interrupts */
+ ndev->db_mask = ndev->db_valid_mask;
+ ndev->reg->db_iowrite(ndev->db_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_mask);
+
+ if (ndev->msix) {
+ i = ndev->db_vec_count;
+ while (i--)
+ free_irq(ndev->msix[i].vector, &ndev->vec[i]);
+ pci_disable_msix(pdev);
+ kfree(ndev->msix);
+ kfree(ndev->vec);
+ } else {
+ free_irq(pdev->irq, ndev);
+ if (pci_dev_msi_enabled(pdev))
+ pci_disable_msi(pdev);
+ }
+}
+
+static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct intel_ntb_dev *ndev;
+ void __iomem *mmio;
+ char *buf;
+ size_t buf_size;
+ ssize_t ret, off;
+ union { u64 v64; u32 v32; u16 v16; } u;
+
+ ndev = filp->private_data;
+ mmio = ndev->self_mmio;
+
+ buf_size = min(count, 0x800ul);
+
+ buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ off = 0;
+
+ off += scnprintf(buf + off, buf_size - off,
+ "NTB Device Information:\n");
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Connection Topology -\t%s\n",
+ ntb_topo_string(ndev->ntb.topo));
+
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
+ off += scnprintf(buf + off, buf_size - off,
+ "BAR4 Split -\t\t%s\n",
+ ndev->bar4_split ? "yes" : "no");
+
+ off += scnprintf(buf + off, buf_size - off,
+ "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
+ off += scnprintf(buf + off, buf_size - off,
+ "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
+
+ if (!ndev->reg->link_is_up(ndev)) {
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Status -\t\tDown\n");
+ } else {
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Status -\t\tUp\n");
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Speed -\t\tPCI-E Gen %u\n",
+ NTB_LNK_STA_SPEED(ndev->lnk_sta));
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Width -\t\tx%u\n",
+ NTB_LNK_STA_WIDTH(ndev->lnk_sta));
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Memory Window Count -\t%u\n", ndev->mw_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Scratchpad Count -\t%u\n", ndev->spad_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Count -\t%u\n", ndev->db_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
+
+ u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Mask -\t\t%#llx\n", u.v64);
+
+ u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Bell -\t\t%#llx\n", u.v64);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Incoming XLAT:\n");
+
+ u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
+ off += scnprintf(buf + off, buf_size - off,
+ "XLAT23 -\t\t%#018llx\n", u.v64);
+
+ if (ndev->bar4_split) {
+ u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
+ off += scnprintf(buf + off, buf_size - off,
+ "XLAT4 -\t\t\t%#06x\n", u.v32);
+
+ u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
+ off += scnprintf(buf + off, buf_size - off,
+ "XLAT5 -\t\t\t%#06x\n", u.v32);
+ } else {
+ u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
+ off += scnprintf(buf + off, buf_size - off,
+ "XLAT45 -\t\t%#018llx\n", u.v64);
+ }
+
+ u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
+ off += scnprintf(buf + off, buf_size - off,
+ "LMT23 -\t\t\t%#018llx\n", u.v64);
+
+ if (ndev->bar4_split) {
+ u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
+ off += scnprintf(buf + off, buf_size - off,
+ "LMT4 -\t\t\t%#06x\n", u.v32);
+ u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
+ off += scnprintf(buf + off, buf_size - off,
+ "LMT5 -\t\t\t%#06x\n", u.v32);
+ } else {
+ u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
+ off += scnprintf(buf + off, buf_size - off,
+ "LMT45 -\t\t\t%#018llx\n", u.v64);
+ }
+
+ if (pdev_is_xeon(ndev->ntb.pdev)) {
+ if (ntb_topo_is_b2b(ndev->ntb.topo)) {
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Outgoing B2B XLAT:\n");
+
+ u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B XLAT23 -\t\t%#018llx\n", u.v64);
+
+ if (ndev->bar4_split) {
+ u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B XLAT4 -\t\t%#06x\n",
+ u.v32);
+ u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B XLAT5 -\t\t%#06x\n",
+ u.v32);
+ } else {
+ u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B XLAT45 -\t\t%#018llx\n",
+ u.v64);
+ }
+
+ u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B LMT23 -\t\t%#018llx\n", u.v64);
+
+ if (ndev->bar4_split) {
+ u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B LMT4 -\t\t%#06x\n",
+ u.v32);
+ u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B LMT5 -\t\t%#06x\n",
+ u.v32);
+ } else {
+ u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B LMT45 -\t\t%#018llx\n",
+ u.v64);
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Secondary BAR:\n");
+
+ u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR01 -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR23 -\t\t%#018llx\n", u.v64);
+
+ if (ndev->bar4_split) {
+ u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR4 -\t\t\t%#06x\n", u.v32);
+ u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR5 -\t\t\t%#06x\n", u.v32);
+ } else {
+ u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR45 -\t\t%#018llx\n",
+ u.v64);
+ }
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nXEON NTB Statistics:\n");
+
+ u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "Upstream Memory Miss -\t%u\n", u.v16);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nXEON NTB Hardware Errors:\n");
+
+ if (!pci_read_config_word(ndev->ntb.pdev,
+ XEON_DEVSTS_OFFSET, &u.v16))
+ off += scnprintf(buf + off, buf_size - off,
+ "DEVSTS -\t\t%#06x\n", u.v16);
+
+ if (!pci_read_config_word(ndev->ntb.pdev,
+ XEON_LINK_STATUS_OFFSET, &u.v16))
+ off += scnprintf(buf + off, buf_size - off,
+ "LNKSTS -\t\t%#06x\n", u.v16);
+
+ if (!pci_read_config_dword(ndev->ntb.pdev,
+ XEON_UNCERRSTS_OFFSET, &u.v32))
+ off += scnprintf(buf + off, buf_size - off,
+ "UNCERRSTS -\t\t%#06x\n", u.v32);
+
+ if (!pci_read_config_dword(ndev->ntb.pdev,
+ XEON_CORERRSTS_OFFSET, &u.v32))
+ off += scnprintf(buf + off, buf_size - off,
+ "CORERRSTS -\t\t%#06x\n", u.v32);
+ }
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
+ kfree(buf);
+ return ret;
+}
+
+static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
+{
+ if (!debugfs_dir) {
+ ndev->debugfs_dir = NULL;
+ ndev->debugfs_info = NULL;
+ } else {
+ ndev->debugfs_dir =
+ debugfs_create_dir(ndev_name(ndev), debugfs_dir);
+ if (!ndev->debugfs_dir)
+ ndev->debugfs_info = NULL;
+ else
+ ndev->debugfs_info =
+ debugfs_create_file("info", S_IRUSR,
+ ndev->debugfs_dir, ndev,
+ &intel_ntb_debugfs_info);
+ }
+}
+
+static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
+{
+ debugfs_remove_recursive(ndev->debugfs_dir);
+}
+
+static int intel_ntb_mw_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->mw_count;
+}
+
+static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base,
+ resource_size_t *size,
+ resource_size_t *align,
+ resource_size_t *align_size)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ int bar;
+
+ if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+ idx += 1;
+
+ bar = ndev_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ if (base)
+ *base = pci_resource_start(ndev->ntb.pdev, bar) +
+ (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
+
+ if (size)
+ *size = pci_resource_len(ndev->ntb.pdev, bar) -
+ (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
+
+ if (align)
+ *align = pci_resource_len(ndev->ntb.pdev, bar);
+
+ if (align_size)
+ *align_size = 1;
+
+ return 0;
+}
+
+static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ unsigned long base_reg, xlat_reg, limit_reg;
+ resource_size_t bar_size, mw_size;
+ void __iomem *mmio;
+ u64 base, limit, reg_val;
+ int bar;
+
+ if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+ idx += 1;
+
+ bar = ndev_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+ if (idx == ndev->b2b_idx)
+ mw_size = bar_size - ndev->b2b_off;
+ else
+ mw_size = bar_size;
+
+ /* hardware requires that addr is aligned to bar size */
+ if (addr & (bar_size - 1))
+ return -EINVAL;
+
+ /* make sure the range fits in the usable mw size */
+ if (size > mw_size)
+ return -EINVAL;
+
+ mmio = ndev->self_mmio;
+ base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
+ xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
+ limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);
+
+ if (bar < 4 || !ndev->bar4_split) {
+ base = ioread64(mmio + base_reg);
+
+ /* Set the limit if supported, if size is not mw_size */
+ if (limit_reg && size != mw_size)
+ limit = base + size;
+ else
+ limit = 0;
+
+ /* set and verify setting the translation address */
+ iowrite64(addr, mmio + xlat_reg);
+ reg_val = ioread64(mmio + xlat_reg);
+ if (reg_val != addr) {
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+ /* set and verify setting the limit */
+ iowrite64(limit, mmio + limit_reg);
+ reg_val = ioread64(mmio + limit_reg);
+ if (reg_val != limit) {
+ iowrite64(base, mmio + limit_reg);
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+ } else {
+ /* split bar addr range must all be 32 bit */
+ if (addr & (~0ull << 32))
+ return -EINVAL;
+ if ((addr + size) & (~0ull << 32))
+ return -EINVAL;
+
+ base = ioread32(mmio + base_reg);
+
+ /* Set the limit if supported, if size is not mw_size */
+ if (limit_reg && size != mw_size)
+ limit = base + size;
+ else
+ limit = 0;
+
+ /* set and verify setting the translation address */
+ iowrite32(addr, mmio + xlat_reg);
+ reg_val = ioread32(mmio + xlat_reg);
+ if (reg_val != addr) {
+ iowrite32(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+ /* set and verify setting the limit */
+ iowrite32(limit, mmio + limit_reg);
+ reg_val = ioread32(mmio + limit_reg);
+ if (reg_val != limit) {
+ iowrite32(base, mmio + limit_reg);
+ iowrite32(0, mmio + xlat_reg);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int intel_ntb_link_is_up(struct ntb_dev *ntb,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ if (ndev->reg->link_is_up(ndev)) {
+ if (speed)
+ *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
+ if (width)
+ *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
+ return 1;
+ } else {
+ /* TODO MAYBE: is it possible to observe the link speed and
+ * width while link is training? */
+ if (speed)
+ *speed = NTB_SPEED_NONE;
+ if (width)
+ *width = NTB_WIDTH_NONE;
+ return 0;
+ }
+}
+
+static int intel_ntb_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ struct intel_ntb_dev *ndev;
+ u32 ntb_ctl;
+
+ ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+ if (ndev->ntb.topo == NTB_TOPO_SEC)
+ return -EINVAL;
+
+ dev_dbg(ndev_dev(ndev),
+ "Enabling link with max_speed %d max_width %d\n",
+ max_speed, max_width);
+ if (max_speed != NTB_SPEED_AUTO)
+ dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
+ if (max_width != NTB_WIDTH_AUTO)
+ dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
+
+ ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+ ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
+ ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
+ ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
+ if (ndev->bar4_split)
+ ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
+ iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+ return 0;
+}
+
+static int intel_ntb_link_disable(struct ntb_dev *ntb)
+{
+ struct intel_ntb_dev *ndev;
+ u32 ntb_cntl;
+
+ ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+ if (ndev->ntb.topo == NTB_TOPO_SEC)
+ return -EINVAL;
+
+ dev_dbg(ndev_dev(ndev), "Disabling link\n");
+
+ /* Bring NTB link down */
+ ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+ ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
+ ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
+ if (ndev->bar4_split)
+ ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
+ ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
+ iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+ return 0;
+}
+
+static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
+{
+ return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
+}
+
+static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->db_valid_mask;
+}
+
+static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
+{
+ struct intel_ntb_dev *ndev;
+
+ ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+ return ndev->db_vec_count;
+}
+
+static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ if (db_vector < 0 || db_vector > ndev->db_vec_count)
+ return 0;
+
+ return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
+}
+
+static u64 intel_ntb_db_read(struct ntb_dev *ntb)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_db_read(ndev,
+ ndev->self_mmio +
+ ndev->self_reg->db_bell);
+}
+
+static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_db_write(ndev, db_bits,
+ ndev->self_mmio +
+ ndev->self_reg->db_bell);
+}
+
+static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_db_set_mask(ndev, db_bits,
+ ndev->self_mmio +
+ ndev->self_reg->db_mask);
+}
+
+static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_db_clear_mask(ndev, db_bits,
+ ndev->self_mmio +
+ ndev->self_reg->db_mask);
+}
+
+static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
+ phys_addr_t *db_addr,
+ resource_size_t *db_size)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
+ ndev->peer_reg->db_bell);
+}
+
+static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_db_write(ndev, db_bits,
+ ndev->peer_mmio +
+ ndev->peer_reg->db_bell);
+}
+
+static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
+{
+ return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
+}
+
+static int intel_ntb_spad_count(struct ntb_dev *ntb)
+{
+ struct intel_ntb_dev *ndev;
+
+ ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+ return ndev->spad_count;
+}
+
+static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_spad_read(ndev, idx,
+ ndev->self_mmio +
+ ndev->self_reg->spad);
+}
+
+static int intel_ntb_spad_write(struct ntb_dev *ntb,
+ int idx, u32 val)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_spad_write(ndev, idx, val,
+ ndev->self_mmio +
+ ndev->self_reg->spad);
+}
+
+static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
+ phys_addr_t *spad_addr)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
+ ndev->peer_reg->spad);
+}
+
+static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_spad_read(ndev, idx,
+ ndev->peer_mmio +
+ ndev->peer_reg->spad);
+}
+
+static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
+ int idx, u32 val)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_spad_write(ndev, idx, val,
+ ndev->peer_mmio +
+ ndev->peer_reg->spad);
+}
+
+/* ATOM */
+
+static u64 atom_db_ioread(void __iomem *mmio)
+{
+ return ioread64(mmio);
+}
+
+static void atom_db_iowrite(u64 bits, void __iomem *mmio)
+{
+ iowrite64(bits, mmio);
+}
+
+static int atom_poll_link(struct intel_ntb_dev *ndev)
+{
+ u32 ntb_ctl;
+
+ ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);
+
+ if (ntb_ctl == ndev->ntb_ctl)
+ return 0;
+
+ ndev->ntb_ctl = ntb_ctl;
+
+ ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);
+
+ return 1;
+}
+
+static int atom_link_is_up(struct intel_ntb_dev *ndev)
+{
+ return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
+}
+
+static int atom_link_is_err(struct intel_ntb_dev *ndev)
+{
+ if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
+ & ATOM_LTSSMSTATEJMP_FORCEDETECT)
+ return 1;
+
+ if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
+ & ATOM_IBIST_ERR_OFLOW)
+ return 1;
+
+ return 0;
+}
+
+static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
+{
+ switch (ppd & ATOM_PPD_TOPO_MASK) {
+ case ATOM_PPD_TOPO_B2B_USD:
+ dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
+ return NTB_TOPO_B2B_USD;
+
+ case ATOM_PPD_TOPO_B2B_DSD:
+ dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
+ return NTB_TOPO_B2B_DSD;
+
+ case ATOM_PPD_TOPO_PRI_USD:
+ case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
+ case ATOM_PPD_TOPO_SEC_USD:
+ case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
+ dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
+ return NTB_TOPO_NONE;
+ }
+
+ dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
+ return NTB_TOPO_NONE;
+}
+
+static void atom_link_hb(struct work_struct *work)
+{
+ struct intel_ntb_dev *ndev = hb_ndev(work);
+ unsigned long poll_ts;
+ void __iomem *mmio;
+ u32 status32;
+
+ poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
+
+ /* Delay polling the link status if an interrupt was received,
+ * unless the cached link status says the link is down.
+ */
+ if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
+ schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
+ return;
+ }
+
+ if (atom_poll_link(ndev))
+ ntb_link_event(&ndev->ntb);
+
+ if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
+ schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
+ return;
+ }
+
+ /* Link is down with error: recover the link! */
+
+ mmio = ndev->self_mmio;
+
+ /* Driver resets the NTB ModPhy lanes - magic! */
+ iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
+ iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
+ iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
+ iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);
+
+ /* Driver waits 100ms to allow the NTB ModPhy to settle */
+ msleep(100);
+
+ /* Clear AER Errors, write to clear */
+ status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
+ dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
+ status32 &= PCI_ERR_COR_REP_ROLL;
+ iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
+
+ /* Clear unexpected electrical idle event in LTSSM, write to clear */
+ status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
+ dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
+ status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
+ iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
+
+ /* Clear DeSkew Buffer error, write to clear */
+ status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
+ dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
+ status32 |= ATOM_DESKEWSTS_DBERR;
+ iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
+
+ status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
+ dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
+ status32 &= ATOM_IBIST_ERR_OFLOW;
+ iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
+
+ /* Releases the NTB state machine to allow the link to retrain */
+ status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
+ dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
+ status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
+ iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
+
+ /* There is a potential race between the 2 NTB devices recovering at the
+ * same time. If the times are the same, the link will not recover and
+ * the driver will be stuck in this loop forever. Add a random interval
+ * to the recovery time to prevent this race.
+ */
+ schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
+ + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
+}
+
+static int atom_init_isr(struct intel_ntb_dev *ndev)
+{
+ int rc;
+
+ rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
+ ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
+ if (rc)
+ return rc;
+
+ /* ATOM doesn't have link status interrupt, poll on that platform */
+ ndev->last_ts = jiffies;
+ INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
+ schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
+
+ return 0;
+}
+
+static void atom_deinit_isr(struct intel_ntb_dev *ndev)
+{
+ cancel_delayed_work_sync(&ndev->hb_timer);
+ ndev_deinit_isr(ndev);
+}
+
+static int atom_init_ntb(struct intel_ntb_dev *ndev)
+{
+ ndev->mw_count = ATOM_MW_COUNT;
+ ndev->spad_count = ATOM_SPAD_COUNT;
+ ndev->db_count = ATOM_DB_COUNT;
+
+ switch (ndev->ntb.topo) {
+ case NTB_TOPO_B2B_USD:
+ case NTB_TOPO_B2B_DSD:
+ ndev->self_reg = &atom_pri_reg;
+ ndev->peer_reg = &atom_b2b_reg;
+ ndev->xlat_reg = &atom_sec_xlat;
+
+ /* Enable Bus Master and Memory Space on the secondary side */
+ iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+ ndev->self_mmio + ATOM_SPCICMD_OFFSET);
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+ return 0;
+}
+
+static int atom_init_dev(struct intel_ntb_dev *ndev)
+{
+ u32 ppd;
+ int rc;
+
+ rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
+ if (rc)
+ return -EIO;
+
+ ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
+ if (ndev->ntb.topo == NTB_TOPO_NONE)
+ return -EINVAL;
+
+ rc = atom_init_ntb(ndev);
+ if (rc)
+ return rc;
+
+ rc = atom_init_isr(ndev);
+ if (rc)
+ return rc;
+
+ if (ndev->ntb.topo != NTB_TOPO_SEC) {
+ /* Initiate PCI-E link training */
+ rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
+ ppd | ATOM_PPD_INIT_LINK);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void atom_deinit_dev(struct intel_ntb_dev *ndev)
+{
+ atom_deinit_isr(ndev);
+}
+
+/* XEON */
+
+static u64 xeon_db_ioread(void __iomem *mmio)
+{
+ return (u64)ioread16(mmio);
+}
+
+static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
+{
+ iowrite16((u16)bits, mmio);
+}
+
+static int xeon_poll_link(struct intel_ntb_dev *ndev)
+{
+ u16 reg_val;
+ int rc;
+
+ ndev->reg->db_iowrite(ndev->db_link_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_bell);
+
+ rc = pci_read_config_word(ndev->ntb.pdev,
+ XEON_LINK_STATUS_OFFSET, &reg_val);
+ if (rc)
+ return 0;
+
+ if (reg_val == ndev->lnk_sta)
+ return 0;
+
+ ndev->lnk_sta = reg_val;
+
+ return 1;
+}
+
+static int xeon_link_is_up(struct intel_ntb_dev *ndev)
+{
+ if (ndev->ntb.topo == NTB_TOPO_SEC)
+ return 1;
+
+ return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
+}
+
+static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
+{
+ switch (ppd & XEON_PPD_TOPO_MASK) {
+ case XEON_PPD_TOPO_B2B_USD:
+ return NTB_TOPO_B2B_USD;
+
+ case XEON_PPD_TOPO_B2B_DSD:
+ return NTB_TOPO_B2B_DSD;
+
+ case XEON_PPD_TOPO_PRI_USD:
+ case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
+ return NTB_TOPO_PRI;
+
+ case XEON_PPD_TOPO_SEC_USD:
+ case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
+ return NTB_TOPO_SEC;
+ }
+
+ return NTB_TOPO_NONE;
+}
+
+static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
+{
+ if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
+ dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
+ return 1;
+ }
+ return 0;
+}
+
+static int xeon_init_isr(struct intel_ntb_dev *ndev)
+{
+ return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
+ XEON_DB_MSIX_VECTOR_COUNT,
+ XEON_DB_MSIX_VECTOR_SHIFT,
+ XEON_DB_TOTAL_SHIFT);
+}
+
+static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
+{
+ ndev_deinit_isr(ndev);
+}
+
+static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
+ const struct intel_b2b_addr *addr,
+ const struct intel_b2b_addr *peer_addr)
+{
+ struct pci_dev *pdev;
+ void __iomem *mmio;
+ resource_size_t bar_size;
+ phys_addr_t bar_addr;
+ int b2b_bar;
+ u8 bar_sz;
+
+ pdev = ndev_pdev(ndev);
+ mmio = ndev->self_mmio;
+
+ if (ndev->b2b_idx >= ndev->mw_count) {
+ dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
+ b2b_bar = 0;
+ ndev->b2b_off = 0;
+ } else {
+ b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
+ if (b2b_bar < 0)
+ return -EIO;
+
+ dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
+
+ bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
+
+ dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
+
+ if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
+ dev_dbg(ndev_dev(ndev),
+ "b2b using first half of bar\n");
+ ndev->b2b_off = bar_size >> 1;
+ } else if (XEON_B2B_MIN_SIZE <= bar_size) {
+ dev_dbg(ndev_dev(ndev),
+ "b2b using whole bar\n");
+ ndev->b2b_off = 0;
+ --ndev->mw_count;
+ } else {
+ dev_dbg(ndev_dev(ndev),
+ "b2b bar size is too small\n");
+ return -EIO;
+ }
+ }
+
+ /* Reset the secondary bar sizes to match the primary bar sizes,
+ * except disable or halve the size of the b2b secondary bar.
+ *
+ * Note: code for each specific bar size register, because the register
+ * offsets are not in a consistent order (bar5sz comes after ppd, odd).
+ */
+ pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
+ if (b2b_bar == 2) {
+ if (ndev->b2b_off)
+ bar_sz -= 1;
+ else
+ bar_sz = 0;
+ }
+ pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
+ pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);
+
+ if (!ndev->bar4_split) {
+ pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
+ if (b2b_bar == 4) {
+ if (ndev->b2b_off)
+ bar_sz -= 1;
+ else
+ bar_sz = 0;
+ }
+ pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
+ pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
+ } else {
+ pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
+ if (b2b_bar == 4) {
+ if (ndev->b2b_off)
+ bar_sz -= 1;
+ else
+ bar_sz = 0;
+ }
+ pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
+ pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);
+
+ pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
+ if (b2b_bar == 5) {
+ if (ndev->b2b_off)
+ bar_sz -= 1;
+ else
+ bar_sz = 0;
+ }
+ pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
+ pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
+ }
+
+ /* SBAR01 hit by first part of the b2b bar */
+ if (b2b_bar == 0)
+ bar_addr = addr->bar0_addr;
+ else if (b2b_bar == 2)
+ bar_addr = addr->bar2_addr64;
+ else if (b2b_bar == 4 && !ndev->bar4_split)
+ bar_addr = addr->bar4_addr64;
+ else if (b2b_bar == 4)
+ bar_addr = addr->bar4_addr32;
+ else if (b2b_bar == 5)
+ bar_addr = addr->bar5_addr32;
+ else
+ return -EIO;
+
+ dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
+ iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
+
+ /* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
+ * The b2b bar is either disabled above, or configured half-size, and
+ * it starts at the PBAR xlat + offset.
+ */
+
+ bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+ iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
+ bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
+ dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);
+
+ if (!ndev->bar4_split) {
+ bar_addr = addr->bar4_addr64 +
+ (b2b_bar == 4 ? ndev->b2b_off : 0);
+ iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
+ bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
+ dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
+ } else {
+ bar_addr = addr->bar4_addr32 +
+ (b2b_bar == 4 ? ndev->b2b_off : 0);
+ iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
+ bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
+ dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);
+
+ bar_addr = addr->bar5_addr32 +
+ (b2b_bar == 5 ? ndev->b2b_off : 0);
+ iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
+ bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
+ dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
+ }
+
+ /* setup incoming bar limits == base addrs (zero length windows) */
+
+ bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+ iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
+ bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);
+
+ if (!ndev->bar4_split) {
+ bar_addr = addr->bar4_addr64 +
+ (b2b_bar == 4 ? ndev->b2b_off : 0);
+ iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
+ bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
+ } else {
+ bar_addr = addr->bar4_addr32 +
+ (b2b_bar == 4 ? ndev->b2b_off : 0);
+ iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
+ bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);
+
+ bar_addr = addr->bar5_addr32 +
+ (b2b_bar == 5 ? ndev->b2b_off : 0);
+ iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
+ bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr);
+ }
+
+ /* zero incoming translation addrs */
+ iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
+
+ if (!ndev->bar4_split) {
+ iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
+ } else {
+ iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
+ iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
+ }
+
+ /* zero outgoing translation limits (whole bar size windows) */
+ iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
+ if (!ndev->bar4_split) {
+ iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
+ } else {
+ iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
+ iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
+ }
+
+ /* set outgoing translation offsets */
+ bar_addr = peer_addr->bar2_addr64;
+ iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
+ bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);
+
+ if (!ndev->bar4_split) {
+ bar_addr = peer_addr->bar4_addr64;
+ iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
+ bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
+ } else {
+ bar_addr = peer_addr->bar4_addr32;
+ iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
+ bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);
+
+ bar_addr = peer_addr->bar5_addr32;
+ iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
+ bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
+ }
+
+ /* set the translation offset for b2b registers */
+ if (b2b_bar == 0)
+ bar_addr = peer_addr->bar0_addr;
+ else if (b2b_bar == 2)
+ bar_addr = peer_addr->bar2_addr64;
+ else if (b2b_bar == 4 && !ndev->bar4_split)
+ bar_addr = peer_addr->bar4_addr64;
+ else if (b2b_bar == 4)
+ bar_addr = peer_addr->bar4_addr32;
+ else if (b2b_bar == 5)
+ bar_addr = peer_addr->bar5_addr32;
+ else
+ return -EIO;
+
+ /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
+ dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
+ iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
+ iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
+
+ if (b2b_bar) {
+ /* map peer ntb mmio config space registers */
+ ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
+ XEON_B2B_MIN_SIZE);
+ if (!ndev->peer_mmio)
+ return -EIO;
+ }
+
+ return 0;
+}
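For orientation, a minimal userspace sketch (not driver code; the address and offset values are assumed) of the base adjustment xeon_setup_b2b_mw() applies above: when the BAR that carries the b2b registers is also a memory window, its base and limit are pushed up by b2b_off so the register region at the bottom of the BAR stays outside the window.

    /* Sketch of the SBAR base/limit adjustment above: push the window up
     * by b2b_off only on the BAR that also hosts the b2b registers. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t sbar_base(uint64_t bar_addr, int bar, int b2b_bar,
                              uint64_t b2b_off)
    {
            return bar_addr + (b2b_bar == bar ? b2b_off : 0);
    }

    int main(void)
    {
            uint64_t addr = 0x2000000000000000ull; /* XEON_B2B_BAR2_USD_ADDR64 */

            /* BAR2 also hosts the b2b registers: base moves up by 32KB */
            printf("%#llx\n", (unsigned long long)sbar_base(addr, 2, 2, 0x8000));
            /* BAR4 does not: base is unchanged */
            printf("%#llx\n", (unsigned long long)sbar_base(addr, 4, 2, 0x8000));
            return 0;
    }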
+
+static int xeon_init_ntb(struct intel_ntb_dev *ndev)
+{
+ int rc;
+ u32 ntb_ctl;
+
+ if (ndev->bar4_split)
+ ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
+ else
+ ndev->mw_count = XEON_MW_COUNT;
+
+ ndev->spad_count = XEON_SPAD_COUNT;
+ ndev->db_count = XEON_DB_COUNT;
+ ndev->db_link_mask = XEON_DB_LINK_BIT;
+
+ switch (ndev->ntb.topo) {
+ case NTB_TOPO_PRI:
+ if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+ dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
+ return -EINVAL;
+ }
+
+ /* enable link to allow secondary side device to appear */
+ ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+ ntb_ctl &= ~NTB_CTL_DISABLE;
+ iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+ /* use half the spads for the peer */
+ ndev->spad_count >>= 1;
+ ndev->self_reg = &xeon_pri_reg;
+ ndev->peer_reg = &xeon_sec_reg;
+ ndev->xlat_reg = &xeon_sec_xlat;
+ break;
+
+ case NTB_TOPO_SEC:
+ if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+ dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
+ return -EINVAL;
+ }
+ /* use half the spads for the peer */
+ ndev->spad_count >>= 1;
+ ndev->self_reg = &xeon_sec_reg;
+ ndev->peer_reg = &xeon_pri_reg;
+ ndev->xlat_reg = &xeon_pri_xlat;
+ break;
+
+ case NTB_TOPO_B2B_USD:
+ case NTB_TOPO_B2B_DSD:
+ ndev->self_reg = &xeon_pri_reg;
+ ndev->peer_reg = &xeon_b2b_reg;
+ ndev->xlat_reg = &xeon_sec_xlat;
+
+ if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+ ndev->peer_reg = &xeon_pri_reg;
+
+ if (b2b_mw_idx < 0)
+ ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
+ else
+ ndev->b2b_idx = b2b_mw_idx;
+
+ dev_dbg(ndev_dev(ndev),
+ "setting up b2b mw idx %d means %d\n",
+ b2b_mw_idx, ndev->b2b_idx);
+
+ } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
+ dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
+ ndev->db_count -= 1;
+ }
+
+ if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
+ rc = xeon_setup_b2b_mw(ndev,
+ &xeon_b2b_dsd_addr,
+ &xeon_b2b_usd_addr);
+ } else {
+ rc = xeon_setup_b2b_mw(ndev,
+ &xeon_b2b_usd_addr,
+ &xeon_b2b_dsd_addr);
+ }
+ if (rc)
+ return rc;
+
+ /* Enable Bus Master and Memory Space on the secondary side */
+ iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+ ndev->self_mmio + XEON_SPCICMD_OFFSET);
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+ ndev->reg->db_iowrite(ndev->db_valid_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_mask);
+
+ return 0;
+}
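The b2b_mw_idx handling in xeon_init_ntb() above accepts a negative module parameter and resolves it from the end of the memory-window list. A standalone sketch of that resolution (the helper name is hypothetical):

    /* Sketch: resolving a negative b2b_mw_idx as xeon_init_ntb() does --
     * negative values count back from mw_count, like a Python index. */
    #include <stdio.h>

    static int resolve_b2b_idx(int b2b_mw_idx, int mw_count)
    {
            if (b2b_mw_idx < 0)
                    return b2b_mw_idx + mw_count;
            return b2b_mw_idx;
    }

    int main(void)
    {
            printf("%d\n", resolve_b2b_idx(-1, 2)); /* last window: 1 */
            printf("%d\n", resolve_b2b_idx(0, 2));  /* first window: 0 */
            return 0;
    }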
+
+static int xeon_init_dev(struct intel_ntb_dev *ndev)
+{
+ struct pci_dev *pdev;
+ u8 ppd;
+ int rc, mem;
+
+ pdev = ndev_pdev(ndev);
+
+ switch (pdev->device) {
+ /* There is a Xeon hardware errata related to writes to SDOORBELL or
+ * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
+ * which may hang the system. To work around this, use the second memory
+ * window to access the interrupt and scratch pad registers on the
+ * remote system.
+ */
+ case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+ ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
+ break;
+ }
+
+ switch (pdev->device) {
+ /* There is a hardware errata related to accessing any register in
+ * SB01BASE in the presence of bidirectional traffic crossing the NTB.
+ */
+ case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+ ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
+ break;
+ }
+
+ switch (pdev->device) {
+ /* HW Errata on bit 14 of b2bdoorbell register. Writes will not be
+ * mirrored to the remote system. Shrink the number of bits by one,
+ * since bit 14 is the last bit.
+ */
+ case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+ ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
+ break;
+ }
+
+ ndev->reg = &xeon_reg;
+
+ rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
+ if (rc)
+ return -EIO;
+
+ ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
+ dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
+ ntb_topo_string(ndev->ntb.topo));
+ if (ndev->ntb.topo == NTB_TOPO_NONE)
+ return -EINVAL;
+
+ if (ndev->ntb.topo != NTB_TOPO_SEC) {
+ ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
+ dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
+ ppd, ndev->bar4_split);
+ } else {
+ /* This is a way for the transparent side to figure out if we are
+ * doing split BAR or not. There is no way for the hw on the
+ * transparent side to know this and set the PPD itself.
+ */
+ mem = pci_select_bars(pdev, IORESOURCE_MEM);
+ ndev->bar4_split = hweight32(mem) ==
+ HSX_SPLIT_BAR_MW_COUNT + 1;
+ dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
+ mem, ndev->bar4_split);
+ }
+
+ rc = xeon_init_ntb(ndev);
+ if (rc)
+ return rc;
+
+ return xeon_init_isr(ndev);
+}
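xeon_init_dev() above reads a single PPD byte and derives the topology from it. A decoding sketch using the XEON_PPD_* masks defined in ntb_hw_intel.h later in this patch (the ppd value itself is assumed for illustration):

    /* Sketch: decoding the PPD byte read at XEON_PPD_OFFSET. */
    #include <stdio.h>

    #define XEON_PPD_CONN_MASK        0x03
    #define XEON_PPD_CONN_TRANSPARENT 0x00
    #define XEON_PPD_CONN_B2B         0x01
    #define XEON_PPD_CONN_RP          0x02
    #define XEON_PPD_DEV_MASK         0x10
    #define XEON_PPD_SPLIT_BAR_MASK   0x40

    int main(void)
    {
            unsigned char ppd = 0x51; /* assumed: B2B, DSD, split BAR */

            switch (ppd & XEON_PPD_CONN_MASK) {
            case XEON_PPD_CONN_B2B:
                    printf("b2b %s\n", (ppd & XEON_PPD_DEV_MASK) ? "dsd" : "usd");
                    break;
            case XEON_PPD_CONN_RP:
                    printf("primary (root port)\n");
                    break;
            case XEON_PPD_CONN_TRANSPARENT:
                    printf("secondary (transparent)\n");
                    break;
            }
            printf("bar4 split: %d\n", !!(ppd & XEON_PPD_SPLIT_BAR_MASK));
            return 0;
    }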
+
+static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
+{
+ xeon_deinit_isr(ndev);
+}
+
+static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
+{
+ int rc;
+
+ pci_set_drvdata(pdev, ndev);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_pci_enable;
+
+ rc = pci_request_regions(pdev, NTB_NAME);
+ if (rc)
+ goto err_pci_regions;
+
+ pci_set_master(pdev);
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ goto err_dma_mask;
+ dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
+ }
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ goto err_dma_mask;
+ dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
+ }
+
+ ndev->self_mmio = pci_iomap(pdev, 0, 0);
+ if (!ndev->self_mmio) {
+ rc = -EIO;
+ goto err_mmio;
+ }
+ ndev->peer_mmio = ndev->self_mmio;
+
+ return 0;
+
+err_mmio:
+err_dma_mask:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+err_pci_regions:
+ pci_disable_device(pdev);
+err_pci_enable:
+ pci_set_drvdata(pdev, NULL);
+ return rc;
+}
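intel_ntb_init_pci() above tries a 64-bit DMA mask first and falls back to 32-bit, warning that highmem then cannot be reached by DMA. A hedged userspace sketch of the same fallback shape; set_dma_mask() here is a stub, not the PCI API:

    /* Sketch of the 64-then-32 bit DMA mask fallback above; the stub
     * pretends the platform only supports 32-bit DMA. */
    #include <stdint.h>
    #include <stdio.h>

    static int set_dma_mask(uint64_t mask)
    {
            return mask > 0xffffffffull ? -5 /* -EIO */ : 0;
    }

    int main(void)
    {
            if (set_dma_mask(~0ull)) {               /* DMA_BIT_MASK(64) */
                    if (set_dma_mask(0xffffffffull)) /* DMA_BIT_MASK(32) */
                            return 1;
                    puts("warning: cannot DMA highmem");
            }
            return 0;
    }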
+
+static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
+{
+ struct pci_dev *pdev = ndev_pdev(ndev);
+
+ if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
+ pci_iounmap(pdev, ndev->peer_mmio);
+ pci_iounmap(pdev, ndev->self_mmio);
+
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
+ struct pci_dev *pdev)
+{
+ ndev->ntb.pdev = pdev;
+ ndev->ntb.topo = NTB_TOPO_NONE;
+ ndev->ntb.ops = &intel_ntb_ops;
+
+ ndev->b2b_off = 0;
+ ndev->b2b_idx = INT_MAX;
+
+ ndev->bar4_split = 0;
+
+ ndev->mw_count = 0;
+ ndev->spad_count = 0;
+ ndev->db_count = 0;
+ ndev->db_vec_count = 0;
+ ndev->db_vec_shift = 0;
+
+ ndev->ntb_ctl = 0;
+ ndev->lnk_sta = 0;
+
+ ndev->db_valid_mask = 0;
+ ndev->db_link_mask = 0;
+ ndev->db_mask = 0;
+
+ spin_lock_init(&ndev->db_mask_lock);
+}
+
+static int intel_ntb_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct intel_ntb_dev *ndev;
+ int rc, node;
+
+ node = dev_to_node(&pdev->dev);
+
+ if (pdev_is_atom(pdev)) {
+ ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto err_ndev;
+ }
+
+ ndev_init_struct(ndev, pdev);
+
+ rc = intel_ntb_init_pci(ndev, pdev);
+ if (rc)
+ goto err_init_pci;
+
+ rc = atom_init_dev(ndev);
+ if (rc)
+ goto err_init_dev;
+
+ } else if (pdev_is_xeon(pdev)) {
+ ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto err_ndev;
+ }
+
+ ndev_init_struct(ndev, pdev);
+
+ rc = intel_ntb_init_pci(ndev, pdev);
+ if (rc)
+ goto err_init_pci;
+
+ rc = xeon_init_dev(ndev);
+ if (rc)
+ goto err_init_dev;
+
+ } else {
+ rc = -EINVAL;
+ goto err_ndev;
+ }
+
+ ndev_reset_unsafe_flags(ndev);
+
+ ndev->reg->poll_link(ndev);
+
+ ndev_init_debugfs(ndev);
+
+ rc = ntb_register_device(&ndev->ntb);
+ if (rc)
+ goto err_register;
+
+ dev_info(&pdev->dev, "NTB device registered.\n");
+
+ return 0;
+
+err_register:
+ ndev_deinit_debugfs(ndev);
+ if (pdev_is_atom(pdev))
+ atom_deinit_dev(ndev);
+ else if (pdev_is_xeon(pdev))
+ xeon_deinit_dev(ndev);
+err_init_dev:
+ intel_ntb_deinit_pci(ndev);
+err_init_pci:
+ kfree(ndev);
+err_ndev:
+ return rc;
+}
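intel_ntb_pci_probe() above uses the kernel's goto-unwind idiom: each failing step jumps to a label that undoes only what was already set up, in reverse order. A compact sketch of the shape (all steps are stand-ins, not the real calls):

    /* Sketch: goto-unwind error handling as in the probe path above. */
    #include <stdio.h>

    static int step_a(void) { return 0; }  /* succeeds */
    static int step_b(void) { return -1; } /* assume this step fails */
    static void undo_a(void) { puts("undo a"); }

    int main(void)
    {
            int rc;

            rc = step_a();
            if (rc)
                    goto err_a;
            rc = step_b();
            if (rc)
                    goto err_b; /* unwinds a; b has nothing to undo */

            return 0;

    err_b:
            undo_a();
    err_a:
            return 1;
    }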
+
+static void intel_ntb_pci_remove(struct pci_dev *pdev)
+{
+ struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+ ntb_unregister_device(&ndev->ntb);
+ ndev_deinit_debugfs(ndev);
+ if (pdev_is_atom(pdev))
+ atom_deinit_dev(ndev);
+ else if (pdev_is_xeon(pdev))
+ xeon_deinit_dev(ndev);
+ intel_ntb_deinit_pci(ndev);
+ kfree(ndev);
+}
+
+static const struct intel_ntb_reg atom_reg = {
+ .poll_link = atom_poll_link,
+ .link_is_up = atom_link_is_up,
+ .db_ioread = atom_db_ioread,
+ .db_iowrite = atom_db_iowrite,
+ .db_size = sizeof(u64),
+ .ntb_ctl = ATOM_NTBCNTL_OFFSET,
+ .mw_bar = {2, 4},
+};
+
+static const struct intel_ntb_alt_reg atom_pri_reg = {
+ .db_bell = ATOM_PDOORBELL_OFFSET,
+ .db_mask = ATOM_PDBMSK_OFFSET,
+ .spad = ATOM_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_alt_reg atom_b2b_reg = {
+ .db_bell = ATOM_B2B_DOORBELL_OFFSET,
+ .spad = ATOM_B2B_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_xlat_reg atom_sec_xlat = {
+ /* FIXME : .bar0_base = ATOM_SBAR0BASE_OFFSET, */
+ /* FIXME : .bar2_limit = ATOM_SBAR2LMT_OFFSET, */
+ .bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
+};
+
+static const struct intel_ntb_reg xeon_reg = {
+ .poll_link = xeon_poll_link,
+ .link_is_up = xeon_link_is_up,
+ .db_ioread = xeon_db_ioread,
+ .db_iowrite = xeon_db_iowrite,
+ .db_size = sizeof(u32),
+ .ntb_ctl = XEON_NTBCNTL_OFFSET,
+ .mw_bar = {2, 4, 5},
+};
+
+static const struct intel_ntb_alt_reg xeon_pri_reg = {
+ .db_bell = XEON_PDOORBELL_OFFSET,
+ .db_mask = XEON_PDBMSK_OFFSET,
+ .spad = XEON_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_alt_reg xeon_sec_reg = {
+ .db_bell = XEON_SDOORBELL_OFFSET,
+ .db_mask = XEON_SDBMSK_OFFSET,
+ /* second half of the scratchpads: skip (XEON_SPAD_COUNT / 2)
+ * 32-bit registers, i.e. XEON_SPAD_COUNT * 2 bytes */
+ .spad = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
+};
+
+static const struct intel_ntb_alt_reg xeon_b2b_reg = {
+ .db_bell = XEON_B2B_DOORBELL_OFFSET,
+ .spad = XEON_B2B_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
+ /* Note: no primary .bar0_base visible to the secondary side.
+ *
+ * The secondary side cannot get the base address stored in primary
+ * bars. The base address is necessary to set the limit register to
+ * any value other than zero, or unlimited.
+ *
+ * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
+ * window by setting the limit equal to base, nor can it limit the size
+ * of the memory window by setting the limit to base + size.
+ */
+ .bar2_limit = XEON_PBAR23LMT_OFFSET,
+ .bar2_xlat = XEON_PBAR23XLAT_OFFSET,
+};
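The comment above is worth a worked example: with a visible base, a window can be disabled (limit == base) or sized (limit == base + size); without it, neither limit value can be computed. A small sketch with assumed numbers:

    /* Sketch: the base/limit arithmetic the xeon_pri_xlat comment
     * describes. Values are assumed for illustration. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t base = 0x20000000ull; /* would come from SBARxBASE */
            uint64_t size = 0x100000ull;   /* desired window size */

            printf("disable: limit = %#llx\n", (unsigned long long)base);
            printf("size:    limit = %#llx\n",
                   (unsigned long long)(base + size));
            return 0;
    }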
+
+static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
+ .bar0_base = XEON_SBAR0BASE_OFFSET,
+ .bar2_limit = XEON_SBAR23LMT_OFFSET,
+ .bar2_xlat = XEON_SBAR23XLAT_OFFSET,
+};
+
+static struct intel_b2b_addr xeon_b2b_usd_addr = {
+ .bar2_addr64 = XEON_B2B_BAR2_USD_ADDR64,
+ .bar4_addr64 = XEON_B2B_BAR4_USD_ADDR64,
+ .bar4_addr32 = XEON_B2B_BAR4_USD_ADDR32,
+ .bar5_addr32 = XEON_B2B_BAR5_USD_ADDR32,
+};
+
+static struct intel_b2b_addr xeon_b2b_dsd_addr = {
+ .bar2_addr64 = XEON_B2B_BAR2_DSD_ADDR64,
+ .bar4_addr64 = XEON_B2B_BAR4_DSD_ADDR64,
+ .bar4_addr32 = XEON_B2B_BAR4_DSD_ADDR32,
+ .bar5_addr32 = XEON_B2B_BAR5_DSD_ADDR32,
+};
+
+/* operations for primary side of local ntb */
+static const struct ntb_dev_ops intel_ntb_ops = {
+ .mw_count = intel_ntb_mw_count,
+ .mw_get_range = intel_ntb_mw_get_range,
+ .mw_set_trans = intel_ntb_mw_set_trans,
+ .link_is_up = intel_ntb_link_is_up,
+ .link_enable = intel_ntb_link_enable,
+ .link_disable = intel_ntb_link_disable,
+ .db_is_unsafe = intel_ntb_db_is_unsafe,
+ .db_valid_mask = intel_ntb_db_valid_mask,
+ .db_vector_count = intel_ntb_db_vector_count,
+ .db_vector_mask = intel_ntb_db_vector_mask,
+ .db_read = intel_ntb_db_read,
+ .db_clear = intel_ntb_db_clear,
+ .db_set_mask = intel_ntb_db_set_mask,
+ .db_clear_mask = intel_ntb_db_clear_mask,
+ .peer_db_addr = intel_ntb_peer_db_addr,
+ .peer_db_set = intel_ntb_peer_db_set,
+ .spad_is_unsafe = intel_ntb_spad_is_unsafe,
+ .spad_count = intel_ntb_spad_count,
+ .spad_read = intel_ntb_spad_read,
+ .spad_write = intel_ntb_spad_write,
+ .peer_spad_addr = intel_ntb_peer_spad_addr,
+ .peer_spad_read = intel_ntb_peer_spad_read,
+ .peer_spad_write = intel_ntb_peer_spad_write,
+};
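intel_ntb_ops above is consumed through ntb.h wrappers; a client never calls the hardware functions directly. A trimmed-down sketch of that function-pointer dispatch pattern (toy types, not the kernel API):

    /* Sketch: dispatch through an ops table, the same shape as
     * intel_ntb_ops above. */
    #include <stdio.h>

    struct toy_ntb;

    struct toy_ntb_ops {
            int (*spad_count)(struct toy_ntb *ntb);
            int (*spad_write)(struct toy_ntb *ntb, int idx, unsigned int val);
    };

    struct toy_ntb {
            const struct toy_ntb_ops *ops;
    };

    static int toy_spad_count(struct toy_ntb *ntb) { return 16; }

    static int toy_spad_write(struct toy_ntb *ntb, int idx, unsigned int val)
    {
            printf("spad[%d] = %#x\n", idx, val);
            return 0;
    }

    static const struct toy_ntb_ops toy_ops = {
            .spad_count = toy_spad_count,
            .spad_write = toy_spad_write,
    };

    int main(void)
    {
            struct toy_ntb ntb = { .ops = &toy_ops };

            if (ntb.ops->spad_count(&ntb) > 0)
                    ntb.ops->spad_write(&ntb, 0, 0x1234);
            return 0;
    }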
+
+static const struct file_operations intel_ntb_debugfs_info = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ndev_debugfs_read,
+};
+
+static const struct pci_device_id intel_ntb_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
+
+static struct pci_driver intel_ntb_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = intel_ntb_pci_tbl,
+ .probe = intel_ntb_pci_probe,
+ .remove = intel_ntb_pci_remove,
+};
+
+static int __init intel_ntb_pci_driver_init(void)
+{
+ pr_info("%s %s\n", NTB_DESC, NTB_VER);
+
+ if (debugfs_initialized())
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ return pci_register_driver(&intel_ntb_pci_driver);
+}
+module_init(intel_ntb_pci_driver_init);
+
+static void __exit intel_ntb_pci_driver_exit(void)
+{
+ pci_unregister_driver(&intel_ntb_pci_driver);
+
+ debugfs_remove_recursive(debugfs_dir);
+}
+module_exit(intel_ntb_pci_driver_exit);
+
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
new file mode 100644
index 000000000000..7ddaf387b679
--- /dev/null
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -0,0 +1,342 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#ifndef NTB_HW_INTEL_H
+#define NTB_HW_INTEL_H
+
+#include <linux/ntb.h>
+#include <linux/pci.h>
+
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
+#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726
+#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF 0x3727
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB 0x3C0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_SNB 0x3C0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_SNB 0x3C0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_IVT 0x0E0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_IVT 0x0E0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_IVT 0x0E0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX 0x2F0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
+
+/* Intel Xeon hardware */
+
+#define XEON_PBAR23LMT_OFFSET 0x0000
+#define XEON_PBAR45LMT_OFFSET 0x0008
+#define XEON_PBAR4LMT_OFFSET 0x0008
+#define XEON_PBAR5LMT_OFFSET 0x000c
+#define XEON_PBAR23XLAT_OFFSET 0x0010
+#define XEON_PBAR45XLAT_OFFSET 0x0018
+#define XEON_PBAR4XLAT_OFFSET 0x0018
+#define XEON_PBAR5XLAT_OFFSET 0x001c
+#define XEON_SBAR23LMT_OFFSET 0x0020
+#define XEON_SBAR45LMT_OFFSET 0x0028
+#define XEON_SBAR4LMT_OFFSET 0x0028
+#define XEON_SBAR5LMT_OFFSET 0x002c
+#define XEON_SBAR23XLAT_OFFSET 0x0030
+#define XEON_SBAR45XLAT_OFFSET 0x0038
+#define XEON_SBAR4XLAT_OFFSET 0x0038
+#define XEON_SBAR5XLAT_OFFSET 0x003c
+#define XEON_SBAR0BASE_OFFSET 0x0040
+#define XEON_SBAR23BASE_OFFSET 0x0048
+#define XEON_SBAR45BASE_OFFSET 0x0050
+#define XEON_SBAR4BASE_OFFSET 0x0050
+#define XEON_SBAR5BASE_OFFSET 0x0054
+#define XEON_SBDF_OFFSET 0x005c
+#define XEON_NTBCNTL_OFFSET 0x0058
+#define XEON_PDOORBELL_OFFSET 0x0060
+#define XEON_PDBMSK_OFFSET 0x0062
+#define XEON_SDOORBELL_OFFSET 0x0064
+#define XEON_SDBMSK_OFFSET 0x0066
+#define XEON_USMEMMISS_OFFSET 0x0070
+#define XEON_SPAD_OFFSET 0x0080
+#define XEON_PBAR23SZ_OFFSET 0x00d0
+#define XEON_PBAR45SZ_OFFSET 0x00d1
+#define XEON_PBAR4SZ_OFFSET 0x00d1
+#define XEON_SBAR23SZ_OFFSET 0x00d2
+#define XEON_SBAR45SZ_OFFSET 0x00d3
+#define XEON_SBAR4SZ_OFFSET 0x00d3
+#define XEON_PPD_OFFSET 0x00d4
+#define XEON_PBAR5SZ_OFFSET 0x00d5
+#define XEON_SBAR5SZ_OFFSET 0x00d6
+#define XEON_WCCNTRL_OFFSET 0x00e0
+#define XEON_UNCERRSTS_OFFSET 0x014c
+#define XEON_CORERRSTS_OFFSET 0x0158
+#define XEON_LINK_STATUS_OFFSET 0x01a2
+#define XEON_SPCICMD_OFFSET 0x0504
+#define XEON_DEVCTRL_OFFSET 0x0598
+#define XEON_DEVSTS_OFFSET 0x059a
+#define XEON_SLINK_STATUS_OFFSET 0x05a2
+#define XEON_B2B_SPAD_OFFSET 0x0100
+#define XEON_B2B_DOORBELL_OFFSET 0x0140
+#define XEON_B2B_XLAT_OFFSETL 0x0144
+#define XEON_B2B_XLAT_OFFSETU 0x0148
+#define XEON_PPD_CONN_MASK 0x03
+#define XEON_PPD_CONN_TRANSPARENT 0x00
+#define XEON_PPD_CONN_B2B 0x01
+#define XEON_PPD_CONN_RP 0x02
+#define XEON_PPD_DEV_MASK 0x10
+#define XEON_PPD_DEV_USD 0x00
+#define XEON_PPD_DEV_DSD 0x10
+#define XEON_PPD_SPLIT_BAR_MASK 0x40
+
+#define XEON_PPD_TOPO_MASK (XEON_PPD_CONN_MASK | XEON_PPD_DEV_MASK)
+#define XEON_PPD_TOPO_PRI_USD (XEON_PPD_CONN_RP | XEON_PPD_DEV_USD)
+#define XEON_PPD_TOPO_PRI_DSD (XEON_PPD_CONN_RP | XEON_PPD_DEV_DSD)
+#define XEON_PPD_TOPO_SEC_USD (XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_USD)
+#define XEON_PPD_TOPO_SEC_DSD (XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_DSD)
+#define XEON_PPD_TOPO_B2B_USD (XEON_PPD_CONN_B2B | XEON_PPD_DEV_USD)
+#define XEON_PPD_TOPO_B2B_DSD (XEON_PPD_CONN_B2B | XEON_PPD_DEV_DSD)
+
+#define XEON_MW_COUNT 2
+#define HSX_SPLIT_BAR_MW_COUNT 3
+#define XEON_DB_COUNT 15
+#define XEON_DB_LINK 15
+#define XEON_DB_LINK_BIT BIT_ULL(XEON_DB_LINK)
+#define XEON_DB_MSIX_VECTOR_COUNT 4
+#define XEON_DB_MSIX_VECTOR_SHIFT 5
+#define XEON_DB_TOTAL_SHIFT 16
+#define XEON_SPAD_COUNT 16
+
+/* Intel Atom hardware */
+
+#define ATOM_SBAR2XLAT_OFFSET 0x0008
+#define ATOM_PDOORBELL_OFFSET 0x0020
+#define ATOM_PDBMSK_OFFSET 0x0028
+#define ATOM_NTBCNTL_OFFSET 0x0060
+#define ATOM_SPAD_OFFSET 0x0080
+#define ATOM_PPD_OFFSET 0x00d4
+#define ATOM_PBAR2XLAT_OFFSET 0x8008
+#define ATOM_B2B_DOORBELL_OFFSET 0x8020
+#define ATOM_B2B_SPAD_OFFSET 0x8080
+#define ATOM_SPCICMD_OFFSET 0xb004
+#define ATOM_LINK_STATUS_OFFSET 0xb052
+#define ATOM_ERRCORSTS_OFFSET 0xb110
+#define ATOM_IP_BASE 0xc000
+#define ATOM_DESKEWSTS_OFFSET (ATOM_IP_BASE + 0x3024)
+#define ATOM_LTSSMERRSTS0_OFFSET (ATOM_IP_BASE + 0x3180)
+#define ATOM_LTSSMSTATEJMP_OFFSET (ATOM_IP_BASE + 0x3040)
+#define ATOM_IBSTERRRCRVSTS0_OFFSET (ATOM_IP_BASE + 0x3324)
+#define ATOM_MODPHY_PCSREG4 0x1c004
+#define ATOM_MODPHY_PCSREG6 0x1c006
+
+#define ATOM_PPD_INIT_LINK 0x0008
+#define ATOM_PPD_CONN_MASK 0x0300
+#define ATOM_PPD_CONN_TRANSPARENT 0x0000
+#define ATOM_PPD_CONN_B2B 0x0100
+#define ATOM_PPD_CONN_RP 0x0200
+#define ATOM_PPD_DEV_MASK 0x1000
+#define ATOM_PPD_DEV_USD 0x0000
+#define ATOM_PPD_DEV_DSD 0x1000
+#define ATOM_PPD_TOPO_MASK (ATOM_PPD_CONN_MASK | ATOM_PPD_DEV_MASK)
+#define ATOM_PPD_TOPO_PRI_USD (ATOM_PPD_CONN_TRANSPARENT | ATOM_PPD_DEV_USD)
+#define ATOM_PPD_TOPO_PRI_DSD (ATOM_PPD_CONN_TRANSPARENT | ATOM_PPD_DEV_DSD)
+#define ATOM_PPD_TOPO_SEC_USD (ATOM_PPD_CONN_RP | ATOM_PPD_DEV_USD)
+#define ATOM_PPD_TOPO_SEC_DSD (ATOM_PPD_CONN_RP | ATOM_PPD_DEV_DSD)
+#define ATOM_PPD_TOPO_B2B_USD (ATOM_PPD_CONN_B2B | ATOM_PPD_DEV_USD)
+#define ATOM_PPD_TOPO_B2B_DSD (ATOM_PPD_CONN_B2B | ATOM_PPD_DEV_DSD)
+
+#define ATOM_MW_COUNT 2
+#define ATOM_DB_COUNT 34
+#define ATOM_DB_VALID_MASK (BIT_ULL(ATOM_DB_COUNT) - 1)
+#define ATOM_DB_MSIX_VECTOR_COUNT 34
+#define ATOM_DB_MSIX_VECTOR_SHIFT 1
+#define ATOM_DB_TOTAL_SHIFT 34
+#define ATOM_SPAD_COUNT 16
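ATOM_DB_VALID_MASK above, like db_valid_mask in xeon_init_ntb() earlier in this patch, builds an all-ones mask from a bit count. A one-line sketch of the arithmetic:

    /* Sketch: BIT_ULL(count) - 1 yields 'count' low bits set. */
    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n) (1ULL << (n))

    int main(void)
    {
            unsigned int db_count = 15; /* XEON_DB_COUNT */

            printf("%#llx\n",
                   (unsigned long long)(BIT_ULL(db_count) - 1)); /* 0x7fff */
            return 0;
    }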
+
+#define ATOM_NTB_CTL_DOWN_BIT BIT(16)
+#define ATOM_NTB_CTL_ACTIVE(x) (!((x) & ATOM_NTB_CTL_DOWN_BIT))
+
+#define ATOM_DESKEWSTS_DBERR BIT(15)
+#define ATOM_LTSSMERRSTS0_UNEXPECTEDEI BIT(20)
+#define ATOM_LTSSMSTATEJMP_FORCEDETECT BIT(2)
+#define ATOM_IBIST_ERR_OFLOW 0x7FFF7FFF
+
+#define ATOM_LINK_HB_TIMEOUT msecs_to_jiffies(1000)
+#define ATOM_LINK_RECOVERY_TIME msecs_to_jiffies(500)
+
+/* Ntb control and link status */
+
+#define NTB_CTL_CFG_LOCK BIT(0)
+#define NTB_CTL_DISABLE BIT(1)
+#define NTB_CTL_S2P_BAR2_SNOOP BIT(2)
+#define NTB_CTL_P2S_BAR2_SNOOP BIT(4)
+#define NTB_CTL_S2P_BAR4_SNOOP BIT(6)
+#define NTB_CTL_P2S_BAR4_SNOOP BIT(8)
+#define NTB_CTL_S2P_BAR5_SNOOP BIT(12)
+#define NTB_CTL_P2S_BAR5_SNOOP BIT(14)
+
+#define NTB_LNK_STA_ACTIVE_BIT 0x2000
+#define NTB_LNK_STA_SPEED_MASK 0x000f
+#define NTB_LNK_STA_WIDTH_MASK 0x03f0
+#define NTB_LNK_STA_ACTIVE(x) (!!((x) & NTB_LNK_STA_ACTIVE_BIT))
+#define NTB_LNK_STA_SPEED(x) ((x) & NTB_LNK_STA_SPEED_MASK)
+#define NTB_LNK_STA_WIDTH(x) (((x) & NTB_LNK_STA_WIDTH_MASK) >> 4)
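The NTB_LNK_STA_* helpers above unpack a PCIe-style link status word. A quick decoding sketch with an assumed register value:

    /* Sketch: decoding a link status word with the macros above. */
    #include <stdio.h>

    #define NTB_LNK_STA_ACTIVE_BIT 0x2000
    #define NTB_LNK_STA_SPEED_MASK 0x000f
    #define NTB_LNK_STA_WIDTH_MASK 0x03f0

    int main(void)
    {
            unsigned int lnk_sta = 0x2042; /* assumed: active, gen2, x4 */

            printf("active %d speed %u width %u\n",
                   !!(lnk_sta & NTB_LNK_STA_ACTIVE_BIT),
                   lnk_sta & NTB_LNK_STA_SPEED_MASK,
                   (lnk_sta & NTB_LNK_STA_WIDTH_MASK) >> 4);
            return 0;
    }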
+
+/* Use the following addresses for translation between b2b ntb devices in case
+ * the hardware default values are not reliable. */
+#define XEON_B2B_BAR0_USD_ADDR 0x1000000000000000ull
+#define XEON_B2B_BAR2_USD_ADDR64 0x2000000000000000ull
+#define XEON_B2B_BAR4_USD_ADDR64 0x4000000000000000ull
+#define XEON_B2B_BAR4_USD_ADDR32 0x20000000u
+#define XEON_B2B_BAR5_USD_ADDR32 0x40000000u
+#define XEON_B2B_BAR0_DSD_ADDR 0x9000000000000000ull
+#define XEON_B2B_BAR2_DSD_ADDR64 0xa000000000000000ull
+#define XEON_B2B_BAR4_DSD_ADDR64 0xc000000000000000ull
+#define XEON_B2B_BAR4_DSD_ADDR32 0xa0000000u
+#define XEON_B2B_BAR5_DSD_ADDR32 0xc0000000u
+
+/* The peer ntb secondary config space is 32KB fixed size */
+#define XEON_B2B_MIN_SIZE 0x8000
+
+/* flags to indicate hardware errata */
+#define NTB_HWERR_SDOORBELL_LOCKUP BIT_ULL(0)
+#define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1)
+#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2)
+
+/* flags to indicate unsafe api */
+#define NTB_UNSAFE_DB BIT_ULL(0)
+#define NTB_UNSAFE_SPAD BIT_ULL(1)
+
+struct intel_ntb_dev;
+
+struct intel_ntb_reg {
+ int (*poll_link)(struct intel_ntb_dev *ndev);
+ int (*link_is_up)(struct intel_ntb_dev *ndev);
+ u64 (*db_ioread)(void __iomem *mmio);
+ void (*db_iowrite)(u64 db_bits, void __iomem *mmio);
+ unsigned long ntb_ctl;
+ resource_size_t db_size;
+ int mw_bar[];
+};
+
+struct intel_ntb_alt_reg {
+ unsigned long db_bell;
+ unsigned long db_mask;
+ unsigned long spad;
+};
+
+struct intel_ntb_xlat_reg {
+ unsigned long bar0_base;
+ unsigned long bar2_xlat;
+ unsigned long bar2_limit;
+};
+
+struct intel_b2b_addr {
+ phys_addr_t bar0_addr;
+ phys_addr_t bar2_addr64;
+ phys_addr_t bar4_addr64;
+ phys_addr_t bar4_addr32;
+ phys_addr_t bar5_addr32;
+};
+
+struct intel_ntb_vec {
+ struct intel_ntb_dev *ndev;
+ int num;
+};
+
+struct intel_ntb_dev {
+ struct ntb_dev ntb;
+
+ /* offset of peer bar0 in b2b bar */
+ unsigned long b2b_off;
+ /* mw idx used to access peer bar0 */
+ unsigned int b2b_idx;
+
+ /* BAR45 is split into BAR4 and BAR5 */
+ bool bar4_split;
+
+ u32 ntb_ctl;
+ u32 lnk_sta;
+
+ unsigned char mw_count;
+ unsigned char spad_count;
+ unsigned char db_count;
+ unsigned char db_vec_count;
+ unsigned char db_vec_shift;
+
+ u64 db_valid_mask;
+ u64 db_link_mask;
+ u64 db_mask;
+
+ /* synchronize rmw access of db_mask and hw reg */
+ spinlock_t db_mask_lock;
+
+ struct msix_entry *msix;
+ struct intel_ntb_vec *vec;
+
+ const struct intel_ntb_reg *reg;
+ const struct intel_ntb_alt_reg *self_reg;
+ const struct intel_ntb_alt_reg *peer_reg;
+ const struct intel_ntb_xlat_reg *xlat_reg;
+ void __iomem *self_mmio;
+ void __iomem *peer_mmio;
+ phys_addr_t peer_addr;
+
+ unsigned long last_ts;
+ struct delayed_work hb_timer;
+
+ unsigned long hwerr_flags;
+ unsigned long unsafe_flags;
+ unsigned long unsafe_flags_ignore;
+
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_info;
+};
+
+#define ndev_pdev(ndev) ((ndev)->ntb.pdev)
+#define ndev_name(ndev) pci_name(ndev_pdev(ndev))
+#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev)
+#define ntb_ndev(ntb) container_of(ntb, struct intel_ntb_dev, ntb)
+#define hb_ndev(work) container_of(work, struct intel_ntb_dev, hb_timer.work)
+
+#endif
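The ntb_ndev() and hb_ndev() macros above are container_of() in disguise: given a pointer to the embedded member, subtract the member's offset to recover the outer struct. A standalone sketch of the pointer arithmetic (field names are illustrative):

    /* Sketch: the arithmetic behind container_of()/ntb_ndev(). */
    #include <stddef.h>
    #include <stdio.h>

    struct ntb_dev { int dummy; };

    struct intel_ntb_dev {
            int before;
            struct ntb_dev ntb;
    };

    #define ntb_ndev(ntbp) ((struct intel_ntb_dev *) \
            ((char *)(ntbp) - offsetof(struct intel_ntb_dev, ntb)))

    int main(void)
    {
            struct intel_ntb_dev dev = { .before = 42 };
            struct ntb_dev *ntb = &dev.ntb;

            printf("%d\n", ntb_ndev(ntb)->before); /* prints 42 */
            return 0;
    }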
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
new file mode 100644
index 000000000000..23435f2a5486
--- /dev/null
+++ b/drivers/ntb/ntb.c
@@ -0,0 +1,251 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Allen Hubbe <Allen.Hubbe@emc.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/ntb.h>
+#include <linux/pci.h>
+
+#define DRIVER_NAME "ntb"
+#define DRIVER_DESCRIPTION "PCIe NTB Driver Framework"
+
+#define DRIVER_LICENSE "Dual BSD/GPL"
+#define DRIVER_VERSION "1.0"
+#define DRIVER_RELDATE "24 March 2015"
+#define DRIVER_AUTHOR "Allen Hubbe <Allen.Hubbe@emc.com>"
+
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+
+static struct bus_type ntb_bus;
+static void ntb_dev_release(struct device *dev);
+
+int __ntb_register_client(struct ntb_client *client, struct module *mod,
+ const char *mod_name)
+{
+ if (!client)
+ return -EINVAL;
+ if (!ntb_client_ops_is_valid(&client->ops))
+ return -EINVAL;
+
+ memset(&client->drv, 0, sizeof(client->drv));
+ client->drv.bus = &ntb_bus;
+ client->drv.name = mod_name;
+ client->drv.owner = mod;
+
+ return driver_register(&client->drv);
+}
+EXPORT_SYMBOL(__ntb_register_client);
+
+void ntb_unregister_client(struct ntb_client *client)
+{
+ driver_unregister(&client->drv);
+}
+EXPORT_SYMBOL(ntb_unregister_client);
+
+int ntb_register_device(struct ntb_dev *ntb)
+{
+ if (!ntb)
+ return -EINVAL;
+ if (!ntb->pdev)
+ return -EINVAL;
+ if (!ntb->ops)
+ return -EINVAL;
+ if (!ntb_dev_ops_is_valid(ntb->ops))
+ return -EINVAL;
+
+ init_completion(&ntb->released);
+
+ memset(&ntb->dev, 0, sizeof(ntb->dev));
+ ntb->dev.bus = &ntb_bus;
+ ntb->dev.parent = &ntb->pdev->dev;
+ ntb->dev.release = ntb_dev_release;
+ dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
+
+ ntb->ctx = NULL;
+ ntb->ctx_ops = NULL;
+ spin_lock_init(&ntb->ctx_lock);
+
+ return device_register(&ntb->dev);
+}
+EXPORT_SYMBOL(ntb_register_device);
+
+void ntb_unregister_device(struct ntb_dev *ntb)
+{
+ device_unregister(&ntb->dev);
+ wait_for_completion(&ntb->released);
+}
+EXPORT_SYMBOL(ntb_unregister_device);
+
+int ntb_set_ctx(struct ntb_dev *ntb, void *ctx,
+ const struct ntb_ctx_ops *ctx_ops)
+{
+ unsigned long irqflags;
+
+ if (!ntb_ctx_ops_is_valid(ctx_ops))
+ return -EINVAL;
+ if (ntb->ctx_ops)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+ {
+ ntb->ctx = ctx;
+ ntb->ctx_ops = ctx_ops;
+ }
+ spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ntb_set_ctx);
+
+void ntb_clear_ctx(struct ntb_dev *ntb)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+ {
+ ntb->ctx_ops = NULL;
+ ntb->ctx = NULL;
+ }
+ spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_clear_ctx);
+
+void ntb_link_event(struct ntb_dev *ntb)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+ {
+ if (ntb->ctx_ops && ntb->ctx_ops->link_event)
+ ntb->ctx_ops->link_event(ntb->ctx);
+ }
+ spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_link_event);
+
+void ntb_db_event(struct ntb_dev *ntb, int vector)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+ {
+ if (ntb->ctx_ops && ntb->ctx_ops->db_event)
+ ntb->ctx_ops->db_event(ntb->ctx, vector);
+ }
+ spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_db_event);
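ntb_link_event() and ntb_db_event() above take ctx_lock so a callback never observes a half-updated ctx/ctx_ops pair while ntb_set_ctx() or ntb_clear_ctx() runs. A userspace sketch of the same pattern, with a pthread mutex standing in for the spinlock and toy types throughout:

    /* Sketch: lock-protected ctx_ops dispatch, as in ntb_db_event(). */
    #include <pthread.h>
    #include <stdio.h>

    struct toy_ctx_ops { void (*db_event)(void *ctx, int vec); };

    struct toy_dev {
            pthread_mutex_t ctx_lock;
            void *ctx;
            const struct toy_ctx_ops *ctx_ops;
    };

    static void toy_db_event(struct toy_dev *dev, int vec)
    {
            pthread_mutex_lock(&dev->ctx_lock);
            if (dev->ctx_ops && dev->ctx_ops->db_event)
                    dev->ctx_ops->db_event(dev->ctx, vec);
            pthread_mutex_unlock(&dev->ctx_lock);
    }

    static void print_event(void *ctx, int vec)
    {
            printf("%s: doorbell vector %d\n", (const char *)ctx, vec);
    }

    static const struct toy_ctx_ops print_ops = { .db_event = print_event };

    int main(void)
    {
            char name[] = "client0";
            struct toy_dev dev = {
                    .ctx_lock = PTHREAD_MUTEX_INITIALIZER,
                    .ctx = name,
                    .ctx_ops = &print_ops,
            };

            toy_db_event(&dev, 3);
            return 0;
    }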
+
+static int ntb_probe(struct device *dev)
+{
+ struct ntb_dev *ntb;
+ struct ntb_client *client;
+ int rc;
+
+ get_device(dev);
+ ntb = dev_ntb(dev);
+ client = drv_ntb_client(dev->driver);
+
+ rc = client->ops.probe(client, ntb);
+ if (rc)
+ put_device(dev);
+
+ return rc;
+}
+
+static int ntb_remove(struct device *dev)
+{
+ struct ntb_dev *ntb;
+ struct ntb_client *client;
+
+ if (dev->driver) {
+ ntb = dev_ntb(dev);
+ client = drv_ntb_client(dev->driver);
+
+ client->ops.remove(client, ntb);
+ put_device(dev);
+ }
+
+ return 0;
+}
+
+static void ntb_dev_release(struct device *dev)
+{
+ struct ntb_dev *ntb = dev_ntb(dev);
+
+ complete(&ntb->released);
+}
+
+static struct bus_type ntb_bus = {
+ .name = "ntb",
+ .probe = ntb_probe,
+ .remove = ntb_remove,
+};
+
+static int __init ntb_driver_init(void)
+{
+ return bus_register(&ntb_bus);
+}
+module_init(ntb_driver_init);
+
+static void __exit ntb_driver_exit(void)
+{
+ bus_unregister(&ntb_bus);
+}
+module_exit(ntb_driver_exit);
+
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
deleted file mode 100644
index 3f6738612f45..000000000000
--- a/drivers/ntb/ntb_hw.c
+++ /dev/null
@@ -1,1895 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel PCIe NTB Linux driver
- *
- * Contact Information:
- * Jon Mason <jon.mason@intel.com>
- */
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include "ntb_hw.h"
-#include "ntb_regs.h"
-
-#define NTB_NAME "Intel(R) PCI-E Non-Transparent Bridge Driver"
-#define NTB_VER "1.0"
-
-MODULE_DESCRIPTION(NTB_NAME);
-MODULE_VERSION(NTB_VER);
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-
-enum {
- NTB_CONN_TRANSPARENT = 0,
- NTB_CONN_B2B,
- NTB_CONN_RP,
-};
-
-enum {
- NTB_DEV_USD = 0,
- NTB_DEV_DSD,
-};
-
-enum {
- SNB_HW = 0,
- BWD_HW,
-};
-
-static struct dentry *debugfs_dir;
-
-#define BWD_LINK_RECOVERY_TIME 500
-
-/* Translate memory window 0,1,2 to BAR 2,4,5 */
-#define MW_TO_BAR(mw) (mw == 0 ? 2 : (mw == 1 ? 4 : 5))
-
-static const struct pci_device_id ntb_pci_tbl[] = {
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
- {0}
-};
-MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
-
-static int is_ntb_xeon(struct ntb_device *ndev)
-{
- switch (ndev->pdev->device) {
- case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
- case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
- case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
- case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
- case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
- case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
- case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
- case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
- case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
- case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
- case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
- case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
- return 1;
- default:
- return 0;
- }
-
- return 0;
-}
-
-static int is_ntb_atom(struct ntb_device *ndev)
-{
- switch (ndev->pdev->device) {
- case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
- return 1;
- default:
- return 0;
- }
-
- return 0;
-}
-
-static void ntb_set_errata_flags(struct ntb_device *ndev)
-{
- switch (ndev->pdev->device) {
- /*
- * this workaround applies to all platforms up to IvyBridge
- * Haswell has split BAR support and uses a different workaround
- */
- case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
- case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
- case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
- case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
- case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
- case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
- case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
- case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
- case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
- case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
- case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
- case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
- ndev->wa_flags |= WA_SNB_ERR;
- break;
- }
-}
-
-/**
- * ntb_register_event_callback() - register event callback
- * @ndev: pointer to ntb_device instance
- * @func: callback function to register
- *
- * This function registers a callback for any HW driver events such as link
- * up/down, power management notices, etc.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_register_event_callback(struct ntb_device *ndev,
- void (*func)(void *handle,
- enum ntb_hw_event event))
-{
- if (ndev->event_cb)
- return -EINVAL;
-
- ndev->event_cb = func;
-
- return 0;
-}
-
-/**
- * ntb_unregister_event_callback() - unregisters the event callback
- * @ndev: pointer to ntb_device instance
- *
- * This function unregisters the existing callback from transport
- */
-void ntb_unregister_event_callback(struct ntb_device *ndev)
-{
- ndev->event_cb = NULL;
-}
-
-static void ntb_irq_work(unsigned long data)
-{
- struct ntb_db_cb *db_cb = (struct ntb_db_cb *)data;
- int rc;
-
- rc = db_cb->callback(db_cb->data, db_cb->db_num);
- if (rc)
- tasklet_schedule(&db_cb->irq_work);
- else {
- struct ntb_device *ndev = db_cb->ndev;
- unsigned long mask;
-
- mask = readw(ndev->reg_ofs.ldb_mask);
- clear_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
- writew(mask, ndev->reg_ofs.ldb_mask);
- }
-}
-
-/**
- * ntb_register_db_callback() - register a callback for doorbell interrupt
- * @ndev: pointer to ntb_device instance
- * @idx: doorbell index to register callback, zero based
- * @data: pointer to be returned to caller with every callback
- * @func: callback function to register
- *
- * This function registers a callback function for the doorbell interrupt
- * on the primary side. The function will unmask the doorbell as well to
- * allow interrupt.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
- void *data, int (*func)(void *data, int db_num))
-{
- unsigned long mask;
-
- if (idx >= ndev->max_cbs || ndev->db_cb[idx].callback) {
- dev_warn(&ndev->pdev->dev, "Invalid Index.\n");
- return -EINVAL;
- }
-
- ndev->db_cb[idx].callback = func;
- ndev->db_cb[idx].data = data;
- ndev->db_cb[idx].ndev = ndev;
-
- tasklet_init(&ndev->db_cb[idx].irq_work, ntb_irq_work,
- (unsigned long) &ndev->db_cb[idx]);
-
- /* unmask interrupt */
- mask = readw(ndev->reg_ofs.ldb_mask);
- clear_bit(idx * ndev->bits_per_vector, &mask);
- writew(mask, ndev->reg_ofs.ldb_mask);
-
- return 0;
-}
-
-/**
- * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt
- * @ndev: pointer to ntb_device instance
- * @idx: doorbell index to register callback, zero based
- *
- * This function unregisters a callback function for the doorbell interrupt
- * on the primary side. The function will also mask the said doorbell.
- */
-void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
-{
- unsigned long mask;
-
- if (idx >= ndev->max_cbs || !ndev->db_cb[idx].callback)
- return;
-
- mask = readw(ndev->reg_ofs.ldb_mask);
- set_bit(idx * ndev->bits_per_vector, &mask);
- writew(mask, ndev->reg_ofs.ldb_mask);
-
- tasklet_disable(&ndev->db_cb[idx].irq_work);
-
- ndev->db_cb[idx].callback = NULL;
-}
-
-/**
- * ntb_find_transport() - find the transport pointer
- * @pdev: pointer to pci device
- *
- * Given the pci device pointer, return the transport pointer that was
- * passed in when the transport attached.
- *
- * RETURNS: pointer to transport.
- */
-void *ntb_find_transport(struct pci_dev *pdev)
-{
- struct ntb_device *ndev = pci_get_drvdata(pdev);
- return ndev->ntb_transport;
-}
-
-/**
- * ntb_register_transport() - Register NTB transport with NTB HW driver
- * @transport: transport identifier
- *
- * This function allows a transport to reserve the hardware driver for
- * NTB usage.
- *
- * RETURNS: pointer to ntb_device, NULL on error.
- */
-struct ntb_device *ntb_register_transport(struct pci_dev *pdev, void *transport)
-{
- struct ntb_device *ndev = pci_get_drvdata(pdev);
-
- if (ndev->ntb_transport)
- return NULL;
-
- ndev->ntb_transport = transport;
- return ndev;
-}
-
-/**
- * ntb_unregister_transport() - Unregister the transport with the NTB HW driver
- * @ndev - ntb_device of the transport to be freed
- *
- * This function unregisters the transport from the HW driver and performs any
- * necessary cleanups.
- */
-void ntb_unregister_transport(struct ntb_device *ndev)
-{
- int i;
-
- if (!ndev->ntb_transport)
- return;
-
- for (i = 0; i < ndev->max_cbs; i++)
- ntb_unregister_db_callback(ndev, i);
-
- ntb_unregister_event_callback(ndev);
- ndev->ntb_transport = NULL;
-}
-
-/**
- * ntb_write_local_spad() - write to the local scratchpad register
- * @ndev: pointer to ntb_device instance
- * @idx: index to the scratchpad register, 0 based
- * @val: the data value to put into the register
- *
- * This function allows writing of a 32bit value to the indexed scratchpad
- * register. This writes over the data mirrored to the local scratchpad register
- * by the remote system.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
-{
- if (idx >= ndev->limits.max_spads)
- return -EINVAL;
-
- dev_dbg(&ndev->pdev->dev, "Writing %x to local scratch pad index %d\n",
- val, idx);
- writel(val, ndev->reg_ofs.spad_read + idx * 4);
-
- return 0;
-}
-
-/**
- * ntb_read_local_spad() - read from the primary scratchpad register
- * @ndev: pointer to ntb_device instance
- * @idx: index to scratchpad register, 0 based
- * @val: pointer to 32bit integer for storing the register value
- *
- * This function allows reading of the 32bit scratchpad register on
- * the primary (internal) side. This allows the local system to read data
- * written and mirrored to the scratchpad register by the remote system.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
-{
- if (idx >= ndev->limits.max_spads)
- return -EINVAL;
-
- *val = readl(ndev->reg_ofs.spad_write + idx * 4);
- dev_dbg(&ndev->pdev->dev,
- "Reading %x from local scratch pad index %d\n", *val, idx);
-
- return 0;
-}
-
-/**
- * ntb_write_remote_spad() - write to the secondary scratchpad register
- * @ndev: pointer to ntb_device instance
- * @idx: index to the scratchpad register, 0 based
- * @val: the data value to put into the register
- *
- * This function allows writing of a 32bit value to the indexed scratchpad
- * register. The register resides on the secondary (external) side. This allows
- * the local system to write data to be mirrored to the remote system's
- * scratchpad register.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
-{
- if (idx >= ndev->limits.max_spads)
- return -EINVAL;
-
- dev_dbg(&ndev->pdev->dev, "Writing %x to remote scratch pad index %d\n",
- val, idx);
- writel(val, ndev->reg_ofs.spad_write + idx * 4);
-
- return 0;
-}
-
-/**
- * ntb_read_remote_spad() - read from the primary scratchpad register
- * @ndev: pointer to ntb_device instance
- * @idx: index to scratchpad register, 0 based
- * @val: pointer to 32bit integer for storing the register value
- *
- * This function allows reading of the 32bit scratchpad register on
- * the primary (internal) side. This allows the local system to read the data
- * it wrote to be mirrored on the remote system.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
-{
- if (idx >= ndev->limits.max_spads)
- return -EINVAL;
-
- *val = readl(ndev->reg_ofs.spad_read + idx * 4);
- dev_dbg(&ndev->pdev->dev,
- "Reading %x from remote scratch pad index %d\n", *val, idx);
-
- return 0;
-}
-
-/**
- * ntb_get_mw_base() - get addr for the NTB memory window
- * @ndev: pointer to ntb_device instance
- * @mw: memory window number
- *
- * This function provides the base address of the memory window specified.
- *
- * RETURNS: address, or NULL on error.
- */
-resource_size_t ntb_get_mw_base(struct ntb_device *ndev, unsigned int mw)
-{
- if (mw >= ntb_max_mw(ndev))
- return 0;
-
- return pci_resource_start(ndev->pdev, MW_TO_BAR(mw));
-}
-
-/**
- * ntb_get_mw_vbase() - get virtual addr for the NTB memory window
- * @ndev: pointer to ntb_device instance
- * @mw: memory window number
- *
- * This function provides the base virtual address of the memory window
- * specified.
- *
- * RETURNS: pointer to virtual address, or NULL on error.
- */
-void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
-{
- if (mw >= ntb_max_mw(ndev))
- return NULL;
-
- return ndev->mw[mw].vbase;
-}
-
-/**
- * ntb_get_mw_size() - return size of NTB memory window
- * @ndev: pointer to ntb_device instance
- * @mw: memory window number
- *
- * This function provides the physical size of the memory window specified
- *
- * RETURNS: the size of the memory window or zero on error
- */
-u64 ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
-{
- if (mw >= ntb_max_mw(ndev))
- return 0;
-
- return ndev->mw[mw].bar_sz;
-}
-
-/**
- * ntb_set_mw_addr - set the memory window address
- * @ndev: pointer to ntb_device instance
- * @mw: memory window number
- * @addr: base address for data
- *
- * This function sets the base physical address of the memory window. This
- * memory address is where data from the remote system will be transferred into
- * or out of depending on how the transport is configured.
- */
-void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
-{
- if (mw >= ntb_max_mw(ndev))
- return;
-
- dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
- MW_TO_BAR(mw));
-
- ndev->mw[mw].phys_addr = addr;
-
- switch (MW_TO_BAR(mw)) {
- case NTB_BAR_23:
- writeq(addr, ndev->reg_ofs.bar2_xlat);
- break;
- case NTB_BAR_4:
- if (ndev->split_bar)
- writel(addr, ndev->reg_ofs.bar4_xlat);
- else
- writeq(addr, ndev->reg_ofs.bar4_xlat);
- break;
- case NTB_BAR_5:
- writel(addr, ndev->reg_ofs.bar5_xlat);
- break;
- }
-}
-
-/**
- * ntb_ring_doorbell() - Set the doorbell on the secondary/external side
- * @ndev: pointer to ntb_device instance
- * @db: doorbell to ring
- *
- * This function allows triggering of a doorbell on the secondary/external
- * side that will initiate an interrupt on the remote host
- */
-void ntb_ring_doorbell(struct ntb_device *ndev, unsigned int db)
-{
- dev_dbg(&ndev->pdev->dev, "%s: ringing doorbell %d\n", __func__, db);
-
- if (ndev->hw_type == BWD_HW)
- writeq((u64) 1 << db, ndev->reg_ofs.rdb);
- else
- writew(((1 << ndev->bits_per_vector) - 1) <<
- (db * ndev->bits_per_vector), ndev->reg_ofs.rdb);
-}
-
-static void bwd_recover_link(struct ntb_device *ndev)
-{
- u32 status;
-
- /* Driver resets the NTB ModPhy lanes - magic! */
- writeb(0xe0, ndev->reg_base + BWD_MODPHY_PCSREG6);
- writeb(0x40, ndev->reg_base + BWD_MODPHY_PCSREG4);
- writeb(0x60, ndev->reg_base + BWD_MODPHY_PCSREG4);
- writeb(0x60, ndev->reg_base + BWD_MODPHY_PCSREG6);
-
- /* Driver waits 100ms to allow the NTB ModPhy to settle */
- msleep(100);
-
- /* Clear AER Errors, write to clear */
- status = readl(ndev->reg_base + BWD_ERRCORSTS_OFFSET);
- dev_dbg(&ndev->pdev->dev, "ERRCORSTS = %x\n", status);
- status &= PCI_ERR_COR_REP_ROLL;
- writel(status, ndev->reg_base + BWD_ERRCORSTS_OFFSET);
-
- /* Clear unexpected electrical idle event in LTSSM, write to clear */
- status = readl(ndev->reg_base + BWD_LTSSMERRSTS0_OFFSET);
- dev_dbg(&ndev->pdev->dev, "LTSSMERRSTS0 = %x\n", status);
- status |= BWD_LTSSMERRSTS0_UNEXPECTEDEI;
- writel(status, ndev->reg_base + BWD_LTSSMERRSTS0_OFFSET);
-
- /* Clear DeSkew Buffer error, write to clear */
- status = readl(ndev->reg_base + BWD_DESKEWSTS_OFFSET);
- dev_dbg(&ndev->pdev->dev, "DESKEWSTS = %x\n", status);
- status |= BWD_DESKEWSTS_DBERR;
- writel(status, ndev->reg_base + BWD_DESKEWSTS_OFFSET);
-
- status = readl(ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
- dev_dbg(&ndev->pdev->dev, "IBSTERRRCRVSTS0 = %x\n", status);
- status &= BWD_IBIST_ERR_OFLOW;
- writel(status, ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
-
- /* Releases the NTB state machine to allow the link to retrain */
- status = readl(ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
- dev_dbg(&ndev->pdev->dev, "LTSSMSTATEJMP = %x\n", status);
- status &= ~BWD_LTSSMSTATEJMP_FORCEDETECT;
- writel(status, ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
-}
-
-static void ntb_link_event(struct ntb_device *ndev, int link_state)
-{
- unsigned int event;
-
- if (ndev->link_status == link_state)
- return;
-
- if (link_state == NTB_LINK_UP) {
- u16 status;
-
- dev_info(&ndev->pdev->dev, "Link Up\n");
- ndev->link_status = NTB_LINK_UP;
- event = NTB_EVENT_HW_LINK_UP;
-
- if (is_ntb_atom(ndev) ||
- ndev->conn_type == NTB_CONN_TRANSPARENT)
- status = readw(ndev->reg_ofs.lnk_stat);
- else {
- int rc = pci_read_config_word(ndev->pdev,
- SNB_LINK_STATUS_OFFSET,
- &status);
- if (rc)
- return;
- }
-
- ndev->link_width = (status & NTB_LINK_WIDTH_MASK) >> 4;
- ndev->link_speed = (status & NTB_LINK_SPEED_MASK);
- dev_info(&ndev->pdev->dev, "Link Width %d, Link Speed %d\n",
- ndev->link_width, ndev->link_speed);
- } else {
- dev_info(&ndev->pdev->dev, "Link Down\n");
- ndev->link_status = NTB_LINK_DOWN;
- event = NTB_EVENT_HW_LINK_DOWN;
- /* Don't modify link width/speed, we need it in link recovery */
- }
-
- /* notify the upper layer if we have an event change */
- if (ndev->event_cb)
- ndev->event_cb(ndev->ntb_transport, event);
-}
-
-static int ntb_link_status(struct ntb_device *ndev)
-{
- int link_state;
-
- if (is_ntb_atom(ndev)) {
- u32 ntb_cntl;
-
- ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
- if (ntb_cntl & BWD_CNTL_LINK_DOWN)
- link_state = NTB_LINK_DOWN;
- else
- link_state = NTB_LINK_UP;
- } else {
- u16 status;
- int rc;
-
- rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
- &status);
- if (rc)
- return rc;
-
- if (status & NTB_LINK_STATUS_ACTIVE)
- link_state = NTB_LINK_UP;
- else
- link_state = NTB_LINK_DOWN;
- }
-
- ntb_link_event(ndev, link_state);
-
- return 0;
-}
-
-static void bwd_link_recovery(struct work_struct *work)
-{
- struct ntb_device *ndev = container_of(work, struct ntb_device,
- lr_timer.work);
- u32 status32;
-
- bwd_recover_link(ndev);
- /* There is a potential race between the 2 NTB devices recovering at the
- * same time. If the times are the same, the link will not recover and
- * the driver will be stuck in this loop forever. Add a random interval
- * to the recovery time to prevent this race.
- */
- msleep(BWD_LINK_RECOVERY_TIME + prandom_u32() % BWD_LINK_RECOVERY_TIME);
-
- status32 = readl(ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
- if (status32 & BWD_LTSSMSTATEJMP_FORCEDETECT)
- goto retry;
-
- status32 = readl(ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
- if (status32 & BWD_IBIST_ERR_OFLOW)
- goto retry;
-
- status32 = readl(ndev->reg_ofs.lnk_cntl);
- if (!(status32 & BWD_CNTL_LINK_DOWN)) {
- unsigned char speed, width;
- u16 status16;
-
- status16 = readw(ndev->reg_ofs.lnk_stat);
- width = (status16 & NTB_LINK_WIDTH_MASK) >> 4;
- speed = (status16 & NTB_LINK_SPEED_MASK);
- if (ndev->link_width != width || ndev->link_speed != speed)
- goto retry;
- }
-
- schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
- return;
-
-retry:
- schedule_delayed_work(&ndev->lr_timer, NTB_HB_TIMEOUT);
-}
-
-/* BWD doesn't have link status interrupt, poll on that platform */
-static void bwd_link_poll(struct work_struct *work)
-{
- struct ntb_device *ndev = container_of(work, struct ntb_device,
- hb_timer.work);
- unsigned long ts = jiffies;
-
- /* If we haven't gotten an interrupt in a while, check the BWD link
- * status bit
- */
- if (ts > ndev->last_ts + NTB_HB_TIMEOUT) {
- int rc = ntb_link_status(ndev);
- if (rc)
- dev_err(&ndev->pdev->dev,
- "Error determining link status\n");
-
- /* Check to see if a link error is the cause of the link down */
- if (ndev->link_status == NTB_LINK_DOWN) {
- u32 status32 = readl(ndev->reg_base +
- BWD_LTSSMSTATEJMP_OFFSET);
- if (status32 & BWD_LTSSMSTATEJMP_FORCEDETECT) {
- schedule_delayed_work(&ndev->lr_timer, 0);
- return;
- }
- }
- }
-
- schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
-}
-
-static int ntb_xeon_setup(struct ntb_device *ndev)
-{
- switch (ndev->conn_type) {
- case NTB_CONN_B2B:
- ndev->reg_ofs.ldb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
- ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
- ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
- ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
- ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
- if (ndev->split_bar)
- ndev->reg_ofs.bar5_xlat =
- ndev->reg_base + SNB_SBAR5XLAT_OFFSET;
- ndev->limits.max_spads = SNB_MAX_B2B_SPADS;
-
- /* There is a Xeon hardware errata related to writes to
- * SDOORBELL or B2BDOORBELL in conjunction with inbound access
- * to NTB MMIO Space, which may hang the system. To workaround
- * this use the second memory window to access the interrupt and
- * scratch pad registers on the remote system.
- */
- if (ndev->wa_flags & WA_SNB_ERR) {
- if (!ndev->mw[ndev->limits.max_mw - 1].bar_sz)
- return -EINVAL;
-
- ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
- ndev->reg_ofs.spad_write =
- ndev->mw[ndev->limits.max_mw - 1].vbase +
- SNB_SPAD_OFFSET;
- ndev->reg_ofs.rdb =
- ndev->mw[ndev->limits.max_mw - 1].vbase +
- SNB_PDOORBELL_OFFSET;
-
- /* Set the Limit register to 4k, the minimum size, to
- * prevent an illegal access
- */
- writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base +
- SNB_PBAR4LMT_OFFSET);
- /* HW errata on the Limit registers. They can only be
- * written when the base register is 4GB aligned and
- * < 32bit. This should already be the case based on
- * the driver defaults, but write the Limit registers
- * first just in case.
- */
-
- ndev->limits.max_mw = SNB_ERRATA_MAX_MW;
- } else {
- /* HW Errata on bit 14 of b2bdoorbell register. Writes
- * will not be mirrored to the remote system. Shrink
- * the number of bits by one, since bit 14 is the last
- * bit.
- */
- ndev->limits.max_db_bits = SNB_MAX_DB_BITS - 1;
- ndev->reg_ofs.spad_write = ndev->reg_base +
- SNB_B2B_SPAD_OFFSET;
- ndev->reg_ofs.rdb = ndev->reg_base +
- SNB_B2B_DOORBELL_OFFSET;
-
- /* Disable the Limit register, just in case it is set to something
- * silly. A 64bit write should handle it regardless of whether the
- * device has a split BAR or not.
- */
- writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
- /* HW errata on the Limit registers. They can only be
- * written when the base register is 4GB aligned and
- * < 32bit. This should already be the case based on
- * the driver defaults, but write the Limit registers
- * first just in case.
- */
- if (ndev->split_bar)
- ndev->limits.max_mw = HSX_SPLITBAR_MAX_MW;
- else
- ndev->limits.max_mw = SNB_MAX_MW;
- }
-
- /* The Xeon errata workaround requires setting SBAR Base
- * addresses to known values, so that the PBAR XLAT can be
- * pointed at SBAR0 of the remote system.
- */
- if (ndev->dev_type == NTB_DEV_USD) {
- writeq(SNB_MBAR23_DSD_ADDR, ndev->reg_base +
- SNB_PBAR2XLAT_OFFSET);
- if (ndev->wa_flags & WA_SNB_ERR)
- writeq(SNB_MBAR01_DSD_ADDR, ndev->reg_base +
- SNB_PBAR4XLAT_OFFSET);
- else {
- if (ndev->split_bar) {
- writel(SNB_MBAR4_DSD_ADDR,
- ndev->reg_base +
- SNB_PBAR4XLAT_OFFSET);
- writel(SNB_MBAR5_DSD_ADDR,
- ndev->reg_base +
- SNB_PBAR5XLAT_OFFSET);
- } else
- writeq(SNB_MBAR4_DSD_ADDR,
- ndev->reg_base +
- SNB_PBAR4XLAT_OFFSET);
-
- /* B2B_XLAT_OFFSET is a 64bit register, but can
- * only take 32bit writes
- */
- writel(SNB_MBAR01_DSD_ADDR & 0xffffffff,
- ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
- writel(SNB_MBAR01_DSD_ADDR >> 32,
- ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
- }
-
- writeq(SNB_MBAR01_USD_ADDR, ndev->reg_base +
- SNB_SBAR0BASE_OFFSET);
- writeq(SNB_MBAR23_USD_ADDR, ndev->reg_base +
- SNB_SBAR2BASE_OFFSET);
- if (ndev->split_bar) {
- writel(SNB_MBAR4_USD_ADDR, ndev->reg_base +
- SNB_SBAR4BASE_OFFSET);
- writel(SNB_MBAR5_USD_ADDR, ndev->reg_base +
- SNB_SBAR5BASE_OFFSET);
- } else
- writeq(SNB_MBAR4_USD_ADDR, ndev->reg_base +
- SNB_SBAR4BASE_OFFSET);
- } else {
- writeq(SNB_MBAR23_USD_ADDR, ndev->reg_base +
- SNB_PBAR2XLAT_OFFSET);
- if (ndev->wa_flags & WA_SNB_ERR)
- writeq(SNB_MBAR01_USD_ADDR, ndev->reg_base +
- SNB_PBAR4XLAT_OFFSET);
- else {
- if (ndev->split_bar) {
- writel(SNB_MBAR4_USD_ADDR,
- ndev->reg_base +
- SNB_PBAR4XLAT_OFFSET);
- writel(SNB_MBAR5_USD_ADDR,
- ndev->reg_base +
- SNB_PBAR5XLAT_OFFSET);
- } else
- writeq(SNB_MBAR4_USD_ADDR,
- ndev->reg_base +
- SNB_PBAR4XLAT_OFFSET);
-
- /*
- * B2B_XLAT_OFFSET is a 64bit register, but can
- * only take 32bit writes
- */
- writel(SNB_MBAR01_USD_ADDR & 0xffffffff,
- ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
- writel(SNB_MBAR01_USD_ADDR >> 32,
- ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
- }
- writeq(SNB_MBAR01_DSD_ADDR, ndev->reg_base +
- SNB_SBAR0BASE_OFFSET);
- writeq(SNB_MBAR23_DSD_ADDR, ndev->reg_base +
- SNB_SBAR2BASE_OFFSET);
- if (ndev->split_bar) {
- writel(SNB_MBAR4_DSD_ADDR, ndev->reg_base +
- SNB_SBAR4BASE_OFFSET);
- writel(SNB_MBAR5_DSD_ADDR, ndev->reg_base +
- SNB_SBAR5BASE_OFFSET);
- } else
- writeq(SNB_MBAR4_DSD_ADDR, ndev->reg_base +
- SNB_SBAR4BASE_OFFSET);
-
- }
- break;
- case NTB_CONN_RP:
- if (ndev->wa_flags & WA_SNB_ERR) {
- dev_err(&ndev->pdev->dev,
- "NTB-RP disabled due to hardware errata.\n");
- return -EINVAL;
- }
-
- /* Scratchpads need exclusive access from either the primary or the
- * secondary side. Halve the number of spads so that each side gets
- * an equal amount.
- */
- ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
- ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
- /* Note: The SDOORBELL is the cause of the errata. You REALLY
- * don't want to touch it.
- */
- ndev->reg_ofs.rdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
- ndev->reg_ofs.ldb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
- ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
- /* Offset the start of the spads to correspond to whether it is
- * primary or secondary
- */
- ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET +
- ndev->limits.max_spads * 4;
- ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
- ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
- ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
- if (ndev->split_bar) {
- ndev->reg_ofs.bar5_xlat =
- ndev->reg_base + SNB_SBAR5XLAT_OFFSET;
- ndev->limits.max_mw = HSX_SPLITBAR_MAX_MW;
- } else
- ndev->limits.max_mw = SNB_MAX_MW;
- break;
- case NTB_CONN_TRANSPARENT:
- if (ndev->wa_flags & WA_SNB_ERR) {
- dev_err(&ndev->pdev->dev,
- "NTB-TRANSPARENT disabled due to hardware errata.\n");
- return -EINVAL;
- }
-
- /* Scratchpads need exclusive access from either the primary or the
- * secondary side. Halve the number of spads so that each side gets
- * an equal amount.
- */
- ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
- ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
- ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
- ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
- ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET;
- ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
- /* Offset the start of the spads to correspond to whether it is
- * primary or secondary
- */
- ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET +
- ndev->limits.max_spads * 4;
- ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_PBAR2XLAT_OFFSET;
- ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_PBAR4XLAT_OFFSET;
-
- if (ndev->split_bar) {
- ndev->reg_ofs.bar5_xlat =
- ndev->reg_base + SNB_PBAR5XLAT_OFFSET;
- ndev->limits.max_mw = HSX_SPLITBAR_MAX_MW;
- } else
- ndev->limits.max_mw = SNB_MAX_MW;
- break;
- default:
- /*
- * We should never hit this. The detect function should have
- * taken care of everything.
- */
- return -EINVAL;
- }
-
- ndev->reg_ofs.lnk_cntl = ndev->reg_base + SNB_NTBCNTL_OFFSET;
- ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET;
- ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
-
- ndev->limits.msix_cnt = SNB_MSIX_CNT;
- ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
-
- return 0;
-}
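/*
 * Editor's sketch: B2B_XLAT_OFFSET above is a 64bit register that only
 * accepts 32bit accesses, which is why the code writes the low dword to
 * SNB_B2B_XLAT_OFFSETL and the high dword to SNB_B2B_XLAT_OFFSETU. A
 * hypothetical helper capturing the idiom (not part of the driver):
 */
static inline void snb_write_xlat64(u64 addr, void __iomem *lo,
				    void __iomem *hi)
{
	writel(addr & 0xffffffff, lo);	/* low dword first */
	writel(addr >> 32, hi);		/* then the high dword */
}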
-
-static int ntb_bwd_setup(struct ntb_device *ndev)
-{
- int rc;
- u32 val;
-
- ndev->hw_type = BWD_HW;
-
- rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &val);
- if (rc)
- return rc;
-
- switch ((val & BWD_PPD_CONN_TYPE) >> 8) {
- case NTB_CONN_B2B:
- ndev->conn_type = NTB_CONN_B2B;
- break;
- case NTB_CONN_RP:
- default:
- dev_err(&ndev->pdev->dev, "Unsupported NTB configuration\n");
- return -EINVAL;
- }
-
- if (val & BWD_PPD_DEV_TYPE)
- ndev->dev_type = NTB_DEV_DSD;
- else
- ndev->dev_type = NTB_DEV_USD;
-
- /* Initiate PCI-E link training */
- rc = pci_write_config_dword(ndev->pdev, NTB_PPD_OFFSET,
- val | BWD_PPD_INIT_LINK);
- if (rc)
- return rc;
-
- ndev->reg_ofs.ldb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
- ndev->reg_ofs.ldb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET;
- ndev->reg_ofs.rdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET;
- ndev->reg_ofs.bar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET;
- ndev->reg_ofs.bar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET;
- ndev->reg_ofs.lnk_cntl = ndev->reg_base + BWD_NTBCNTL_OFFSET;
- ndev->reg_ofs.lnk_stat = ndev->reg_base + BWD_LINK_STATUS_OFFSET;
- ndev->reg_ofs.spad_read = ndev->reg_base + BWD_SPAD_OFFSET;
- ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET;
- ndev->reg_ofs.spci_cmd = ndev->reg_base + BWD_PCICMD_OFFSET;
- ndev->limits.max_mw = BWD_MAX_MW;
- ndev->limits.max_spads = BWD_MAX_SPADS;
- ndev->limits.max_db_bits = BWD_MAX_DB_BITS;
- ndev->limits.msix_cnt = BWD_MSIX_CNT;
- ndev->bits_per_vector = BWD_DB_BITS_PER_VEC;
-
- /* Since bwd doesn't have a link interrupt, setup a poll timer */
- INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_poll);
- INIT_DELAYED_WORK(&ndev->lr_timer, bwd_link_recovery);
- schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
-
- return 0;
-}
-
-static int ntb_device_setup(struct ntb_device *ndev)
-{
- int rc;
-
- if (is_ntb_xeon(ndev))
- rc = ntb_xeon_setup(ndev);
- else if (is_ntb_atom(ndev))
- rc = ntb_bwd_setup(ndev);
- else
- rc = -ENODEV;
-
- if (rc)
- return rc;
-
- if (ndev->conn_type == NTB_CONN_B2B)
- /* Enable Bus Master and Memory Space on the secondary side */
- writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
- ndev->reg_ofs.spci_cmd);
-
- return 0;
-}
-
-static void ntb_device_free(struct ntb_device *ndev)
-{
- if (is_ntb_atom(ndev)) {
- cancel_delayed_work_sync(&ndev->hb_timer);
- cancel_delayed_work_sync(&ndev->lr_timer);
- }
-}
-
-static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
-{
- struct ntb_db_cb *db_cb = data;
- struct ntb_device *ndev = db_cb->ndev;
- unsigned long mask;
-
- dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
- db_cb->db_num);
-
- mask = readw(ndev->reg_ofs.ldb_mask);
- set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
- writew(mask, ndev->reg_ofs.ldb_mask);
-
- tasklet_schedule(&db_cb->irq_work);
-
- /* No need to check for the specific HB irq, any interrupt means
- * we're connected.
- */
- ndev->last_ts = jiffies;
-
- writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.ldb);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
-{
- struct ntb_db_cb *db_cb = data;
- struct ntb_device *ndev = db_cb->ndev;
- unsigned long mask;
-
- dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
- db_cb->db_num);
-
- mask = readw(ndev->reg_ofs.ldb_mask);
- set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
- writew(mask, ndev->reg_ofs.ldb_mask);
-
- tasklet_schedule(&db_cb->irq_work);
-
- /* On Sandybridge, there are 16 bits in the interrupt register
- * but only 4 vectors. So, 5 bits are assigned to the first 3
- * vectors, with the 4th having a single bit for link
- * interrupts.
- */
- writew(((1 << ndev->bits_per_vector) - 1) <<
- (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.ldb);
-
- return IRQ_HANDLED;
-}
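/*
 * Editor's note: with SNB_DB_BITS_PER_VEC = 5, the ack write above covers
 * doorbell bits [5n, 5n+4] for vector n, leaving bit 15 (SNB_LINK_DB) to
 * the event vector. A stand-alone check of the mask arithmetic:
 */
#include <stdio.h>

int main(void)
{
	unsigned bits_per_vector = 5; /* SNB_DB_BITS_PER_VEC */

	for (unsigned db = 0; db < 3; db++)
		printf("vector %u acks mask 0x%04x\n", db,
		       ((1u << bits_per_vector) - 1) << (db * bits_per_vector));
	return 0; /* prints 0x001f, 0x03e0, 0x7c00 */
}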
-
-/* Since we do not have a HW doorbell in BWD, this is only used in JF/JT */
-static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
-{
- struct ntb_device *ndev = dev;
- int rc;
-
- dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for Events\n", irq);
-
- rc = ntb_link_status(ndev);
- if (rc)
- dev_err(&ndev->pdev->dev, "Error determining link status\n");
-
- /* bit 15 is always the link bit */
- writew(1 << SNB_LINK_DB, ndev->reg_ofs.ldb);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ntb_interrupt(int irq, void *dev)
-{
- struct ntb_device *ndev = dev;
- unsigned int i = 0;
-
- if (is_ntb_atom(ndev)) {
- u64 ldb = readq(ndev->reg_ofs.ldb);
-
- dev_dbg(&ndev->pdev->dev, "irq %d - ldb = %Lx\n", irq, ldb);
-
- while (ldb) {
- i = __ffs(ldb);
- ldb &= ldb - 1;
- bwd_callback_msix_irq(irq, &ndev->db_cb[i]);
- }
- } else {
- u16 ldb = readw(ndev->reg_ofs.ldb);
-
- dev_dbg(&ndev->pdev->dev, "irq %d - ldb = %x\n", irq, ldb);
-
- if (ldb & SNB_DB_HW_LINK) {
- xeon_event_msix_irq(irq, dev);
- ldb &= ~SNB_DB_HW_LINK;
- }
-
- while (ldb) {
- i = __ffs(ldb);
- ldb &= ldb - 1;
- xeon_callback_msix_irq(irq, &ndev->db_cb[i]);
- }
- }
-
- return IRQ_HANDLED;
-}
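/*
 * Editor's note: the dispatch loops above use a standard bit-scan idiom:
 * __ffs() finds the lowest set doorbell bit and 'ldb &= ldb - 1' clears
 * it, so each pending doorbell is serviced exactly once. Stand-alone form
 * (GCC's __builtin_ctzll standing in for the kernel's __ffs()):
 */
#include <stdio.h>

int main(void)
{
	unsigned long long ldb = 0x29; /* doorbells 0, 3 and 5 pending */

	while (ldb) {
		unsigned i = __builtin_ctzll(ldb); /* lowest set bit */
		ldb &= ldb - 1;                    /* clear it */
		printf("servicing doorbell %u\n", i);
	}
	return 0;
}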
-
-static int ntb_setup_snb_msix(struct ntb_device *ndev, int msix_entries)
-{
- struct pci_dev *pdev = ndev->pdev;
- struct msix_entry *msix;
- int rc, i;
-
- if (msix_entries < ndev->limits.msix_cnt)
- return -ENOSPC;
-
- rc = pci_enable_msix_exact(pdev, ndev->msix_entries, msix_entries);
- if (rc < 0)
- return rc;
-
- for (i = 0; i < msix_entries; i++) {
- msix = &ndev->msix_entries[i];
- WARN_ON(!msix->vector);
-
- if (i == msix_entries - 1) {
- rc = request_irq(msix->vector,
- xeon_event_msix_irq, 0,
- "ntb-event-msix", ndev);
- if (rc)
- goto err;
- } else {
- rc = request_irq(msix->vector,
- xeon_callback_msix_irq, 0,
- "ntb-callback-msix",
- &ndev->db_cb[i]);
- if (rc)
- goto err;
- }
- }
-
- ndev->num_msix = msix_entries;
- ndev->max_cbs = msix_entries - 1;
-
- return 0;
-
-err:
- while (--i >= 0) {
- /* Code never reaches here for the last entry (msix_entries - 1) */
- msix = &ndev->msix_entries[i];
- free_irq(msix->vector, &ndev->db_cb[i]);
- }
-
- pci_disable_msix(pdev);
- ndev->num_msix = 0;
-
- return rc;
-}
-
-static int ntb_setup_bwd_msix(struct ntb_device *ndev, int msix_entries)
-{
- struct pci_dev *pdev = ndev->pdev;
- struct msix_entry *msix;
- int rc, i;
-
- msix_entries = pci_enable_msix_range(pdev, ndev->msix_entries,
- 1, msix_entries);
- if (msix_entries < 0)
- return msix_entries;
-
- for (i = 0; i < msix_entries; i++) {
- msix = &ndev->msix_entries[i];
- WARN_ON(!msix->vector);
-
- rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
- "ntb-callback-msix", &ndev->db_cb[i]);
- if (rc)
- goto err;
- }
-
- ndev->num_msix = msix_entries;
- ndev->max_cbs = msix_entries;
-
- return 0;
-
-err:
- while (--i >= 0)
- free_irq(msix->vector, &ndev->db_cb[i]);
-
- pci_disable_msix(pdev);
- ndev->num_msix = 0;
-
- return rc;
-}
-
-static int ntb_setup_msix(struct ntb_device *ndev)
-{
- struct pci_dev *pdev = ndev->pdev;
- int msix_entries;
- int rc, i;
-
- msix_entries = pci_msix_vec_count(pdev);
- if (msix_entries < 0) {
- rc = msix_entries;
- goto err;
- } else if (msix_entries > ndev->limits.msix_cnt) {
- rc = -EINVAL;
- goto err;
- }
-
- ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
- GFP_KERNEL);
- if (!ndev->msix_entries) {
- rc = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < msix_entries; i++)
- ndev->msix_entries[i].entry = i;
-
- if (is_ntb_atom(ndev))
- rc = ntb_setup_bwd_msix(ndev, msix_entries);
- else
- rc = ntb_setup_snb_msix(ndev, msix_entries);
- if (rc)
- goto err1;
-
- return 0;
-
-err1:
- kfree(ndev->msix_entries);
-err:
- dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
- return rc;
-}
-
-static int ntb_setup_msi(struct ntb_device *ndev)
-{
- struct pci_dev *pdev = ndev->pdev;
- int rc;
-
- rc = pci_enable_msi(pdev);
- if (rc)
- return rc;
-
- rc = request_irq(pdev->irq, ntb_interrupt, 0, "ntb-msi", ndev);
- if (rc) {
- pci_disable_msi(pdev);
- dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
- return rc;
- }
-
- return 0;
-}
-
-static int ntb_setup_intx(struct ntb_device *ndev)
-{
- struct pci_dev *pdev = ndev->pdev;
- int rc;
-
- /* Verify intx is enabled */
- pci_intx(pdev, 1);
-
- rc = request_irq(pdev->irq, ntb_interrupt, IRQF_SHARED, "ntb-intx",
- ndev);
- if (rc)
- return rc;
-
- return 0;
-}
-
-static int ntb_setup_interrupts(struct ntb_device *ndev)
-{
- int rc;
-
- /* On BWD, disable all interrupts. On SNB, disable all but Link
- * Interrupt. The rest will be unmasked as callbacks are registered.
- */
- if (is_ntb_atom(ndev))
- writeq(~0, ndev->reg_ofs.ldb_mask);
- else {
- u16 var = 1 << SNB_LINK_DB;
- writew(~var, ndev->reg_ofs.ldb_mask);
- }
-
- rc = ntb_setup_msix(ndev);
- if (!rc)
- goto done;
-
- ndev->bits_per_vector = 1;
- ndev->max_cbs = ndev->limits.max_db_bits;
-
- rc = ntb_setup_msi(ndev);
- if (!rc)
- goto done;
-
- rc = ntb_setup_intx(ndev);
- if (rc) {
- dev_err(&ndev->pdev->dev, "no usable interrupts\n");
- return rc;
- }
-
-done:
- return 0;
-}
-
-static void ntb_free_interrupts(struct ntb_device *ndev)
-{
- struct pci_dev *pdev = ndev->pdev;
-
- /* mask interrupts */
- if (is_ntb_atom(ndev))
- writeq(~0, ndev->reg_ofs.ldb_mask);
- else
- writew(~0, ndev->reg_ofs.ldb_mask);
-
- if (ndev->num_msix) {
- struct msix_entry *msix;
- u32 i;
-
- for (i = 0; i < ndev->num_msix; i++) {
- msix = &ndev->msix_entries[i];
- if (is_ntb_xeon(ndev) && i == ndev->num_msix - 1)
- free_irq(msix->vector, ndev);
- else
- free_irq(msix->vector, &ndev->db_cb[i]);
- }
- pci_disable_msix(pdev);
- kfree(ndev->msix_entries);
- } else {
- free_irq(pdev->irq, ndev);
-
- if (pci_dev_msi_enabled(pdev))
- pci_disable_msi(pdev);
- }
-}
-
-static int ntb_create_callbacks(struct ntb_device *ndev)
-{
- int i;
-
- /* Chicken-egg issue. We won't know how many callbacks are necessary
- * until we see how many MSI-X vectors we get, but these pointers need
- * to be passed into the MSI-X register function. So, we allocate the
- * max, knowing that they might not all be used, to work around this.
- */
- ndev->db_cb = kcalloc(ndev->limits.max_db_bits,
- sizeof(struct ntb_db_cb),
- GFP_KERNEL);
- if (!ndev->db_cb)
- return -ENOMEM;
-
- for (i = 0; i < ndev->limits.max_db_bits; i++) {
- ndev->db_cb[i].db_num = i;
- ndev->db_cb[i].ndev = ndev;
- }
-
- return 0;
-}
-
-static void ntb_free_callbacks(struct ntb_device *ndev)
-{
- int i;
-
- for (i = 0; i < ndev->limits.max_db_bits; i++)
- ntb_unregister_db_callback(ndev, i);
-
- kfree(ndev->db_cb);
-}
-
-static ssize_t ntb_debugfs_read(struct file *filp, char __user *ubuf,
- size_t count, loff_t *offp)
-{
- struct ntb_device *ndev;
- char *buf;
- ssize_t ret, offset, out_count;
-
- out_count = 500;
-
- buf = kmalloc(out_count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ndev = filp->private_data;
- offset = 0;
- offset += snprintf(buf + offset, out_count - offset,
- "NTB Device Information:\n");
- offset += snprintf(buf + offset, out_count - offset,
- "Connection Type - \t\t%s\n",
- ndev->conn_type == NTB_CONN_TRANSPARENT ?
- "Transparent" : (ndev->conn_type == NTB_CONN_B2B) ?
- "Back to back" : "Root Port");
- offset += snprintf(buf + offset, out_count - offset,
- "Device Type - \t\t\t%s\n",
- ndev->dev_type == NTB_DEV_USD ?
- "DSD/USP" : "USD/DSP");
- offset += snprintf(buf + offset, out_count - offset,
- "Max Number of Callbacks - \t%u\n",
- ntb_max_cbs(ndev));
- offset += snprintf(buf + offset, out_count - offset,
- "Link Status - \t\t\t%s\n",
- ntb_hw_link_status(ndev) ? "Up" : "Down");
- if (ntb_hw_link_status(ndev)) {
- offset += snprintf(buf + offset, out_count - offset,
- "Link Speed - \t\t\tPCI-E Gen %u\n",
- ndev->link_speed);
- offset += snprintf(buf + offset, out_count - offset,
- "Link Width - \t\t\tx%u\n",
- ndev->link_width);
- }
-
- if (is_ntb_xeon(ndev)) {
- u32 status32;
- u16 status16;
- int rc;
-
- offset += snprintf(buf + offset, out_count - offset,
- "\nNTB Device Statistics:\n");
- offset += snprintf(buf + offset, out_count - offset,
- "Upstream Memory Miss - \t%u\n",
- readw(ndev->reg_base +
- SNB_USMEMMISS_OFFSET));
-
- offset += snprintf(buf + offset, out_count - offset,
- "\nNTB Hardware Errors:\n");
-
- rc = pci_read_config_word(ndev->pdev, SNB_DEVSTS_OFFSET,
- &status16);
- if (!rc)
- offset += snprintf(buf + offset, out_count - offset,
- "DEVSTS - \t%#06x\n", status16);
-
- rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
- &status16);
- if (!rc)
- offset += snprintf(buf + offset, out_count - offset,
- "LNKSTS - \t%#06x\n", status16);
-
- rc = pci_read_config_dword(ndev->pdev, SNB_UNCERRSTS_OFFSET,
- &status32);
- if (!rc)
- offset += snprintf(buf + offset, out_count - offset,
- "UNCERRSTS - \t%#010x\n", status32);
-
- rc = pci_read_config_dword(ndev->pdev, SNB_CORERRSTS_OFFSET,
- &status32);
- if (!rc)
- offset += snprintf(buf + offset, out_count - offset,
- "CORERRSTS - \t%#010x\n", status32);
- }
-
- if (offset > out_count)
- offset = out_count;
-
- ret = simple_read_from_buffer(ubuf, count, offp, buf, offset);
- kfree(buf);
- return ret;
-}
-
-static const struct file_operations ntb_debugfs_info = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ntb_debugfs_read,
-};
-
-static void ntb_setup_debugfs(struct ntb_device *ndev)
-{
- if (!debugfs_initialized())
- return;
-
- if (!debugfs_dir)
- debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
-
- ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->pdev),
- debugfs_dir);
- if (ndev->debugfs_dir)
- ndev->debugfs_info = debugfs_create_file("info", S_IRUSR,
- ndev->debugfs_dir,
- ndev,
- &ntb_debugfs_info);
-}
-
-static void ntb_free_debugfs(struct ntb_device *ndev)
-{
- debugfs_remove_recursive(ndev->debugfs_dir);
-
- if (debugfs_dir && simple_empty(debugfs_dir)) {
- debugfs_remove_recursive(debugfs_dir);
- debugfs_dir = NULL;
- }
-}
-
-static void ntb_hw_link_up(struct ntb_device *ndev)
-{
- if (ndev->conn_type == NTB_CONN_TRANSPARENT)
- ntb_link_event(ndev, NTB_LINK_UP);
- else {
- u32 ntb_cntl;
-
- /* Let's bring the NTB link up */
- ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
- ntb_cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
- ntb_cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
- ntb_cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
- if (ndev->split_bar)
- ntb_cntl |= NTB_CNTL_P2S_BAR5_SNOOP |
- NTB_CNTL_S2P_BAR5_SNOOP;
-
- writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
- }
-}
-
-static void ntb_hw_link_down(struct ntb_device *ndev)
-{
- u32 ntb_cntl;
-
- if (ndev->conn_type == NTB_CONN_TRANSPARENT) {
- ntb_link_event(ndev, NTB_LINK_DOWN);
- return;
- }
-
- /* Bring NTB link down */
- ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
- ntb_cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
- ntb_cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
- if (ndev->split_bar)
- ntb_cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP |
- NTB_CNTL_S2P_BAR5_SNOOP);
- ntb_cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
- writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
-}
-
-static void ntb_max_mw_detect(struct ntb_device *ndev)
-{
- if (ndev->split_bar)
- ndev->limits.max_mw = HSX_SPLITBAR_MAX_MW;
- else
- ndev->limits.max_mw = SNB_MAX_MW;
-}
-
-static int ntb_xeon_detect(struct ntb_device *ndev)
-{
- int rc, bars_mask;
- u32 bars;
- u8 ppd;
-
- ndev->hw_type = SNB_HW;
-
- rc = pci_read_config_byte(ndev->pdev, NTB_PPD_OFFSET, &ppd);
- if (rc)
- return -EIO;
-
- if (ppd & SNB_PPD_DEV_TYPE)
- ndev->dev_type = NTB_DEV_USD;
- else
- ndev->dev_type = NTB_DEV_DSD;
-
- ndev->split_bar = (ppd & SNB_PPD_SPLIT_BAR) ? 1 : 0;
-
- switch (ppd & SNB_PPD_CONN_TYPE) {
- case NTB_CONN_B2B:
- dev_info(&ndev->pdev->dev, "Conn Type = B2B\n");
- ndev->conn_type = NTB_CONN_B2B;
- break;
- case NTB_CONN_RP:
- dev_info(&ndev->pdev->dev, "Conn Type = RP\n");
- ndev->conn_type = NTB_CONN_RP;
- break;
- case NTB_CONN_TRANSPARENT:
- dev_info(&ndev->pdev->dev, "Conn Type = TRANSPARENT\n");
- ndev->conn_type = NTB_CONN_TRANSPARENT;
- /*
- * This mode defaults to USD/DSP. The HW does not report properly
- * in transparent mode, as it has no knowledge of the NTB. Just
- * force the correct setting here.
- */
- ndev->dev_type = NTB_DEV_USD;
-
- /*
- * This is how the transparent side figures out whether split BAR
- * is in use: the HW on the transparent side has no way to know,
- * so it cannot set the PPD.
- */
- bars_mask = pci_select_bars(ndev->pdev, IORESOURCE_MEM);
- bars = hweight32(bars_mask);
- if (bars == (HSX_SPLITBAR_MAX_MW + 1))
- ndev->split_bar = 1;
-
- break;
- default:
- dev_err(&ndev->pdev->dev, "Unknown PPD %x\n", ppd);
- return -ENODEV;
- }
-
- ntb_max_mw_detect(ndev);
-
- return 0;
-}
-
-static int ntb_atom_detect(struct ntb_device *ndev)
-{
- int rc;
- u32 ppd;
-
- ndev->hw_type = BWD_HW;
- ndev->limits.max_mw = BWD_MAX_MW;
-
- rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd);
- if (rc)
- return rc;
-
- switch ((ppd & BWD_PPD_CONN_TYPE) >> 8) {
- case NTB_CONN_B2B:
- dev_info(&ndev->pdev->dev, "Conn Type = B2B\n");
- ndev->conn_type = NTB_CONN_B2B;
- break;
- case NTB_CONN_RP:
- default:
- dev_err(&ndev->pdev->dev, "Unsupported NTB configuration\n");
- return -EINVAL;
- }
-
- if (ppd & BWD_PPD_DEV_TYPE)
- ndev->dev_type = NTB_DEV_DSD;
- else
- ndev->dev_type = NTB_DEV_USD;
-
- return 0;
-}
-
-static int ntb_device_detect(struct ntb_device *ndev)
-{
- int rc;
-
- if (is_ntb_xeon(ndev))
- rc = ntb_xeon_detect(ndev);
- else if (is_ntb_atom(ndev))
- rc = ntb_atom_detect(ndev);
- else
- rc = -ENODEV;
-
- dev_info(&ndev->pdev->dev, "Device Type = %s\n",
- ndev->dev_type == NTB_DEV_USD ? "USD/DSP" : "DSD/USP");
-
- return rc;
-}
-
-static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct ntb_device *ndev;
- int rc, i;
-
- ndev = kzalloc(sizeof(struct ntb_device), GFP_KERNEL);
- if (!ndev)
- return -ENOMEM;
-
- ndev->pdev = pdev;
-
- ntb_set_errata_flags(ndev);
-
- ndev->link_status = NTB_LINK_DOWN;
- pci_set_drvdata(pdev, ndev);
- ntb_setup_debugfs(ndev);
-
- rc = pci_enable_device(pdev);
- if (rc)
- goto err;
-
- pci_set_master(ndev->pdev);
-
- rc = ntb_device_detect(ndev);
- if (rc)
- goto err;
-
- ndev->mw = kcalloc(ndev->limits.max_mw, sizeof(struct ntb_mw),
- GFP_KERNEL);
- if (!ndev->mw) {
- rc = -ENOMEM;
- goto err1;
- }
-
- if (ndev->split_bar)
- rc = pci_request_selected_regions(pdev, NTB_SPLITBAR_MASK,
- KBUILD_MODNAME);
- else
- rc = pci_request_selected_regions(pdev, NTB_BAR_MASK,
- KBUILD_MODNAME);
-
- if (rc)
- goto err2;
-
- ndev->reg_base = pci_ioremap_bar(pdev, NTB_BAR_MMIO);
- if (!ndev->reg_base) {
- dev_warn(&pdev->dev, "Cannot remap BAR 0\n");
- rc = -EIO;
- goto err3;
- }
-
- for (i = 0; i < ndev->limits.max_mw; i++) {
- ndev->mw[i].bar_sz = pci_resource_len(pdev, MW_TO_BAR(i));
-
- /*
- * With the errata workaround we steal the last of the memory
- * windows for register accesses, so it points at MMIO registers
- * rather than memory.
- */
- if ((ndev->wa_flags & WA_SNB_ERR) &&
- (i == (ndev->limits.max_mw - 1))) {
- ndev->mw[i].vbase =
- ioremap_nocache(pci_resource_start(pdev,
- MW_TO_BAR(i)),
- ndev->mw[i].bar_sz);
- } else {
- ndev->mw[i].vbase =
- ioremap_wc(pci_resource_start(pdev,
- MW_TO_BAR(i)),
- ndev->mw[i].bar_sz);
- }
-
- dev_info(&pdev->dev, "MW %d size %llu\n", i,
- (unsigned long long) ndev->mw[i].bar_sz);
- if (!ndev->mw[i].vbase) {
- dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
- MW_TO_BAR(i));
- rc = -EIO;
- goto err4;
- }
- }
-
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc)
- goto err4;
-
- dev_warn(&pdev->dev, "Cannot DMA highmem\n");
- }
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc)
- goto err4;
-
- dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
- }
-
- rc = ntb_device_setup(ndev);
- if (rc)
- goto err4;
-
- rc = ntb_create_callbacks(ndev);
- if (rc)
- goto err5;
-
- rc = ntb_setup_interrupts(ndev);
- if (rc)
- goto err6;
-
- /* The scratchpad registers keep their values across rmmod/insmod;
- * clear them now.
- */
- for (i = 0; i < ndev->limits.max_spads; i++) {
- ntb_write_local_spad(ndev, i, 0);
- ntb_write_remote_spad(ndev, i, 0);
- }
-
- rc = ntb_transport_init(pdev);
- if (rc)
- goto err7;
-
- ntb_hw_link_up(ndev);
-
- return 0;
-
-err7:
- ntb_free_interrupts(ndev);
-err6:
- ntb_free_callbacks(ndev);
-err5:
- ntb_device_free(ndev);
-err4:
- for (i--; i >= 0; i--)
- iounmap(ndev->mw[i].vbase);
- iounmap(ndev->reg_base);
-err3:
- if (ndev->split_bar)
- pci_release_selected_regions(pdev, NTB_SPLITBAR_MASK);
- else
- pci_release_selected_regions(pdev, NTB_BAR_MASK);
-err2:
- kfree(ndev->mw);
-err1:
- pci_disable_device(pdev);
-err:
- ntb_free_debugfs(ndev);
- kfree(ndev);
-
- dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
- return rc;
-}
-
-static void ntb_pci_remove(struct pci_dev *pdev)
-{
- struct ntb_device *ndev = pci_get_drvdata(pdev);
- int i;
-
- ntb_hw_link_down(ndev);
-
- ntb_transport_free(ndev->ntb_transport);
-
- ntb_free_interrupts(ndev);
- ntb_free_callbacks(ndev);
- ntb_device_free(ndev);
-
- /* need to reset max_mw limits so we can unmap properly */
- if (ndev->hw_type == SNB_HW)
- ntb_max_mw_detect(ndev);
-
- for (i = 0; i < ndev->limits.max_mw; i++)
- iounmap(ndev->mw[i].vbase);
-
- kfree(ndev->mw);
- iounmap(ndev->reg_base);
- if (ndev->split_bar)
- pci_release_selected_regions(pdev, NTB_SPLITBAR_MASK);
- else
- pci_release_selected_regions(pdev, NTB_BAR_MASK);
- pci_disable_device(pdev);
- ntb_free_debugfs(ndev);
- kfree(ndev);
-}
-
-static struct pci_driver ntb_pci_driver = {
- .name = KBUILD_MODNAME,
- .id_table = ntb_pci_tbl,
- .probe = ntb_pci_probe,
- .remove = ntb_pci_remove,
-};
-
-module_pci_driver(ntb_pci_driver);
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
deleted file mode 100644
index 96de5fc95f90..000000000000
--- a/drivers/ntb/ntb_hw.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel PCIe NTB Linux driver
- *
- * Contact Information:
- * Jon Mason <jon.mason@intel.com>
- */
-#include <linux/ntb.h>
-
-#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
-#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726
-#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF 0x3727
-#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB 0x3C0D
-#define PCI_DEVICE_ID_INTEL_NTB_PS_SNB 0x3C0E
-#define PCI_DEVICE_ID_INTEL_NTB_SS_SNB 0x3C0F
-#define PCI_DEVICE_ID_INTEL_NTB_B2B_IVT 0x0E0D
-#define PCI_DEVICE_ID_INTEL_NTB_PS_IVT 0x0E0E
-#define PCI_DEVICE_ID_INTEL_NTB_SS_IVT 0x0E0F
-#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX 0x2F0D
-#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E
-#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
-#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
-
-#ifndef readq
-static inline u64 readq(void __iomem *addr)
-{
- return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(u64 val, void __iomem *addr)
-{
- writel(val & 0xffffffff, addr);
- writel(val >> 32, addr + 4);
-}
-#endif
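/*
 * Editor's note: these fallbacks emulate one 64bit MMIO access with two
 * 32bit accesses on platforms lacking native readq/writeq, so they are
 * not atomic; a concurrent update can be observed half-written. Callers
 * must either own the register exclusively or tolerate a torn value.
 */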
-
-#define NTB_BAR_MMIO 0
-#define NTB_BAR_23 2
-#define NTB_BAR_4 4
-#define NTB_BAR_5 5
-
-#define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
- (1 << NTB_BAR_4))
-#define NTB_SPLITBAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
- (1 << NTB_BAR_4) | (1 << NTB_BAR_5))
-
-#define NTB_HB_TIMEOUT msecs_to_jiffies(1000)
-
-enum ntb_hw_event {
- NTB_EVENT_SW_EVENT0 = 0,
- NTB_EVENT_SW_EVENT1,
- NTB_EVENT_SW_EVENT2,
- NTB_EVENT_HW_ERROR,
- NTB_EVENT_HW_LINK_UP,
- NTB_EVENT_HW_LINK_DOWN,
-};
-
-struct ntb_mw {
- dma_addr_t phys_addr;
- void __iomem *vbase;
- resource_size_t bar_sz;
-};
-
-struct ntb_db_cb {
- int (*callback)(void *data, int db_num);
- unsigned int db_num;
- void *data;
- struct ntb_device *ndev;
- struct tasklet_struct irq_work;
-};
-
-#define WA_SNB_ERR 0x00000001
-
-struct ntb_device {
- struct pci_dev *pdev;
- struct msix_entry *msix_entries;
- void __iomem *reg_base;
- struct ntb_mw *mw;
- struct {
- unsigned char max_mw;
- unsigned char max_spads;
- unsigned char max_db_bits;
- unsigned char msix_cnt;
- } limits;
- struct {
- void __iomem *ldb;
- void __iomem *ldb_mask;
- void __iomem *rdb;
- void __iomem *bar2_xlat;
- void __iomem *bar4_xlat;
- void __iomem *bar5_xlat;
- void __iomem *spad_write;
- void __iomem *spad_read;
- void __iomem *lnk_cntl;
- void __iomem *lnk_stat;
- void __iomem *spci_cmd;
- } reg_ofs;
- struct ntb_transport *ntb_transport;
- void (*event_cb)(void *handle, enum ntb_hw_event event);
-
- struct ntb_db_cb *db_cb;
- unsigned char hw_type;
- unsigned char conn_type;
- unsigned char dev_type;
- unsigned char num_msix;
- unsigned char bits_per_vector;
- unsigned char max_cbs;
- unsigned char link_width;
- unsigned char link_speed;
- unsigned char link_status;
- unsigned char split_bar;
-
- struct delayed_work hb_timer;
- unsigned long last_ts;
-
- struct delayed_work lr_timer;
-
- struct dentry *debugfs_dir;
- struct dentry *debugfs_info;
-
- unsigned int wa_flags;
-};
-
-/**
- * ntb_max_cbs() - return the max callbacks
- * @ndev: pointer to ntb_device instance
- *
- * Given the ntb pointer, return the maximum number of callbacks
- *
- * RETURNS: the maximum number of callbacks
- */
-static inline unsigned char ntb_max_cbs(struct ntb_device *ndev)
-{
- return ndev->max_cbs;
-}
-
-/**
- * ntb_max_mw() - return the max number of memory windows
- * @ndev: pointer to ntb_device instance
- *
- * Given the ntb pointer, return the maximum number of memory windows
- *
- * RETURNS: the maximum number of memory windows
- */
-static inline unsigned char ntb_max_mw(struct ntb_device *ndev)
-{
- return ndev->limits.max_mw;
-}
-
-/**
- * ntb_hw_link_status() - return the hardware link status
- * @ndev: pointer to ntb_device instance
- *
- * Returns true if the hardware is connected to the remote system
- *
- * RETURNS: true or false based on the hardware link state
- */
-static inline bool ntb_hw_link_status(struct ntb_device *ndev)
-{
- return ndev->link_status == NTB_LINK_UP;
-}
-
-/**
- * ntb_query_pdev() - return the pci_dev pointer
- * @ndev: pointer to ntb_device instance
- *
- * Given the ntb pointer, return the pci_dev pointer for the NTB hardware device
- *
- * RETURNS: a pointer to the ntb pci_dev
- */
-static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
-{
- return ndev->pdev;
-}
-
-/**
- * ntb_query_debugfs() - return the debugfs pointer
- * @ndev: pointer to ntb_device instance
- *
- * Given the ntb pointer, return the debugfs directory pointer for the NTB
- * hardware device
- *
- * RETURNS: a pointer to the debugfs directory
- */
-static inline struct dentry *ntb_query_debugfs(struct ntb_device *ndev)
-{
- return ndev->debugfs_dir;
-}
-
-struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
- void *transport);
-void ntb_unregister_transport(struct ntb_device *ndev);
-void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
-int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
- void *data, int (*db_cb_func)(void *data,
- int db_num));
-void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
-int ntb_register_event_callback(struct ntb_device *ndev,
- void (*event_cb_func)(void *handle,
- enum ntb_hw_event event));
-void ntb_unregister_event_callback(struct ntb_device *ndev);
-int ntb_get_max_spads(struct ntb_device *ndev);
-int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
-int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
-int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
-int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
-resource_size_t ntb_get_mw_base(struct ntb_device *ndev, unsigned int mw);
-void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
-u64 ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
-void ntb_ring_doorbell(struct ntb_device *ndev, unsigned int idx);
-void *ntb_find_transport(struct pci_dev *pdev);
-
-int ntb_transport_init(struct pci_dev *pdev);
-void ntb_transport_free(void *transport);
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
deleted file mode 100644
index f028ff81fd77..000000000000
--- a/drivers/ntb/ntb_regs.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel PCIe NTB Linux driver
- *
- * Contact Information:
- * Jon Mason <jon.mason@intel.com>
- */
-
-#define NTB_LINK_STATUS_ACTIVE 0x2000
-#define NTB_LINK_SPEED_MASK 0x000f
-#define NTB_LINK_WIDTH_MASK 0x03f0
-
-#define SNB_MSIX_CNT 4
-#define SNB_MAX_B2B_SPADS 16
-#define SNB_MAX_COMPAT_SPADS 16
-/* Reserve the uppermost bit for link interrupt */
-#define SNB_MAX_DB_BITS 15
-#define SNB_LINK_DB 15
-#define SNB_DB_BITS_PER_VEC 5
-#define HSX_SPLITBAR_MAX_MW 3
-#define SNB_MAX_MW 2
-#define SNB_ERRATA_MAX_MW 1
-
-#define SNB_DB_HW_LINK 0x8000
-
-#define SNB_UNCERRSTS_OFFSET 0x014C
-#define SNB_CORERRSTS_OFFSET 0x0158
-#define SNB_LINK_STATUS_OFFSET 0x01A2
-#define SNB_PCICMD_OFFSET 0x0504
-#define SNB_DEVCTRL_OFFSET 0x0598
-#define SNB_DEVSTS_OFFSET 0x059A
-#define SNB_SLINK_STATUS_OFFSET 0x05A2
-
-#define SNB_PBAR2LMT_OFFSET 0x0000
-#define SNB_PBAR4LMT_OFFSET 0x0008
-#define SNB_PBAR5LMT_OFFSET 0x000C
-#define SNB_PBAR2XLAT_OFFSET 0x0010
-#define SNB_PBAR4XLAT_OFFSET 0x0018
-#define SNB_PBAR5XLAT_OFFSET 0x001C
-#define SNB_SBAR2LMT_OFFSET 0x0020
-#define SNB_SBAR4LMT_OFFSET 0x0028
-#define SNB_SBAR5LMT_OFFSET 0x002C
-#define SNB_SBAR2XLAT_OFFSET 0x0030
-#define SNB_SBAR4XLAT_OFFSET 0x0038
-#define SNB_SBAR5XLAT_OFFSET 0x003C
-#define SNB_SBAR0BASE_OFFSET 0x0040
-#define SNB_SBAR2BASE_OFFSET 0x0048
-#define SNB_SBAR4BASE_OFFSET 0x0050
-#define SNB_SBAR5BASE_OFFSET 0x0054
-#define SNB_NTBCNTL_OFFSET 0x0058
-#define SNB_SBDF_OFFSET 0x005C
-#define SNB_PDOORBELL_OFFSET 0x0060
-#define SNB_PDBMSK_OFFSET 0x0062
-#define SNB_SDOORBELL_OFFSET 0x0064
-#define SNB_SDBMSK_OFFSET 0x0066
-#define SNB_USMEMMISS_OFFSET 0x0070
-#define SNB_SPAD_OFFSET 0x0080
-#define SNB_SPADSEMA4_OFFSET 0x00c0
-#define SNB_WCCNTRL_OFFSET 0x00e0
-#define SNB_B2B_SPAD_OFFSET 0x0100
-#define SNB_B2B_DOORBELL_OFFSET 0x0140
-#define SNB_B2B_XLAT_OFFSETL 0x0144
-#define SNB_B2B_XLAT_OFFSETU 0x0148
-
-/*
- * The addresses are set up so the 32bit BARs can function. Thus
- * the addresses are all in 32bit space.
- */
-#define SNB_MBAR01_USD_ADDR 0x000000002100000CULL
-#define SNB_MBAR23_USD_ADDR 0x000000004100000CULL
-#define SNB_MBAR4_USD_ADDR 0x000000008100000CULL
-#define SNB_MBAR5_USD_ADDR 0x00000000A100000CULL
-#define SNB_MBAR01_DSD_ADDR 0x000000002000000CULL
-#define SNB_MBAR23_DSD_ADDR 0x000000004000000CULL
-#define SNB_MBAR4_DSD_ADDR 0x000000008000000CULL
-#define SNB_MBAR5_DSD_ADDR 0x00000000A000000CULL
-
-#define BWD_MSIX_CNT 34
-#define BWD_MAX_SPADS 16
-#define BWD_MAX_DB_BITS 34
-#define BWD_DB_BITS_PER_VEC 1
-#define BWD_MAX_MW 2
-
-#define BWD_PCICMD_OFFSET 0xb004
-#define BWD_MBAR23_OFFSET 0xb018
-#define BWD_MBAR45_OFFSET 0xb020
-#define BWD_DEVCTRL_OFFSET 0xb048
-#define BWD_LINK_STATUS_OFFSET 0xb052
-#define BWD_ERRCORSTS_OFFSET 0xb110
-
-#define BWD_SBAR2XLAT_OFFSET 0x0008
-#define BWD_SBAR4XLAT_OFFSET 0x0010
-#define BWD_PDOORBELL_OFFSET 0x0020
-#define BWD_PDBMSK_OFFSET 0x0028
-#define BWD_NTBCNTL_OFFSET 0x0060
-#define BWD_EBDF_OFFSET 0x0064
-#define BWD_SPAD_OFFSET 0x0080
-#define BWD_SPADSEMA_OFFSET 0x00c0
-#define BWD_STKYSPAD_OFFSET 0x00c4
-#define BWD_PBAR2XLAT_OFFSET 0x8008
-#define BWD_PBAR4XLAT_OFFSET 0x8010
-#define BWD_B2B_DOORBELL_OFFSET 0x8020
-#define BWD_B2B_SPAD_OFFSET 0x8080
-#define BWD_B2B_SPADSEMA_OFFSET 0x80c0
-#define BWD_B2B_STKYSPAD_OFFSET 0x80c4
-
-#define BWD_MODPHY_PCSREG4 0x1c004
-#define BWD_MODPHY_PCSREG6 0x1c006
-
-#define BWD_IP_BASE 0xC000
-#define BWD_DESKEWSTS_OFFSET (BWD_IP_BASE + 0x3024)
-#define BWD_LTSSMERRSTS0_OFFSET (BWD_IP_BASE + 0x3180)
-#define BWD_LTSSMSTATEJMP_OFFSET (BWD_IP_BASE + 0x3040)
-#define BWD_IBSTERRRCRVSTS0_OFFSET (BWD_IP_BASE + 0x3324)
-
-#define BWD_DESKEWSTS_DBERR (1 << 15)
-#define BWD_LTSSMERRSTS0_UNEXPECTEDEI (1 << 20)
-#define BWD_LTSSMSTATEJMP_FORCEDETECT (1 << 2)
-#define BWD_IBIST_ERR_OFLOW 0x7FFF7FFF
-
-#define NTB_CNTL_CFG_LOCK (1 << 0)
-#define NTB_CNTL_LINK_DISABLE (1 << 1)
-#define NTB_CNTL_S2P_BAR23_SNOOP (1 << 2)
-#define NTB_CNTL_P2S_BAR23_SNOOP (1 << 4)
-#define NTB_CNTL_S2P_BAR4_SNOOP (1 << 6)
-#define NTB_CNTL_P2S_BAR4_SNOOP (1 << 8)
-#define NTB_CNTL_S2P_BAR5_SNOOP (1 << 12)
-#define NTB_CNTL_P2S_BAR5_SNOOP (1 << 14)
-#define BWD_CNTL_LINK_DOWN (1 << 16)
-
-#define NTB_PPD_OFFSET 0x00D4
-#define SNB_PPD_CONN_TYPE 0x0003
-#define SNB_PPD_DEV_TYPE 0x0010
-#define SNB_PPD_SPLIT_BAR (1 << 6)
-#define BWD_PPD_INIT_LINK 0x0008
-#define BWD_PPD_CONN_TYPE 0x0300
-#define BWD_PPD_DEV_TYPE 0x1000
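/*
 * Editor's sketch: decoding a hypothetical PPD value with the BWD masks
 * above, mirroring what ntb_atom_detect() and ntb_bwd_setup() do:
 */
#include <stdio.h>

#define BWD_PPD_INIT_LINK 0x0008
#define BWD_PPD_CONN_TYPE 0x0300
#define BWD_PPD_DEV_TYPE 0x1000

int main(void)
{
	unsigned ppd = 0x1008; /* hypothetical register value */

	printf("conn type field: %u\n", (ppd & BWD_PPD_CONN_TYPE) >> 8);
	printf("dev type:        %s\n",
	       (ppd & BWD_PPD_DEV_TYPE) ? "DSD" : "USD");
	printf("link init set:   %s\n",
	       (ppd & BWD_PPD_INIT_LINK) ? "yes" : "no");
	return 0;
}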
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e9bf2f47b61a..efe3ad4122f2 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -5,6 +5,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -13,6 +14,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +42,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Intel PCIe NTB Linux driver
+ * PCIe NTB Transport Linux driver
*
* Contact Information:
* Jon Mason <jon.mason@intel.com>
@@ -56,11 +58,25 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include "ntb_hw.h"
+#include <linux/uaccess.h>
+#include "linux/ntb.h"
+#include "linux/ntb_transport.h"
-#define NTB_TRANSPORT_VERSION 3
+#define NTB_TRANSPORT_VERSION 4
+#define NTB_TRANSPORT_VER "4"
+#define NTB_TRANSPORT_NAME "ntb_transport"
+#define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB"
-static unsigned int transport_mtu = 0x401E;
+MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
+MODULE_VERSION(NTB_TRANSPORT_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static unsigned long max_mw_size;
+module_param(max_mw_size, ulong, 0644);
+MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
+
+static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
@@ -72,10 +88,16 @@ static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
+static bool use_dma;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
+
+static struct dentry *nt_debugfs_dir;
+
struct ntb_queue_entry {
/* ntb_queue list reference */
struct list_head entry;
- /* pointers to data to be transfered */
+ /* pointers to data to be transferred */
void *cb_data;
void *buf;
unsigned int len;
@@ -94,14 +116,16 @@ struct ntb_rx_info {
};
struct ntb_transport_qp {
- struct ntb_transport *transport;
- struct ntb_device *ndev;
+ struct ntb_transport_ctx *transport;
+ struct ntb_dev *ndev;
void *cb_data;
struct dma_chan *dma_chan;
bool client_ready;
- bool qp_link;
+ bool link_is_up;
+
u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
+ u64 qp_bit;
struct ntb_rx_info __iomem *rx_info;
struct ntb_rx_info *remote_rx_info;
@@ -127,6 +151,7 @@ struct ntb_transport_qp {
unsigned int rx_max_entry;
unsigned int rx_max_frame;
dma_cookie_t last_cookie;
+ struct tasklet_struct rxc_db_work;
void (*event_handler)(void *data, int status);
struct delayed_work link_work;
@@ -153,33 +178,44 @@ struct ntb_transport_qp {
};
struct ntb_transport_mw {
- size_t size;
+ phys_addr_t phys_addr;
+ resource_size_t phys_size;
+ resource_size_t xlat_align;
+ resource_size_t xlat_align_size;
+ void __iomem *vbase;
+ size_t xlat_size;
+ size_t buff_size;
void *virt_addr;
dma_addr_t dma_addr;
};
struct ntb_transport_client_dev {
struct list_head entry;
+ struct ntb_transport_ctx *nt;
struct device dev;
};
-struct ntb_transport {
+struct ntb_transport_ctx {
struct list_head entry;
struct list_head client_devs;
- struct ntb_device *ndev;
- struct ntb_transport_mw *mw;
- struct ntb_transport_qp *qps;
- unsigned int max_qps;
- unsigned long qp_bitmap;
- bool transport_link;
+ struct ntb_dev *ndev;
+
+ struct ntb_transport_mw *mw_vec;
+ struct ntb_transport_qp *qp_vec;
+ unsigned int mw_count;
+ unsigned int qp_count;
+ u64 qp_bitmap;
+ u64 qp_bitmap_free;
+
+ bool link_is_up;
struct delayed_work link_work;
struct work_struct link_cleanup;
};
enum {
- DESC_DONE_FLAG = 1 << 0,
- LINK_DOWN_FLAG = 1 << 1,
+ DESC_DONE_FLAG = BIT(0),
+ LINK_DOWN_FLAG = BIT(1),
};
struct ntb_payload_header {
@@ -200,68 +236,69 @@ enum {
MAX_SPAD,
};
-#define QP_TO_MW(ndev, qp) ((qp) % ntb_max_mw(ndev))
+#define dev_client_dev(__dev) \
+ container_of((__dev), struct ntb_transport_client_dev, dev)
+
+#define drv_client(__drv) \
+ container_of((__drv), struct ntb_transport_client, driver)
+
+#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10
-static int ntb_match_bus(struct device *dev, struct device_driver *drv)
+static void ntb_transport_rxc_db(unsigned long data);
+static const struct ntb_ctx_ops ntb_transport_ops;
+static struct ntb_client ntb_transport_client;
+
+static int ntb_transport_bus_match(struct device *dev,
+ struct device_driver *drv)
{
return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}
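/*
 * Editor's note: the match above is a pure prefix comparison, so a device
 * named "ntb_netdev0" (hypothetical) binds to a driver named "ntb_netdev".
 * Equivalent stand-alone check:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *dev = "ntb_netdev0", *drv = "ntb_netdev";

	printf("match: %d\n", !strncmp(dev, drv, strlen(drv))); /* prints 1 */
	return 0;
}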
-static int ntb_client_probe(struct device *dev)
+static int ntb_transport_bus_probe(struct device *dev)
{
- const struct ntb_client *drv = container_of(dev->driver,
- struct ntb_client, driver);
- struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+ const struct ntb_transport_client *client;
int rc = -EINVAL;
get_device(dev);
- if (drv && drv->probe)
- rc = drv->probe(pdev);
+
+ client = drv_client(dev->driver);
+ rc = client->probe(dev);
if (rc)
put_device(dev);
return rc;
}
-static int ntb_client_remove(struct device *dev)
+static int ntb_transport_bus_remove(struct device *dev)
{
- const struct ntb_client *drv = container_of(dev->driver,
- struct ntb_client, driver);
- struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+ const struct ntb_transport_client *client;
- if (drv && drv->remove)
- drv->remove(pdev);
+ client = drv_client(dev->driver);
+ client->remove(dev);
put_device(dev);
return 0;
}
-static struct bus_type ntb_bus_type = {
- .name = "ntb_bus",
- .match = ntb_match_bus,
- .probe = ntb_client_probe,
- .remove = ntb_client_remove,
+static struct bus_type ntb_transport_bus = {
+ .name = "ntb_transport",
+ .match = ntb_transport_bus_match,
+ .probe = ntb_transport_bus_probe,
+ .remove = ntb_transport_bus_remove,
};
static LIST_HEAD(ntb_transport_list);
-static int ntb_bus_init(struct ntb_transport *nt)
+static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
- if (list_empty(&ntb_transport_list)) {
- int rc = bus_register(&ntb_bus_type);
- if (rc)
- return rc;
- }
-
list_add(&nt->entry, &ntb_transport_list);
-
return 0;
}
-static void ntb_bus_remove(struct ntb_transport *nt)
+static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
struct ntb_transport_client_dev *client_dev, *cd;
@@ -273,29 +310,26 @@ static void ntb_bus_remove(struct ntb_transport *nt)
}
list_del(&nt->entry);
-
- if (list_empty(&ntb_transport_list))
- bus_unregister(&ntb_bus_type);
}
-static void ntb_client_release(struct device *dev)
+static void ntb_transport_client_release(struct device *dev)
{
struct ntb_transport_client_dev *client_dev;
- client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
+ client_dev = dev_client_dev(dev);
kfree(client_dev);
}
/**
- * ntb_unregister_client_dev - Unregister NTB client device
+ * ntb_transport_unregister_client_dev - Unregister NTB client device
* @device_name: Name of NTB client device
*
* Unregister an NTB client device with the NTB transport layer
*/
-void ntb_unregister_client_dev(char *device_name)
+void ntb_transport_unregister_client_dev(char *device_name)
{
struct ntb_transport_client_dev *client, *cd;
- struct ntb_transport *nt;
+ struct ntb_transport_ctx *nt;
list_for_each_entry(nt, &ntb_transport_list, entry)
list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
@@ -305,18 +339,19 @@ void ntb_unregister_client_dev(char *device_name)
device_unregister(&client->dev);
}
}
-EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
+EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
/**
- * ntb_register_client_dev - Register NTB client device
+ * ntb_transport_register_client_dev - Register NTB client device
* @device_name: Name of NTB client device
*
* Register an NTB client device with the NTB transport layer
*/
-int ntb_register_client_dev(char *device_name)
+int ntb_transport_register_client_dev(char *device_name)
{
struct ntb_transport_client_dev *client_dev;
- struct ntb_transport *nt;
+ struct ntb_transport_ctx *nt;
+ int node;
int rc, i = 0;
if (list_empty(&ntb_transport_list))
@@ -325,8 +360,10 @@ int ntb_register_client_dev(char *device_name)
list_for_each_entry(nt, &ntb_transport_list, entry) {
struct device *dev;
- client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
- GFP_KERNEL);
+ node = dev_to_node(&nt->ndev->dev);
+
+ client_dev = kzalloc_node(sizeof(*client_dev),
+ GFP_KERNEL, node);
if (!client_dev) {
rc = -ENOMEM;
goto err;
@@ -336,9 +373,9 @@ int ntb_register_client_dev(char *device_name)
/* setup and register client devices */
dev_set_name(dev, "%s%d", device_name, i);
- dev->bus = &ntb_bus_type;
- dev->release = ntb_client_release;
- dev->parent = &ntb_query_pdev(nt->ndev)->dev;
+ dev->bus = &ntb_transport_bus;
+ dev->release = ntb_transport_client_release;
+ dev->parent = &nt->ndev->dev;
rc = device_register(dev);
if (rc) {
@@ -353,44 +390,44 @@ int ntb_register_client_dev(char *device_name)
return 0;
err:
- ntb_unregister_client_dev(device_name);
+ ntb_transport_unregister_client_dev(device_name);
return rc;
}
-EXPORT_SYMBOL_GPL(ntb_register_client_dev);
+EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
/**
- * ntb_register_client - Register NTB client driver
+ * ntb_transport_register_client - Register NTB client driver
* @drv: NTB client driver to be registered
*
* Register an NTB client driver with the NTB transport layer
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-int ntb_register_client(struct ntb_client *drv)
+int ntb_transport_register_client(struct ntb_transport_client *drv)
{
- drv->driver.bus = &ntb_bus_type;
+ drv->driver.bus = &ntb_transport_bus;
if (list_empty(&ntb_transport_list))
return -ENODEV;
return driver_register(&drv->driver);
}
-EXPORT_SYMBOL_GPL(ntb_register_client);
+EXPORT_SYMBOL_GPL(ntb_transport_register_client);
/**
- * ntb_unregister_client - Unregister NTB client driver
+ * ntb_transport_unregister_client - Unregister NTB client driver
* @drv: NTB client driver to be unregistered
*
* Unregister an NTB client driver with the NTB transport layer
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-void ntb_unregister_client(struct ntb_client *drv)
+void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
driver_unregister(&drv->driver);
}
-EXPORT_SYMBOL_GPL(ntb_unregister_client);
+EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
loff_t *offp)
@@ -452,8 +489,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
"tx_max_entry - \t%u\n", qp->tx_max_entry);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
- "Up" : "Down");
+ "\nQP Link %s\n",
+ qp->link_is_up ? "Up" : "Down");
if (out_offset > out_count)
out_offset = out_count;
@@ -497,26 +534,31 @@ out:
return entry;
}
-static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
- unsigned int qp_num)
+static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
+ unsigned int qp_num)
{
- struct ntb_transport_qp *qp = &nt->qps[qp_num];
+ struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
+ struct ntb_transport_mw *mw;
unsigned int rx_size, num_qps_mw;
- u8 mw_num, mw_max;
+ unsigned int mw_num, mw_count, qp_count;
unsigned int i;
- mw_max = ntb_max_mw(nt->ndev);
- mw_num = QP_TO_MW(nt->ndev, qp_num);
+ mw_count = nt->mw_count;
+ qp_count = nt->qp_count;
+
+ mw_num = QP_TO_MW(nt, qp_num);
+ mw = &nt->mw_vec[mw_num];
- WARN_ON(nt->mw[mw_num].virt_addr == NULL);
+ if (!mw->virt_addr)
+ return -ENOMEM;
- if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
- num_qps_mw = nt->max_qps / mw_max + 1;
+ if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+ num_qps_mw = qp_count / mw_count + 1;
else
- num_qps_mw = nt->max_qps / mw_max;
+ num_qps_mw = qp_count / mw_count;
- rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
- qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
+ rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
+ qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count;
rx_size -= sizeof(struct ntb_rx_info);
qp->remote_rx_info = qp->rx_buff + rx_size;
@@ -530,49 +572,63 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
/* setup the hdr offsets with 0's */
for (i = 0; i < qp->rx_max_entry; i++) {
- void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
- sizeof(struct ntb_payload_header);
+ void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
+ sizeof(struct ntb_payload_header));
memset(offset, 0, sizeof(struct ntb_payload_header));
}
qp->rx_pkts = 0;
qp->tx_pkts = 0;
qp->tx_index = 0;
+
+ return 0;
}
-static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
+static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
- struct ntb_transport_mw *mw = &nt->mw[num_mw];
- struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+ struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
+ struct pci_dev *pdev = nt->ndev->pdev;
if (!mw->virt_addr)
return;
- dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
+ ntb_mw_clear_trans(nt->ndev, num_mw);
+ dma_free_coherent(&pdev->dev, mw->buff_size,
+ mw->virt_addr, mw->dma_addr);
+ mw->xlat_size = 0;
+ mw->buff_size = 0;
mw->virt_addr = NULL;
}
-static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
+static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
+ unsigned int size)
{
- struct ntb_transport_mw *mw = &nt->mw[num_mw];
- struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+ struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
+ struct pci_dev *pdev = nt->ndev->pdev;
+ unsigned int xlat_size, buff_size;
+ int rc;
+
+ xlat_size = round_up(size, mw->xlat_align_size);
+ buff_size = round_up(size, mw->xlat_align);
/* No need to re-setup */
- if (mw->size == ALIGN(size, 4096))
+ if (mw->xlat_size == xlat_size)
return 0;
- if (mw->size != 0)
+ if (mw->buff_size)
ntb_free_mw(nt, num_mw);
- /* Alloc memory for receiving data. Must be 4k aligned */
- mw->size = ALIGN(size, 4096);
+ /* Alloc memory for receiving data. Must be aligned */
+ mw->xlat_size = xlat_size;
+ mw->buff_size = buff_size;
- mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
- GFP_KERNEL);
+ mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
+ &mw->dma_addr, GFP_KERNEL);
if (!mw->virt_addr) {
- mw->size = 0;
- dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
- (int) mw->size);
+ mw->xlat_size = 0;
+ mw->buff_size = 0;
+ dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+ buff_size);
return -ENOMEM;
}
@@ -582,34 +638,58 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
* is a requirement of the hardware. It is recommended to setup CMA
* for BAR sizes equal or greater than 4MB.
*/
- if (!IS_ALIGNED(mw->dma_addr, mw->size)) {
- dev_err(&pdev->dev, "DMA memory %pad not aligned to BAR size\n",
+ if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
+ dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
&mw->dma_addr);
ntb_free_mw(nt, num_mw);
return -ENOMEM;
}
/* Notify HW the memory location of the receive buffer */
- ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
+ rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
+ ntb_free_mw(nt, num_mw);
+ return -EIO;
+ }
return 0;
}
+static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+{
+ qp->link_is_up = false;
+
+ qp->tx_index = 0;
+ qp->rx_index = 0;
+ qp->rx_bytes = 0;
+ qp->rx_pkts = 0;
+ qp->rx_ring_empty = 0;
+ qp->rx_err_no_buf = 0;
+ qp->rx_err_oflow = 0;
+ qp->rx_err_ver = 0;
+ qp->rx_memcpy = 0;
+ qp->rx_async = 0;
+ qp->tx_bytes = 0;
+ qp->tx_pkts = 0;
+ qp->tx_ring_full = 0;
+ qp->tx_err_no_buf = 0;
+ qp->tx_memcpy = 0;
+ qp->tx_async = 0;
+}
+
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
- struct ntb_transport *nt = qp->transport;
- struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+ struct ntb_transport_ctx *nt = qp->transport;
+ struct pci_dev *pdev = nt->ndev->pdev;
- if (qp->qp_link == NTB_LINK_DOWN) {
- cancel_delayed_work_sync(&qp->link_work);
- return;
- }
+ dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);
- if (qp->event_handler)
- qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
+ cancel_delayed_work_sync(&qp->link_work);
+ ntb_qp_link_down_reset(qp);
- dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
- qp->qp_link = NTB_LINK_DOWN;
+ if (qp->event_handler)
+ qp->event_handler(qp->cb_data, qp->link_is_up);
}
static void ntb_qp_link_cleanup_work(struct work_struct *work)
@@ -617,11 +697,11 @@ static void ntb_qp_link_cleanup_work(struct work_struct *work)
struct ntb_transport_qp *qp = container_of(work,
struct ntb_transport_qp,
link_cleanup);
- struct ntb_transport *nt = qp->transport;
+ struct ntb_transport_ctx *nt = qp->transport;
ntb_qp_link_cleanup(qp);
- if (nt->transport_link == NTB_LINK_UP)
+ if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
@@ -631,180 +711,132 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp)
schedule_work(&qp->link_cleanup);
}
-static void ntb_transport_link_cleanup(struct ntb_transport *nt)
+static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
+ struct ntb_transport_qp *qp;
+ u64 qp_bitmap_alloc;
int i;
+ qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
+
/* Pass along the info to any clients */
- for (i = 0; i < nt->max_qps; i++)
- if (!test_bit(i, &nt->qp_bitmap))
- ntb_qp_link_cleanup(&nt->qps[i]);
+ for (i = 0; i < nt->qp_count; i++)
+ if (qp_bitmap_alloc & BIT_ULL(i)) {
+ qp = &nt->qp_vec[i];
+ ntb_qp_link_cleanup(qp);
+ cancel_work_sync(&qp->link_cleanup);
+ cancel_delayed_work_sync(&qp->link_work);
+ }
- if (nt->transport_link == NTB_LINK_DOWN)
+ if (!nt->link_is_up)
cancel_delayed_work_sync(&nt->link_work);
- else
- nt->transport_link = NTB_LINK_DOWN;
/* The scratchpad registers keep the values if the remote side
* goes down, blast them now to give them a sane value the next
* time they are accessed
*/
for (i = 0; i < MAX_SPAD; i++)
- ntb_write_local_spad(nt->ndev, i, 0);
+ ntb_spad_write(nt->ndev, i, 0);
}
static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
- struct ntb_transport *nt = container_of(work, struct ntb_transport,
- link_cleanup);
+ struct ntb_transport_ctx *nt =
+ container_of(work, struct ntb_transport_ctx, link_cleanup);
ntb_transport_link_cleanup(nt);
}
-static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
+static void ntb_transport_event_callback(void *data)
{
- struct ntb_transport *nt = data;
+ struct ntb_transport_ctx *nt = data;
- switch (event) {
- case NTB_EVENT_HW_LINK_UP:
+ if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
schedule_delayed_work(&nt->link_work, 0);
- break;
- case NTB_EVENT_HW_LINK_DOWN:
+ else
schedule_work(&nt->link_cleanup);
- break;
- default:
- BUG();
- }
}
static void ntb_transport_link_work(struct work_struct *work)
{
- struct ntb_transport *nt = container_of(work, struct ntb_transport,
- link_work.work);
- struct ntb_device *ndev = nt->ndev;
- struct pci_dev *pdev = ntb_query_pdev(ndev);
+ struct ntb_transport_ctx *nt =
+ container_of(work, struct ntb_transport_ctx, link_work.work);
+ struct ntb_dev *ndev = nt->ndev;
+ struct pci_dev *pdev = ndev->pdev;
+ resource_size_t size;
u32 val;
- int rc, i;
+ int rc, i, spad;
/* send the local info, in the opposite order of the way we read it */
- for (i = 0; i < ntb_max_mw(ndev); i++) {
- rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
- ntb_get_mw_size(ndev, i) >> 32);
- if (rc) {
- dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
- (u32)(ntb_get_mw_size(ndev, i) >> 32),
- MW0_SZ_HIGH + (i * 2));
- goto out;
- }
+ for (i = 0; i < nt->mw_count; i++) {
+ size = nt->mw_vec[i].phys_size;
- rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
- (u32) ntb_get_mw_size(ndev, i));
- if (rc) {
- dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
- (u32) ntb_get_mw_size(ndev, i),
- MW0_SZ_LOW + (i * 2));
- goto out;
- }
- }
+ if (max_mw_size && size > max_mw_size)
+ size = max_mw_size;
- rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
- if (rc) {
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- ntb_max_mw(ndev), NUM_MWS);
- goto out;
- }
+ spad = MW0_SZ_HIGH + (i * 2);
+ ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));
- rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
- if (rc) {
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- nt->max_qps, NUM_QPS);
- goto out;
+ spad = MW0_SZ_LOW + (i * 2);
+ ntb_peer_spad_write(ndev, spad, (u32)size);
}
- rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
- if (rc) {
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- NTB_TRANSPORT_VERSION, VERSION);
- goto out;
- }
+ ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);
- /* Query the remote side for its info */
- rc = ntb_read_remote_spad(ndev, VERSION, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
- goto out;
- }
+ ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);
- if (val != NTB_TRANSPORT_VERSION)
- goto out;
- dev_dbg(&pdev->dev, "Remote version = %d\n", val);
+ ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);
- rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
+ /* Query the remote side for its info */
+ val = ntb_spad_read(ndev, VERSION);
+ dev_dbg(&pdev->dev, "Remote version = %d\n", val);
+ if (val != NTB_TRANSPORT_VERSION)
goto out;
- }
- if (val != nt->max_qps)
- goto out;
+ val = ntb_spad_read(ndev, NUM_QPS);
dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
-
- rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
+ if (val != nt->qp_count)
goto out;
- }
- if (val != ntb_max_mw(ndev))
- goto out;
+ val = ntb_spad_read(ndev, NUM_MWS);
dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
+ if (val != nt->mw_count)
+ goto out;
- for (i = 0; i < ntb_max_mw(ndev); i++) {
+ for (i = 0; i < nt->mw_count; i++) {
u64 val64;
- rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n",
- MW0_SZ_HIGH + (i * 2));
- goto out1;
- }
-
- val64 = (u64) val << 32;
-
- rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n",
- MW0_SZ_LOW + (i * 2));
- goto out1;
- }
+ val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
+ val64 = (u64)val << 32;
+ val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
val64 |= val;
- dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
+ dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);
rc = ntb_set_mw(nt, i, val64);
if (rc)
goto out1;
}
- nt->transport_link = NTB_LINK_UP;
+ nt->link_is_up = true;
- for (i = 0; i < nt->max_qps; i++) {
- struct ntb_transport_qp *qp = &nt->qps[i];
+ for (i = 0; i < nt->qp_count; i++) {
+ struct ntb_transport_qp *qp = &nt->qp_vec[i];
ntb_transport_setup_qp_mw(nt, i);
- if (qp->client_ready == NTB_LINK_UP)
+ if (qp->client_ready)
schedule_delayed_work(&qp->link_work, 0);
}
return;
out1:
- for (i = 0; i < ntb_max_mw(ndev); i++)
+ for (i = 0; i < nt->mw_count; i++)
ntb_free_mw(nt, i);
out:
- if (ntb_hw_link_status(ndev))
+ if (ntb_link_is_up(ndev, NULL, NULL) == 1)
schedule_delayed_work(&nt->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
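
The link-up handshake above is a fixed scratchpad protocol: each side writes its memory window sizes, window count, queue count, and transport version into the peer's scratchpads, then reads its own scratchpads for the peer's copy and refuses to bring the link up unless every value matches. A sketch of the register layout this implies is below; the index names mirror the ones used in the code, but the exact ordering and values are an assumption for illustration.

/*
 * Hypothetical scratchpad layout, for illustration only; the real
 * definitions live at the top of ntb_transport.c.  Each memory
 * window i consumes two slots, MW0_SZ_HIGH + (i * 2) and
 * MW0_SZ_LOW + (i * 2).
 */
enum {
	VERSION,	/* NTB_TRANSPORT_VERSION, must match the peer */
	QP_LINKS,	/* per-qp ready bits, BIT(qp_num) */
	NUM_QPS,	/* queue pair count, must match the peer */
	NUM_MWS,	/* memory window count, must match the peer */
	MW0_SZ_HIGH,	/* upper 32 bits of MW0 size */
	MW0_SZ_LOW,	/* lower 32 bits of MW0 size */
	MAX_SPAD,	/* bound used when the registers are zeroed */
};

MAX_SPAD is the same bound ntb_transport_link_cleanup() uses when it blasts the registers back to zero after the link drops.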
@@ -814,73 +846,73 @@ static void ntb_qp_link_work(struct work_struct *work)
struct ntb_transport_qp *qp = container_of(work,
struct ntb_transport_qp,
link_work.work);
- struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
- struct ntb_transport *nt = qp->transport;
- int rc, val;
+ struct pci_dev *pdev = qp->ndev->pdev;
+ struct ntb_transport_ctx *nt = qp->transport;
+ int val;
- WARN_ON(nt->transport_link != NTB_LINK_UP);
+ WARN_ON(!nt->link_is_up);
- rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
- return;
- }
+ val = ntb_spad_read(nt->ndev, QP_LINKS);
- rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
- if (rc)
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- val | 1 << qp->qp_num, QP_LINKS);
+ ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));
/* query remote spad for qp ready bits */
- rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
- if (rc)
- dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
-
- dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
+ ntb_peer_spad_read(nt->ndev, QP_LINKS);
+ dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
/* See if the remote side is up */
- if (1 << qp->qp_num & val) {
- qp->qp_link = NTB_LINK_UP;
-
+ if (val & BIT(qp->qp_num)) {
dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
+ qp->link_is_up = true;
+
if (qp->event_handler)
- qp->event_handler(qp->cb_data, NTB_LINK_UP);
- } else if (nt->transport_link == NTB_LINK_UP)
+ qp->event_handler(qp->cb_data, qp->link_is_up);
+ } else if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
-static int ntb_transport_init_queue(struct ntb_transport *nt,
+static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
struct ntb_transport_qp *qp;
+ struct ntb_transport_mw *mw;
+ phys_addr_t mw_base;
+ resource_size_t mw_size;
unsigned int num_qps_mw, tx_size;
- u8 mw_num, mw_max;
+ unsigned int mw_num, mw_count, qp_count;
u64 qp_offset;
- mw_max = ntb_max_mw(nt->ndev);
- mw_num = QP_TO_MW(nt->ndev, qp_num);
+ mw_count = nt->mw_count;
+ qp_count = nt->qp_count;
- qp = &nt->qps[qp_num];
+ mw_num = QP_TO_MW(nt, qp_num);
+ mw = &nt->mw_vec[mw_num];
+
+ qp = &nt->qp_vec[qp_num];
qp->qp_num = qp_num;
qp->transport = nt;
qp->ndev = nt->ndev;
- qp->qp_link = NTB_LINK_DOWN;
- qp->client_ready = NTB_LINK_DOWN;
+ qp->client_ready = false;
qp->event_handler = NULL;
+ ntb_qp_link_down_reset(qp);
- if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
- num_qps_mw = nt->max_qps / mw_max + 1;
+ if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+ num_qps_mw = qp_count / mw_count + 1;
else
- num_qps_mw = nt->max_qps / mw_max;
+ num_qps_mw = qp_count / mw_count;
+
+ mw_base = nt->mw_vec[mw_num].phys_addr;
+ mw_size = nt->mw_vec[mw_num].phys_size;
- tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
- qp_offset = qp_num / mw_max * tx_size;
- qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
+ tx_size = (unsigned int)mw_size / num_qps_mw;
+ qp_offset = tx_size * qp_num / mw_count;
+
+ qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
if (!qp->tx_mw)
return -EINVAL;
- qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
+ qp->tx_mw_phys = mw_base + qp_offset;
if (!qp->tx_mw_phys)
return -EINVAL;
@@ -891,16 +923,19 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
- if (ntb_query_debugfs(nt->ndev)) {
+ if (nt_debugfs_dir) {
char debugfs_name[4];
snprintf(debugfs_name, 4, "qp%d", qp_num);
qp->debugfs_dir = debugfs_create_dir(debugfs_name,
- ntb_query_debugfs(nt->ndev));
+ nt_debugfs_dir);
qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
qp->debugfs_dir, qp,
&ntb_qp_debugfs_stats);
+ } else {
+ qp->debugfs_dir = NULL;
+ qp->debugfs_stats = NULL;
}
INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
@@ -914,46 +949,89 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
INIT_LIST_HEAD(&qp->rx_free_q);
INIT_LIST_HEAD(&qp->tx_free_q);
+ tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
+ (unsigned long)qp);
+
return 0;
}
-int ntb_transport_init(struct pci_dev *pdev)
+static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
- struct ntb_transport *nt;
+ struct ntb_transport_ctx *nt;
+ struct ntb_transport_mw *mw;
+ unsigned int mw_count, qp_count;
+ u64 qp_bitmap;
+ int node;
int rc, i;
- nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
+ if (ntb_db_is_unsafe(ndev))
+ dev_dbg(&ndev->dev,
+ "doorbell is unsafe, proceed anyway...\n");
+ if (ntb_spad_is_unsafe(ndev))
+ dev_dbg(&ndev->dev,
+ "scratchpad is unsafe, proceed anyway...\n");
+
+ node = dev_to_node(&ndev->dev);
+
+ nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
if (!nt)
return -ENOMEM;
- nt->ndev = ntb_register_transport(pdev, nt);
- if (!nt->ndev) {
- rc = -EIO;
+ nt->ndev = ndev;
+
+ mw_count = ntb_mw_count(ndev);
+
+ nt->mw_count = mw_count;
+
+ nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
+ GFP_KERNEL, node);
+ if (!nt->mw_vec) {
+ rc = -ENOMEM;
goto err;
}
- nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
- GFP_KERNEL);
- if (!nt->mw) {
- rc = -ENOMEM;
- goto err1;
+ for (i = 0; i < mw_count; i++) {
+ mw = &nt->mw_vec[i];
+
+ rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
+ &mw->xlat_align, &mw->xlat_align_size);
+ if (rc)
+ goto err1;
+
+ mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
+ if (!mw->vbase) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ mw->buff_size = 0;
+ mw->xlat_size = 0;
+ mw->virt_addr = NULL;
+ mw->dma_addr = 0;
}
- if (max_num_clients)
- nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
- else
- nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));
+ qp_bitmap = ntb_db_valid_mask(ndev);
+
+ qp_count = ilog2(qp_bitmap);
+ if (max_num_clients && max_num_clients < qp_count)
+ qp_count = max_num_clients;
+ else if (mw_count < qp_count)
+ qp_count = mw_count;
+
+ qp_bitmap &= BIT_ULL(qp_count) - 1;
- nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
- GFP_KERNEL);
- if (!nt->qps) {
+ nt->qp_count = qp_count;
+ nt->qp_bitmap = qp_bitmap;
+ nt->qp_bitmap_free = qp_bitmap;
+
+ nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
+ GFP_KERNEL, node);
+ if (!nt->qp_vec) {
rc = -ENOMEM;
goto err2;
}
- nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
-
- for (i = 0; i < nt->max_qps; i++) {
+ for (i = 0; i < qp_count; i++) {
rc = ntb_transport_init_queue(nt, i);
if (rc)
goto err3;
@@ -962,8 +1040,7 @@ int ntb_transport_init(struct pci_dev *pdev)
INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
- rc = ntb_register_event_callback(nt->ndev,
- ntb_transport_event_callback);
+ rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
if (rc)
goto err3;
@@ -972,51 +1049,61 @@ int ntb_transport_init(struct pci_dev *pdev)
if (rc)
goto err4;
- if (ntb_hw_link_status(nt->ndev))
- schedule_delayed_work(&nt->link_work, 0);
+ nt->link_is_up = false;
+ ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ ntb_link_event(ndev);
return 0;
err4:
- ntb_unregister_event_callback(nt->ndev);
+ ntb_clear_ctx(ndev);
err3:
- kfree(nt->qps);
+ kfree(nt->qp_vec);
err2:
- kfree(nt->mw);
+ kfree(nt->mw_vec);
err1:
- ntb_unregister_transport(nt->ndev);
+ while (i--) {
+ mw = &nt->mw_vec[i];
+ iounmap(mw->vbase);
+ }
err:
kfree(nt);
return rc;
}
-void ntb_transport_free(void *transport)
+static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
- struct ntb_transport *nt = transport;
- struct ntb_device *ndev = nt->ndev;
+ struct ntb_transport_ctx *nt = ndev->ctx;
+ struct ntb_transport_qp *qp;
+ u64 qp_bitmap_alloc;
int i;
ntb_transport_link_cleanup(nt);
+ cancel_work_sync(&nt->link_cleanup);
+ cancel_delayed_work_sync(&nt->link_work);
+
+ qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
/* verify that all the qp's are freed */
- for (i = 0; i < nt->max_qps; i++) {
- if (!test_bit(i, &nt->qp_bitmap))
- ntb_transport_free_queue(&nt->qps[i]);
- debugfs_remove_recursive(nt->qps[i].debugfs_dir);
+ for (i = 0; i < nt->qp_count; i++) {
+ qp = &nt->qp_vec[i];
+ if (qp_bitmap_alloc & BIT_ULL(i))
+ ntb_transport_free_queue(qp);
+ debugfs_remove_recursive(qp->debugfs_dir);
}
- ntb_bus_remove(nt);
+ ntb_link_disable(ndev);
+ ntb_clear_ctx(ndev);
- cancel_delayed_work_sync(&nt->link_work);
-
- ntb_unregister_event_callback(ndev);
+ ntb_bus_remove(nt);
- for (i = 0; i < ntb_max_mw(ndev); i++)
+ for (i = nt->mw_count; i--; ) {
ntb_free_mw(nt, i);
+ iounmap(nt->mw_vec[i].vbase);
+ }
- kfree(nt->qps);
- kfree(nt->mw);
- ntb_unregister_transport(ndev);
+ kfree(nt->qp_vec);
+ kfree(nt->mw_vec);
kfree(nt);
}
@@ -1028,15 +1115,13 @@ static void ntb_rx_copy_callback(void *data)
unsigned int len = entry->len;
struct ntb_payload_header *hdr = entry->rx_hdr;
- /* Ensure that the data is fully copied out before clearing the flag */
- wmb();
hdr->flags = 0;
iowrite32(entry->index, &qp->rx_info->entry);
ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
- if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
+ if (qp->rx_handler && qp->client_ready)
qp->rx_handler(qp, qp->cb_data, cb_data, len);
}
@@ -1047,6 +1132,9 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
memcpy(buf, offset, len);
+ /* Ensure that the data is fully copied out before clearing the flag */
+ wmb();
+
ntb_rx_copy_callback(entry);
}
@@ -1071,8 +1159,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
goto err_wait;
device = chan->device;
- pay_off = (size_t) offset & ~PAGE_MASK;
- buff_off = (size_t) buf & ~PAGE_MASK;
+ pay_off = (size_t)offset & ~PAGE_MASK;
+ buff_off = (size_t)buf & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
goto err_wait;
@@ -1138,86 +1226,103 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
struct ntb_payload_header *hdr;
struct ntb_queue_entry *entry;
void *offset;
+ int rc;
offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
- entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
- if (!entry) {
- dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
- "no buffer - HDR ver %u, len %d, flags %x\n",
- hdr->ver, hdr->len, hdr->flags);
- qp->rx_err_no_buf++;
- return -ENOMEM;
- }
+ dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
+ qp->qp_num, hdr->ver, hdr->len, hdr->flags);
if (!(hdr->flags & DESC_DONE_FLAG)) {
- ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
- &qp->rx_pend_q);
+ dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
qp->rx_ring_empty++;
return -EAGAIN;
}
- if (hdr->ver != (u32) qp->rx_pkts) {
- dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
- "qp %d: version mismatch, expected %llu - got %u\n",
- qp->qp_num, qp->rx_pkts, hdr->ver);
- ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
- &qp->rx_pend_q);
+ if (hdr->flags & LINK_DOWN_FLAG) {
+ dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
+ ntb_qp_link_down(qp);
+ hdr->flags = 0;
+ return -EAGAIN;
+ }
+
+ if (hdr->ver != (u32)qp->rx_pkts) {
+ dev_dbg(&qp->ndev->pdev->dev,
+ "version mismatch, expected %llu - got %u\n",
+ qp->rx_pkts, hdr->ver);
qp->rx_err_ver++;
return -EIO;
}
- if (hdr->flags & LINK_DOWN_FLAG) {
- ntb_qp_link_down(qp);
+ entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ if (!entry) {
+ dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
+ qp->rx_err_no_buf++;
+ rc = -ENOMEM;
goto err;
}
- dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
- "rx offset %u, ver %u - %d payload received, buf size %d\n",
- qp->rx_index, hdr->ver, hdr->len, entry->len);
-
- qp->rx_bytes += hdr->len;
- qp->rx_pkts++;
-
if (hdr->len > entry->len) {
- qp->rx_err_oflow++;
- dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
- "RX overflow! Wanted %d got %d\n",
+ dev_dbg(&qp->ndev->pdev->dev,
+ "receive buffer overflow! Wanted %d got %d\n",
hdr->len, entry->len);
+ qp->rx_err_oflow++;
+ rc = -EIO;
goto err;
}
+ dev_dbg(&qp->ndev->pdev->dev,
+ "RX OK index %u ver %u size %d into buf size %d\n",
+ qp->rx_index, hdr->ver, hdr->len, entry->len);
+
+ qp->rx_bytes += hdr->len;
+ qp->rx_pkts++;
+
entry->index = qp->rx_index;
entry->rx_hdr = hdr;
ntb_async_rx(entry, offset, hdr->len);
-out:
qp->rx_index++;
qp->rx_index %= qp->rx_max_entry;
return 0;
err:
- ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
- /* Ensure that the data is fully copied out before clearing the flag */
- wmb();
+ /* FIXME: if this synchronous update of the rx_index gets ahead of
+ * asynchronous ntb_rx_copy_callback of previous entry, there are three
+ * scenarios:
+ *
+ * 1) The peer might miss this update, but observe the update
+ * from the memcpy completion callback. In this case, the buffer will
+ * not be freed on the peer to be reused for a different packet. The
+ * successful rx of a later packet would clear the condition, but the
+ * condition could persist if several rx fail in a row.
+ *
+ * 2) The peer may observe this update before the asynchronous copy of
+ * prior packets is completed. The peer may overwrite the buffers of
+ * the prior packets before they are copied.
+ *
+ * 3) Both: the peer may observe the update, and then observe the index
+ * decrement by the asynchronous completion callback. Who knows what
+ * badness that will cause.
+ */
hdr->flags = 0;
iowrite32(qp->rx_index, &qp->rx_info->entry);
- goto out;
+ return rc;
}
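
ntb_process_rxc() above validates a fixed trailer that the transmit side writes at the end of each frame slot, in a deliberate order: done flag first (an unset flag just means the slot is empty), then the link-down flag, then the version sequence number, and only then is a receive buffer dequeued. The layout below mirrors the definitions in ntb_transport.c; the flag bit positions are stated as assumptions.

/*
 * Per-frame trailer placed at the end of each rx_max_frame slot.
 * The flag bit values here are assumptions for illustration.
 */
struct ntb_payload_header {
	unsigned int ver;	/* must equal the receiver's rx_pkts count */
	unsigned int len;	/* payload bytes, must fit in entry->len */
	unsigned int flags;	/* DESC_DONE_FLAG, LINK_DOWN_FLAG */
};

#define DESC_DONE_FLAG	BIT(0)	/* producer finished writing the frame */
#define LINK_DOWN_FLAG	BIT(1)	/* frame carries a link-down message */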
-static int ntb_transport_rxc_db(void *data, int db_num)
+static void ntb_transport_rxc_db(unsigned long data)
{
- struct ntb_transport_qp *qp = data;
+ struct ntb_transport_qp *qp = (void *)data;
int rc, i;
- dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
- __func__, db_num);
+ dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
+ __func__, qp->qp_num);
/* Limit the number of packets processed in a single interrupt to
* provide fairness to others
@@ -1231,7 +1336,21 @@ static int ntb_transport_rxc_db(void *data, int db_num)
if (qp->dma_chan)
dma_async_issue_pending(qp->dma_chan);
- return i;
+ if (i == qp->rx_max_entry) {
+ /* there is more work to do */
+ tasklet_schedule(&qp->rxc_db_work);
+ } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
+ /* the doorbell bit is set: clear it */
+ ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
+ /* ntb_db_read ensures ntb_db_clear write is committed */
+ ntb_db_read(qp->ndev);
+
+ /* an interrupt may have arrived between finishing
+ * ntb_process_rxc and clearing the doorbell bit:
+ * there might be some more work to do.
+ */
+ tasklet_schedule(&qp->rxc_db_work);
+ }
}
static void ntb_tx_copy_callback(void *data)
@@ -1240,11 +1359,9 @@ static void ntb_tx_copy_callback(void *data)
struct ntb_transport_qp *qp = entry->qp;
struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
- /* Ensure that the data is fully copied out before setting the flags */
- wmb();
iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
- ntb_ring_doorbell(qp->ndev, qp->qp_num);
+ ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
/* The entry length can only be zero if the packet is intended to be a
* "link down" or similar. Since no payload is being sent in these
@@ -1263,7 +1380,18 @@ static void ntb_tx_copy_callback(void *data)
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
+#ifdef ARCH_HAS_NOCACHE_UACCESS
+ /*
+ * Using non-temporal mov to improve performance on non-cached
+ * writes, even though we aren't actually copying from user space.
+ */
+ __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
+#else
memcpy_toio(offset, entry->buf, entry->len);
+#endif
+
+ /* Ensure that the data is fully copied out before setting the flags */
+ wmb();
ntb_tx_copy_callback(entry);
}
@@ -1288,7 +1416,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
entry->tx_hdr = hdr;
iowrite32(entry->len, &hdr->len);
- iowrite32((u32) qp->tx_pkts, &hdr->ver);
+ iowrite32((u32)qp->tx_pkts, &hdr->ver);
if (!chan)
goto err;
@@ -1298,8 +1426,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
device = chan->device;
dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
- buff_off = (size_t) buf & ~PAGE_MASK;
- dest_off = (size_t) dest & ~PAGE_MASK;
+ buff_off = (size_t)buf & ~PAGE_MASK;
+ dest_off = (size_t)dest & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
goto err;
@@ -1347,9 +1475,6 @@ err:
static int ntb_process_tx(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry)
{
- dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
- qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
- entry->buf);
if (qp->tx_index == qp->remote_rx_info->entry) {
qp->tx_ring_full++;
return -EAGAIN;
@@ -1376,15 +1501,14 @@ static int ntb_process_tx(struct ntb_transport_qp *qp,
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
- struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct pci_dev *pdev = qp->ndev->pdev;
struct ntb_queue_entry *entry;
int i, rc;
- if (qp->qp_link == NTB_LINK_DOWN)
+ if (!qp->link_is_up)
return;
- qp->qp_link = NTB_LINK_DOWN;
- dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+ dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);
for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
@@ -1405,6 +1529,13 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
if (rc)
dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
qp->qp_num);
+
+ ntb_qp_link_down_reset(qp);
+}
+
+static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+ return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}
/**
@@ -1422,18 +1553,25 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
* RETURNS: pointer to newly created ntb_queue, NULL on error.
*/
struct ntb_transport_qp *
-ntb_transport_create_queue(void *data, struct pci_dev *pdev,
+ntb_transport_create_queue(void *data, struct device *client_dev,
const struct ntb_queue_handlers *handlers)
{
+ struct ntb_dev *ndev;
+ struct pci_dev *pdev;
+ struct ntb_transport_ctx *nt;
struct ntb_queue_entry *entry;
struct ntb_transport_qp *qp;
- struct ntb_transport *nt;
+ u64 qp_bit;
unsigned int free_queue;
- int rc, i;
+ dma_cap_mask_t dma_mask;
+ int node;
+ int i;
- nt = ntb_find_transport(pdev);
- if (!nt)
- goto err;
+ ndev = dev_ntb(client_dev->parent);
+ pdev = ndev->pdev;
+ nt = ndev->ctx;
+
+ node = dev_to_node(&ndev->dev);
free_queue = ffs(nt->qp_bitmap);
if (!free_queue)
@@ -1442,23 +1580,31 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
/* decrement free_queue to make it zero based */
free_queue--;
- clear_bit(free_queue, &nt->qp_bitmap);
+ qp = &nt->qp_vec[free_queue];
+ qp_bit = BIT_ULL(qp->qp_num);
+
+ nt->qp_bitmap_free &= ~qp_bit;
- qp = &nt->qps[free_queue];
qp->cb_data = data;
qp->rx_handler = handlers->rx_handler;
qp->tx_handler = handlers->tx_handler;
qp->event_handler = handlers->event_handler;
- dmaengine_get();
- qp->dma_chan = dma_find_channel(DMA_MEMCPY);
- if (!qp->dma_chan) {
- dmaengine_put();
- dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
+ dma_cap_zero(dma_mask);
+ dma_cap_set(DMA_MEMCPY, dma_mask);
+
+ if (use_dma) {
+ qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
+ (void *)(unsigned long)node);
+ if (!qp->dma_chan)
+ dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
+ } else {
+ qp->dma_chan = NULL;
}
+ dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");
for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
- entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+ entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
if (!entry)
goto err1;
@@ -1468,7 +1614,7 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
}
for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
- entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+ entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
if (!entry)
goto err2;
@@ -1477,10 +1623,8 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
&qp->tx_free_q);
}
- rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
- ntb_transport_rxc_db);
- if (rc)
- goto err2;
+ ntb_db_clear(qp->ndev, qp_bit);
+ ntb_db_clear_mask(qp->ndev, qp_bit);
dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
@@ -1493,8 +1637,8 @@ err1:
while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
kfree(entry);
if (qp->dma_chan)
- dmaengine_put();
- set_bit(free_queue, &nt->qp_bitmap);
+ dma_release_channel(qp->dma_chan);
+ nt->qp_bitmap_free |= qp_bit;
err:
return NULL;
}
@@ -1508,13 +1652,15 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
*/
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
+ struct ntb_transport_ctx *nt = qp->transport;
struct pci_dev *pdev;
struct ntb_queue_entry *entry;
+ u64 qp_bit;
if (!qp)
return;
- pdev = ntb_query_pdev(qp->ndev);
+ pdev = qp->ndev->pdev;
if (qp->dma_chan) {
struct dma_chan *chan = qp->dma_chan;
@@ -1528,13 +1674,21 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
*/
dma_sync_wait(chan, qp->last_cookie);
dmaengine_terminate_all(chan);
- dmaengine_put();
+ dma_release_channel(chan);
}
- ntb_unregister_db_callback(qp->ndev, qp->qp_num);
+ qp_bit = BIT_ULL(qp->qp_num);
+
+ ntb_db_set_mask(qp->ndev, qp_bit);
+ tasklet_disable(&qp->rxc_db_work);
cancel_delayed_work_sync(&qp->link_work);
+ qp->cb_data = NULL;
+ qp->rx_handler = NULL;
+ qp->tx_handler = NULL;
+ qp->event_handler = NULL;
+
while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
kfree(entry);
@@ -1546,7 +1700,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
- set_bit(qp->qp_num, &qp->transport->qp_bitmap);
+ nt->qp_bitmap_free |= qp_bit;
dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
@@ -1567,7 +1721,7 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
struct ntb_queue_entry *entry;
void *buf;
- if (!qp || qp->client_ready == NTB_LINK_UP)
+ if (!qp || qp->client_ready)
return NULL;
entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
@@ -1636,7 +1790,7 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
struct ntb_queue_entry *entry;
int rc;
- if (!qp || qp->qp_link != NTB_LINK_UP || !len)
+ if (!qp || !qp->link_is_up || !len)
return -EINVAL;
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
@@ -1670,9 +1824,9 @@ void ntb_transport_link_up(struct ntb_transport_qp *qp)
if (!qp)
return;
- qp->client_ready = NTB_LINK_UP;
+ qp->client_ready = true;
- if (qp->transport->transport_link == NTB_LINK_UP)
+ if (qp->transport->link_is_up)
schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);
@@ -1688,27 +1842,20 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
struct pci_dev *pdev;
- int rc, val;
+ int val;
if (!qp)
return;
- pdev = ntb_query_pdev(qp->ndev);
- qp->client_ready = NTB_LINK_DOWN;
+ pdev = qp->ndev->pdev;
+ qp->client_ready = false;
- rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
- return;
- }
+ val = ntb_spad_read(qp->ndev, QP_LINKS);
- rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
- val & ~(1 << qp->qp_num));
- if (rc)
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- val & ~(1 << qp->qp_num), QP_LINKS);
+ ntb_peer_spad_write(qp->ndev, QP_LINKS,
+ val & ~BIT(qp->qp_num));
- if (qp->qp_link == NTB_LINK_UP)
+ if (qp->link_is_up)
ntb_send_link_down(qp);
else
cancel_delayed_work_sync(&qp->link_work);
@@ -1728,7 +1875,7 @@ bool ntb_transport_link_query(struct ntb_transport_qp *qp)
if (!qp)
return false;
- return qp->qp_link == NTB_LINK_UP;
+ return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);
@@ -1774,3 +1921,71 @@ unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
+
+static void ntb_transport_doorbell_callback(void *data, int vector)
+{
+ struct ntb_transport_ctx *nt = data;
+ struct ntb_transport_qp *qp;
+ u64 db_bits;
+ unsigned int qp_num;
+
+ db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
+ ntb_db_vector_mask(nt->ndev, vector));
+
+ while (db_bits) {
+ qp_num = __ffs(db_bits);
+ qp = &nt->qp_vec[qp_num];
+
+ tasklet_schedule(&qp->rxc_db_work);
+
+ db_bits &= ~BIT_ULL(qp_num);
+ }
+}
+
+static const struct ntb_ctx_ops ntb_transport_ops = {
+ .link_event = ntb_transport_event_callback,
+ .db_event = ntb_transport_doorbell_callback,
+};
+
+static struct ntb_client ntb_transport_client = {
+ .ops = {
+ .probe = ntb_transport_probe,
+ .remove = ntb_transport_free,
+ },
+};
+
+static int __init ntb_transport_init(void)
+{
+ int rc;
+
+ pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);
+
+ if (debugfs_initialized())
+ nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ rc = bus_register(&ntb_transport_bus);
+ if (rc)
+ goto err_bus;
+
+ rc = ntb_register_client(&ntb_transport_client);
+ if (rc)
+ goto err_client;
+
+ return 0;
+
+err_client:
+ bus_unregister(&ntb_transport_bus);
+err_bus:
+ debugfs_remove_recursive(nt_debugfs_dir);
+ return rc;
+}
+module_init(ntb_transport_init);
+
+static void __exit ntb_transport_exit(void)
+{
+ debugfs_remove_recursive(nt_debugfs_dir);
+
+ ntb_unregister_client(&ntb_transport_client);
+ bus_unregister(&ntb_transport_bus);
+}
+module_exit(ntb_transport_exit);
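
For reference, a minimal consumer of the renamed client API might look like the sketch below. The demo_* names are hypothetical, the handler signatures follow the ntb_queue_handlers usage visible in this patch, and error handling is trimmed; a real client registers this from module init with ntb_transport_register_client(&demo_client).

/* Hypothetical minimal transport client; demo_* names are placeholders. */
static void demo_rx(struct ntb_transport_qp *qp, void *qp_data,
		    void *data, int len)
{
	/* consume 'data', then repost the buffer for the next frame */
	ntb_transport_rx_enqueue(qp, data, data, len);
}

static void demo_tx(struct ntb_transport_qp *qp, void *qp_data,
		    void *data, int len)
{
	/* 'data' has been copied toward the peer and may be reused */
}

static void demo_event(void *data, int link_is_up)
{
	/* queue pair link state changed; gate traffic on link_is_up */
}

static const struct ntb_queue_handlers demo_handlers = {
	.rx_handler	= demo_rx,
	.tx_handler	= demo_tx,
	.event_handler	= demo_event,
};

static int demo_probe(struct device *client_dev)
{
	struct ntb_transport_qp *qp;

	qp = ntb_transport_create_queue(NULL, client_dev, &demo_handlers);
	if (!qp)
		return -EIO;

	ntb_transport_link_up(qp);
	return 0;
}

static struct ntb_transport_client demo_client = {
	.driver.name	= "demo_client",
	.driver.owner	= THIS_MODULE,
	.probe		= demo_probe,
	/* .remove would call ntb_transport_free_queue() on the qp */
};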
diff --git a/drivers/ntb/test/Kconfig b/drivers/ntb/test/Kconfig
new file mode 100644
index 000000000000..01852f98a843
--- /dev/null
+++ b/drivers/ntb/test/Kconfig
@@ -0,0 +1,19 @@
+config NTB_PINGPONG
+ tristate "NTB Ping Pong Test Client"
+ help
+ This is a simple ping pong driver that exercises the scratchpads and
+ doorbells of the ntb hardware. This driver may be used to test that
+ your ntb hardware and drivers are functioning at a basic level.
+
+ If unsure, say N.
+
+config NTB_TOOL
+ tristate "NTB Debugging Tool Test Client"
+ help
+ This is a simple debugging driver that enables the doorbell and
+ scratchpad registers to be read and written from the debugfs. This
+ enables more complicated debugging to be scripted from user space.
+ This driver may be used to test that your ntb hardware and drivers are
+ functioning at a basic level.
+
+ If unsure, say N.
diff --git a/drivers/ntb/test/Makefile b/drivers/ntb/test/Makefile
new file mode 100644
index 000000000000..0ea32a324b6c
--- /dev/null
+++ b/drivers/ntb/test/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_NTB_PINGPONG) += ntb_pingpong.o
+obj-$(CONFIG_NTB_TOOL) += ntb_tool.o
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
new file mode 100644
index 000000000000..fe1600566981
--- /dev/null
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -0,0 +1,250 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Pingpong Linux driver
+ *
+ * Contact Information:
+ * Allen Hubbe <Allen.Hubbe@emc.com>
+ */
+
+/* Note: load this module with option 'dyndbg=+p' */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/ntb.h>
+
+#define DRIVER_NAME "ntb_pingpong"
+#define DRIVER_DESCRIPTION "PCIe NTB Simple Pingpong Client"
+
+#define DRIVER_LICENSE "Dual BSD/GPL"
+#define DRIVER_VERSION "1.0"
+#define DRIVER_RELDATE "24 March 2015"
+#define DRIVER_AUTHOR "Allen Hubbe <Allen.Hubbe@emc.com>"
+
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+
+static unsigned int unsafe;
+module_param(unsafe, uint, 0644);
+MODULE_PARM_DESC(unsafe, "Run even though ntb operations may be unsafe");
+
+static unsigned int delay_ms = 1000;
+module_param(delay_ms, uint, 0644);
+MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");
+
+static unsigned long db_init = 0x7;
+module_param(db_init, ulong, 0644);
+MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer");
+
+struct pp_ctx {
+ struct ntb_dev *ntb;
+ u64 db_bits;
+ /* synchronize access to db_bits by ping and pong */
+ spinlock_t db_lock;
+ struct timer_list db_timer;
+ unsigned long db_delay;
+};
+
+static void pp_ping(unsigned long ctx)
+{
+ struct pp_ctx *pp = (void *)ctx;
+ unsigned long irqflags;
+ u64 db_bits, db_mask;
+ u32 spad_rd, spad_wr;
+
+ spin_lock_irqsave(&pp->db_lock, irqflags);
+ {
+ db_mask = ntb_db_valid_mask(pp->ntb);
+ db_bits = ntb_db_read(pp->ntb);
+
+ if (db_bits) {
+ dev_dbg(&pp->ntb->dev,
+ "Masked pongs %#llx\n",
+ db_bits);
+ ntb_db_clear(pp->ntb, db_bits);
+ }
+
+ db_bits = ((pp->db_bits | db_bits) << 1) & db_mask;
+
+ if (!db_bits)
+ db_bits = db_init;
+
+ spad_rd = ntb_spad_read(pp->ntb, 0);
+ spad_wr = spad_rd + 1;
+
+ dev_dbg(&pp->ntb->dev,
+ "Ping bits %#llx read %#x write %#x\n",
+ db_bits, spad_rd, spad_wr);
+
+ ntb_peer_spad_write(pp->ntb, 0, spad_wr);
+ ntb_peer_db_set(pp->ntb, db_bits);
+ ntb_db_clear_mask(pp->ntb, db_mask);
+
+ pp->db_bits = 0;
+ }
+ spin_unlock_irqrestore(&pp->db_lock, irqflags);
+}
+
+static void pp_link_event(void *ctx)
+{
+ struct pp_ctx *pp = ctx;
+
+ if (ntb_link_is_up(pp->ntb, NULL, NULL) == 1) {
+ dev_dbg(&pp->ntb->dev, "link is up\n");
+ pp_ping((unsigned long)pp);
+ } else {
+ dev_dbg(&pp->ntb->dev, "link is down\n");
+ del_timer(&pp->db_timer);
+ }
+}
+
+static void pp_db_event(void *ctx, int vec)
+{
+ struct pp_ctx *pp = ctx;
+ u64 db_bits, db_mask;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&pp->db_lock, irqflags);
+ {
+ db_mask = ntb_db_vector_mask(pp->ntb, vec);
+ db_bits = db_mask & ntb_db_read(pp->ntb);
+ ntb_db_set_mask(pp->ntb, db_mask);
+ ntb_db_clear(pp->ntb, db_bits);
+
+ pp->db_bits |= db_bits;
+
+ mod_timer(&pp->db_timer, jiffies + pp->db_delay);
+
+ dev_dbg(&pp->ntb->dev,
+ "Pong vec %d bits %#llx\n",
+ vec, db_bits);
+ }
+ spin_unlock_irqrestore(&pp->db_lock, irqflags);
+}
+
+static const struct ntb_ctx_ops pp_ops = {
+ .link_event = pp_link_event,
+ .db_event = pp_db_event,
+};
+
+static int pp_probe(struct ntb_client *client,
+ struct ntb_dev *ntb)
+{
+ struct pp_ctx *pp;
+ int rc;
+
+ if (ntb_db_is_unsafe(ntb)) {
+ dev_dbg(&ntb->dev, "doorbell is unsafe\n");
+ if (!unsafe) {
+ rc = -EINVAL;
+ goto err_pp;
+ }
+ }
+
+ if (ntb_spad_is_unsafe(ntb)) {
+ dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
+ if (!unsafe) {
+ rc = -EINVAL;
+ goto err_pp;
+ }
+ }
+
+ pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+ if (!pp) {
+ rc = -ENOMEM;
+ goto err_pp;
+ }
+
+ pp->ntb = ntb;
+ pp->db_bits = 0;
+ spin_lock_init(&pp->db_lock);
+ setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
+ pp->db_delay = msecs_to_jiffies(delay_ms);
+
+ rc = ntb_set_ctx(ntb, pp, &pp_ops);
+ if (rc)
+ goto err_ctx;
+
+ ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ ntb_link_event(ntb);
+
+ return 0;
+
+err_ctx:
+ kfree(pp);
+err_pp:
+ return rc;
+}
+
+static void pp_remove(struct ntb_client *client,
+ struct ntb_dev *ntb)
+{
+ struct pp_ctx *pp = ntb->ctx;
+
+ ntb_clear_ctx(ntb);
+ del_timer_sync(&pp->db_timer);
+ ntb_link_disable(ntb);
+
+ kfree(pp);
+}
+
+static struct ntb_client pp_client = {
+ .ops = {
+ .probe = pp_probe,
+ .remove = pp_remove,
+ },
+};
+module_ntb_client(pp_client);
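
The ping pattern in pp_ping() is the set of doorbell bits last received, shifted left by one and masked against the valid doorbell bits, wrapping back to db_init once the bits fall off the top. A standalone illustration of the resulting sequence, assuming a 16-bit doorbell mask and the module defaults, is below.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t db_mask = 0xffff;	/* assume 16 doorbell bits */
	uint64_t db_init = 0x7;		/* module default */
	uint64_t db_bits = 0;
	int i;

	for (i = 0; i < 18; i++) {
		db_bits = (db_bits << 1) & db_mask;
		if (!db_bits)
			db_bits = db_init;	/* wrapped around: restart */
		printf("ping %2d: %#06llx\n", i,
		       (unsigned long long)db_bits);
	}
	return 0;
}

Each exchange therefore marches the pattern one bit to the left per delay_ms interval, exercising every doorbell bit, and with it every interrupt vector mapping, over time.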
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
new file mode 100644
index 000000000000..6f5dc6ca673d
--- /dev/null
+++ b/drivers/ntb/test/ntb_tool.c
@@ -0,0 +1,556 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Debugging Tool Linux driver
+ *
+ * Contact Information:
+ * Allen Hubbe <Allen.Hubbe@emc.com>
+ */
+
+/*
+ * How to use this tool, by example.
+ *
+ * Assuming $DBG_DIR is something like:
+ * '/sys/kernel/debug/ntb_tool/0000:00:03.0'
+ *
+ * E.g.: check if clearing the doorbell mask generates an interrupt.
+ *
+ * # Set the doorbell mask
+ * root@self# echo 's 1' > $DBG_DIR/mask
+ *
+ * # Ring the doorbell from the peer
+ * root@peer# echo 's 1' > $DBG_DIR/peer_db
+ *
+ * # Clear the doorbell mask
+ * root@self# echo 'c 1' > $DBG_DIR/mask
+ *
+ * Observe debugging output in dmesg or your console. You should see a
+ * doorbell event triggered by clearing the mask. If not, this may indicate an
+ * issue with the hardware that needs to be worked around in the driver.
+ *
+ * E.g.: read and write scratchpad registers
+ *
+ * root@peer# echo '0 0x01010101 1 0x7f7f7f7f' > $DBG_DIR/peer_spad
+ *
+ * root@self# cat $DBG_DIR/spad
+ *
+ * Observe that spad 0 and 1 have the values set by the peer.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/ntb.h>
+
+#define DRIVER_NAME "ntb_tool"
+#define DRIVER_DESCRIPTION "PCIe NTB Debugging Tool"
+
+#define DRIVER_LICENSE "Dual BSD/GPL"
+#define DRIVER_VERSION "1.0"
+#define DRIVER_RELDATE "22 April 2015"
+#define DRIVER_AUTHOR "Allen Hubbe <Allen.Hubbe@emc.com>"
+
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+
+static struct dentry *tool_dbgfs;
+
+struct tool_ctx {
+ struct ntb_dev *ntb;
+ struct dentry *dbgfs;
+};
+
+#define SPAD_FNAME_SIZE 0x10
+#define INT_PTR(x) ((void *)(unsigned long)x)
+#define PTR_INT(x) ((int)(unsigned long)x)
+
+#define TOOL_FOPS_RDWR(__name, __read, __write) \
+ const struct file_operations __name = { \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = __read, \
+ .write = __write, \
+ }
+
+static void tool_link_event(void *ctx)
+{
+ struct tool_ctx *tc = ctx;
+ enum ntb_speed speed;
+ enum ntb_width width;
+ int up;
+
+ up = ntb_link_is_up(tc->ntb, &speed, &width);
+
+ dev_dbg(&tc->ntb->dev, "link is %s speed %d width %d\n",
+ up ? "up" : "down", speed, width);
+}
+
+static void tool_db_event(void *ctx, int vec)
+{
+ struct tool_ctx *tc = ctx;
+ u64 db_bits, db_mask;
+
+ db_mask = ntb_db_vector_mask(tc->ntb, vec);
+ db_bits = ntb_db_read(tc->ntb);
+
+ dev_dbg(&tc->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
+ vec, db_mask, db_bits);
+}
+
+static const struct ntb_ctx_ops tool_ops = {
+ .link_event = tool_link_event,
+ .db_event = tool_db_event,
+};
+
+static ssize_t tool_dbfn_read(struct tool_ctx *tc, char __user *ubuf,
+ size_t size, loff_t *offp,
+ u64 (*db_read_fn)(struct ntb_dev *))
+{
+ size_t buf_size;
+ char *buf;
+ ssize_t pos, rc;
+
+ if (!db_read_fn)
+ return -EINVAL;
+
+ buf_size = min_t(size_t, size, 0x20);
+
+ buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos = scnprintf(buf, buf_size, "%#llx\n",
+ db_read_fn(tc->ntb));
+
+ rc = simple_read_from_buffer(ubuf, size, offp, buf, pos);
+
+ kfree(buf);
+
+ return rc;
+}
+
+static ssize_t tool_dbfn_write(struct tool_ctx *tc,
+ const char __user *ubuf,
+ size_t size, loff_t *offp,
+ int (*db_set_fn)(struct ntb_dev *, u64),
+ int (*db_clear_fn)(struct ntb_dev *, u64))
+{
+ u64 db_bits;
+ char *buf, cmd;
+ ssize_t rc;
+ int n;
+
+ buf = kmalloc(size + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(buf, size, offp, ubuf, size);
+ if (rc < 0) {
+ kfree(buf);
+ return rc;
+ }
+
+ buf[size] = 0;
+
+ n = sscanf(buf, "%c %lli", &cmd, &db_bits);
+
+ kfree(buf);
+
+ if (n != 2) {
+ rc = -EINVAL;
+ } else if (cmd == 's') {
+ if (!db_set_fn)
+ rc = -EINVAL;
+ else
+ rc = db_set_fn(tc->ntb, db_bits);
+ } else if (cmd == 'c') {
+ if (!db_clear_fn)
+ rc = -EINVAL;
+ else
+ rc = db_clear_fn(tc->ntb, db_bits);
+ } else {
+ rc = -EINVAL;
+ }
+
+ return rc ? : size;
+}
+
+static ssize_t tool_spadfn_read(struct tool_ctx *tc, char __user *ubuf,
+ size_t size, loff_t *offp,
+ u32 (*spad_read_fn)(struct ntb_dev *, int))
+{
+ size_t buf_size;
+ char *buf;
+ ssize_t pos, rc;
+ int i, spad_count;
+
+ if (!spad_read_fn)
+ return -EINVAL;
+
+ buf_size = min_t(size_t, size, 0x100);
+
+ buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos = 0;
+
+ spad_count = ntb_spad_count(tc->ntb);
+ for (i = 0; i < spad_count; ++i) {
+ pos += scnprintf(buf + pos, buf_size - pos, "%d\t%#x\n",
+ i, spad_read_fn(tc->ntb, i));
+ }
+
+ rc = simple_read_from_buffer(ubuf, size, offp, buf, pos);
+
+ kfree(buf);
+
+ return rc;
+}
+
+static ssize_t tool_spadfn_write(struct tool_ctx *tc,
+ const char __user *ubuf,
+ size_t size, loff_t *offp,
+ int (*spad_write_fn)(struct ntb_dev *,
+ int, u32))
+{
+ int spad_idx;
+ u32 spad_val;
+ char *buf;
+ int pos, n;
+ ssize_t rc;
+
+ if (!spad_write_fn) {
+ dev_dbg(&tc->ntb->dev, "no spad write fn\n");
+ return -EINVAL;
+ }
+
+ buf = kmalloc(size + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(buf, size, offp, ubuf, size);
+ if (rc < 0) {
+ kfree(buf);
+ return rc;
+ }
+
+ buf[size] = 0;
+
+ n = sscanf(buf, "%d %i%n", &spad_idx, &spad_val, &pos);
+ while (n == 2) {
+ rc = spad_write_fn(tc->ntb, spad_idx, spad_val);
+ if (rc)
+ break;
+
+ n = sscanf(buf + pos, "%d %i%n", &spad_idx, &spad_val, &pos);
+ }
+
+ if (n < 0)
+ rc = n;
+
+ kfree(buf);
+
+ return rc ? : size;
+}
+
+static ssize_t tool_db_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_dbfn_read(tc, ubuf, size, offp,
+ tc->ntb->ops->db_read);
+}
+
+static ssize_t tool_db_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_dbfn_write(tc, ubuf, size, offp,
+ tc->ntb->ops->db_set,
+ tc->ntb->ops->db_clear);
+}
+
+static TOOL_FOPS_RDWR(tool_db_fops,
+ tool_db_read,
+ tool_db_write);
+
+static ssize_t tool_mask_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_dbfn_read(tc, ubuf, size, offp,
+ tc->ntb->ops->db_read_mask);
+}
+
+static ssize_t tool_mask_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_dbfn_write(tc, ubuf, size, offp,
+ tc->ntb->ops->db_set_mask,
+ tc->ntb->ops->db_clear_mask);
+}
+
+static TOOL_FOPS_RDWR(tool_mask_fops,
+ tool_mask_read,
+ tool_mask_write);
+
+static ssize_t tool_peer_db_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_dbfn_read(tc, ubuf, size, offp,
+ tc->ntb->ops->peer_db_read);
+}
+
+static ssize_t tool_peer_db_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_dbfn_write(tc, ubuf, size, offp,
+ tc->ntb->ops->peer_db_set,
+ tc->ntb->ops->peer_db_clear);
+}
+
+static TOOL_FOPS_RDWR(tool_peer_db_fops,
+ tool_peer_db_read,
+ tool_peer_db_write);
+
+static ssize_t tool_peer_mask_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_dbfn_read(tc, ubuf, size, offp,
+ tc->ntb->ops->peer_db_read_mask);
+}
+
+static ssize_t tool_peer_mask_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_dbfn_write(tc, ubuf, size, offp,
+ tc->ntb->ops->peer_db_set_mask,
+ tc->ntb->ops->peer_db_clear_mask);
+}
+
+static TOOL_FOPS_RDWR(tool_peer_mask_fops,
+ tool_peer_mask_read,
+ tool_peer_mask_write);
+
+static ssize_t tool_spad_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_spadfn_read(tc, ubuf, size, offp,
+ tc->ntb->ops->spad_read);
+}
+
+static ssize_t tool_spad_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_spadfn_write(tc, ubuf, size, offp,
+ tc->ntb->ops->spad_write);
+}
+
+static TOOL_FOPS_RDWR(tool_spad_fops,
+ tool_spad_read,
+ tool_spad_write);
+
+static ssize_t tool_peer_spad_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_spadfn_read(tc, ubuf, size, offp,
+ tc->ntb->ops->peer_spad_read);
+}
+
+static ssize_t tool_peer_spad_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+
+ return tool_spadfn_write(tc, ubuf, size, offp,
+ tc->ntb->ops->peer_spad_write);
+}
+
+static TOOL_FOPS_RDWR(tool_peer_spad_fops,
+ tool_peer_spad_read,
+ tool_peer_spad_write);
+
+static void tool_setup_dbgfs(struct tool_ctx *tc)
+{
+ /* This module is useless without dbgfs... */
+ if (!tool_dbgfs) {
+ tc->dbgfs = NULL;
+ return;
+ }
+
+ tc->dbgfs = debugfs_create_dir(dev_name(&tc->ntb->dev),
+ tool_dbgfs);
+ if (!tc->dbgfs)
+ return;
+
+ debugfs_create_file("db", S_IRUSR | S_IWUSR, tc->dbgfs,
+ tc, &tool_db_fops);
+
+ debugfs_create_file("mask", S_IRUSR | S_IWUSR, tc->dbgfs,
+ tc, &tool_mask_fops);
+
+ debugfs_create_file("peer_db", S_IRUSR | S_IWUSR, tc->dbgfs,
+ tc, &tool_peer_db_fops);
+
+ debugfs_create_file("peer_mask", S_IRUSR | S_IWUSR, tc->dbgfs,
+ tc, &tool_peer_mask_fops);
+
+ debugfs_create_file("spad", S_IRUSR | S_IWUSR, tc->dbgfs,
+ tc, &tool_spad_fops);
+
+ debugfs_create_file("peer_spad", S_IRUSR | S_IWUSR, tc->dbgfs,
+ tc, &tool_peer_spad_fops);
+}
+
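For orientation, the debugfs files registered above are this module's entire user interface. Below is a minimal userspace sketch; it is not part of the patch, the mount point and device-directory name are assumptions that depend on the running system, and the "s <bits>" command form is the one parsed by tool_dbfn_write() earlier in this file.

/* Hypothetical userspace probe: set peer doorbell bit 0 through the
 * "peer_db" file created by tool_setup_dbgfs(). The path is an
 * assumption: debugfs mounted at /sys/kernel/debug, NTB device named
 * "0000:00:03.0".
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/ntb_tool/0000:00:03.0/peer_db";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "s 0x1" asks tool_dbfn_write() to set doorbell bit 0 */
	if (write(fd, "s 0x1", strlen("s 0x1")) < 0)
		perror("write");
	close(fd);
	return 0;
}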
+static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
+{
+ struct tool_ctx *tc;
+ int rc;
+
+ if (ntb_db_is_unsafe(ntb))
+ dev_dbg(&ntb->dev, "doorbell is unsafe\n");
+
+ if (ntb_spad_is_unsafe(ntb))
+ dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
+
+ tc = kmalloc(sizeof(*tc), GFP_KERNEL);
+ if (!tc) {
+ rc = -ENOMEM;
+ goto err_tc;
+ }
+
+ tc->ntb = ntb;
+
+ tool_setup_dbgfs(tc);
+
+ rc = ntb_set_ctx(ntb, tc, &tool_ops);
+ if (rc)
+ goto err_ctx;
+
+ ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ ntb_link_event(ntb);
+
+ return 0;
+
+err_ctx:
+ debugfs_remove_recursive(tc->dbgfs);
+ kfree(tc);
+err_tc:
+ return rc;
+}
+
+static void tool_remove(struct ntb_client *self, struct ntb_dev *ntb)
+{
+ struct tool_ctx *tc = ntb->ctx;
+
+ ntb_clear_ctx(ntb);
+ ntb_link_disable(ntb);
+
+ debugfs_remove_recursive(tc->dbgfs);
+ kfree(tc);
+}
+
+static struct ntb_client tool_client = {
+ .ops = {
+ .probe = tool_probe,
+ .remove = tool_remove,
+ },
+};
+
+static int __init tool_init(void)
+{
+ int rc;
+
+ if (debugfs_initialized())
+ tool_dbgfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ rc = ntb_register_client(&tool_client);
+ if (rc)
+ goto err_client;
+
+ return 0;
+
+err_client:
+ debugfs_remove_recursive(tool_dbgfs);
+ return rc;
+}
+module_init(tool_init);
+
+static void __exit tool_exit(void)
+{
+ ntb_unregister_client(&tool_client);
+ debugfs_remove_recursive(tool_dbgfs);
+}
+module_exit(tool_exit);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 8eb22c0ca7ce..7e2c43f701bc 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -535,8 +535,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
__func__, dimm_name, cmd_name, i);
return -ENXIO;
}
- if (!access_ok(VERIFY_READ, p + in_len, in_size))
- return -EFAULT;
if (in_len < sizeof(in_env))
copy = min_t(u32, sizeof(in_env) - in_len, in_size);
else
@@ -557,8 +555,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
__func__, dimm_name, cmd_name, i);
return -EFAULT;
}
- if (!access_ok(VERIFY_WRITE, p + in_len + out_len, out_size))
- return -EFAULT;
if (out_len < sizeof(out_env))
copy = min_t(u32, sizeof(out_env) - out_len, out_size);
else
@@ -570,9 +566,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
buf_len = out_len + in_len;
- if (!access_ok(VERIFY_WRITE, p, sizeof(buf_len)))
- return -EFAULT;
-
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
dimm_name, cmd_name, buf_len,
@@ -706,8 +699,10 @@ int __init nvdimm_bus_init(void)
nvdimm_major = rc;
nd_class = class_create(THIS_MODULE, "nd");
- if (IS_ERR(nd_class))
+ if (IS_ERR(nd_class)) {
+ rc = PTR_ERR(nd_class);
goto err_class;
+ }
return 0;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 1e2f57f6d297..6dc13e4de396 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -912,4 +912,11 @@ config PVPANIC
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
+config INTEL_PMC_IPC
+ tristate "Intel PMC IPC Driver"
+ ---help---
+ This driver provides support for PMC control on some Intel platforms.
+ The PMC is an ARC processor which defines IPC commands for communication
+ with other entities in the CPU.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index b3e54ed863c3..dda95a985321 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -59,3 +59,4 @@ obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
+obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 35de903cb506..ed317ccac4a2 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -307,7 +307,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
};
static struct calling_interface_buffer *buffer;
-static struct page *bufferpage;
static DEFINE_MUTEX(buffer_mutex);
static int hwswitch_state;
@@ -424,45 +423,125 @@ static inline int dell_smi_error(int value)
}
}
-/* Derived from information in DellWirelessCtl.cpp:
- Class 17, select 11 is radio control. It returns an array of 32-bit values.
-
- Input byte 0 = 0: Wireless information
-
- result[0]: return code
- result[1]:
- Bit 0: Hardware switch supported
- Bit 1: Wifi locator supported
- Bit 2: Wifi is supported
- Bit 3: Bluetooth is supported
- Bit 4: WWAN is supported
- Bit 5: Wireless keyboard supported
- Bits 6-7: Reserved
- Bit 8: Wifi is installed
- Bit 9: Bluetooth is installed
- Bit 10: WWAN is installed
- Bits 11-15: Reserved
- Bit 16: Hardware switch is on
- Bit 17: Wifi is blocked
- Bit 18: Bluetooth is blocked
- Bit 19: WWAN is blocked
- Bits 20-31: Reserved
- result[2]: NVRAM size in bytes
- result[3]: NVRAM format version number
-
- Input byte 0 = 2: Wireless switch configuration
- result[0]: return code
- result[1]:
- Bit 0: Wifi controlled by switch
- Bit 1: Bluetooth controlled by switch
- Bit 2: WWAN controlled by switch
- Bits 3-6: Reserved
- Bit 7: Wireless switch config locked
- Bit 8: Wifi locator enabled
- Bits 9-14: Reserved
- Bit 15: Wifi locator setting locked
- Bits 16-31: Reserved
-*/
+/*
+ * Derived from information in smbios-wireless-ctl:
+ *
+ * cbSelect 17, Value 11
+ *
+ * Return Wireless Info
+ * cbArg1, byte0 = 0x00
+ *
+ * cbRes1 Standard return codes (0, -1, -2)
+ * cbRes2 Info bit flags:
+ *
+ * 0 Hardware switch supported (1)
+ * 1 WiFi locator supported (1)
+ * 2 WLAN supported (1)
+ * 3 Bluetooth (BT) supported (1)
+ * 4 WWAN supported (1)
+ * 5 Wireless KBD supported (1)
+ * 6 UWB supported (1)
+ * 7 WiGig supported (1)
+ * 8 WLAN installed (1)
+ * 9 BT installed (1)
+ * 10 WWAN installed (1)
+ * 11 UWB installed (1)
+ * 12 WiGig installed (1)
+ * 13-15 Reserved (0)
+ * 16 Hardware (HW) switch is On (1)
+ * 17 WLAN disabled (1)
+ * 18 BT disabled (1)
+ * 19 WWAN disabled (1)
+ * 20 UWB disabled (1)
+ * 21 WiGig disabled (1)
+ * 22-31 Reserved (0)
+ *
+ * cbRes3 NVRAM size in bytes
+ * cbRes4, byte 0 NVRAM format version number
+ *
+ *
+ * Set QuickSet Radio Disable Flag
+ * cbArg1, byte0 = 0x01
+ * cbArg1, byte1
+ * Radio ID value:
+ * 0 Radio Status
+ * 1 WLAN ID
+ * 2 BT ID
+ * 3 WWAN ID
+ * 4 UWB ID
+ * 5 WIGIG ID
+ * cbArg1, byte2 Flag bits:
+ * 0 QuickSet disables radio (1)
+ * 1-7 Reserved (0)
+ *
+ * cbRes1 Standard return codes (0, -1, -2)
+ * cbRes2 QuickSet (QS) radio disable bit map:
+ * 0 QS disables WLAN
+ * 1 QS disables BT
+ * 2 QS disables WWAN
+ * 3 QS disables UWB
+ * 4 QS disables WIGIG
+ * 5-31 Reserved (0)
+ *
+ * Wireless Switch Configuration
+ * cbArg1, byte0 = 0x02
+ *
+ * cbArg1, byte1
+ * Subcommand:
+ * 0 Get config
+ * 1 Set config
+ * 2 Set WiFi locator enable/disable
+ * cbArg1, byte2
+ * Switch settings (if byte 1==1):
+ * 0 WLAN switch control (1)
+ * 1 BT switch control (1)
+ * 2 WWAN switch control (1)
+ * 3 UWB switch control (1)
+ * 4 WiGig switch control (1)
+ * 5-7 Reserved (0)
+ * cbArg1, byte2 Enable bits (if byte 1==2):
+ * 0 Enable WiFi locator (1)
+ *
+ * cbRes1 Standard return codes (0, -1, -2)
+ * cbRes2 QuickSet radio disable bit map:
+ * 0 WLAN controlled by switch (1)
+ * 1 BT controlled by switch (1)
+ * 2 WWAN controlled by switch (1)
+ * 3 UWB controlled by switch (1)
+ * 4 WiGig controlled by switch (1)
+ * 5-6 Reserved (0)
+ * 7 Wireless switch config locked (1)
+ * 8 WiFi locator enabled (1)
+ * 9-14 Reserved (0)
+ * 15 WiFi locator setting locked (1)
+ * 16-31 Reserved (0)
+ *
+ * Read Local Config Data (LCD)
+ * cbArg1, byte0 = 0x10
+ * cbArg1, byte1 NVRAM index low byte
+ * cbArg1, byte2 NVRAM index high byte
+ * cbRes1 Standard return codes (0, -1, -2)
+ * cbRes2 4 bytes read from LCD[index]
+ * cbRes3 4 bytes read from LCD[index+4]
+ * cbRes4 4 bytes read from LCD[index+8]
+ *
+ * Write Local Config Data (LCD)
+ * cbArg1, byte0 = 0x11
+ * cbArg1, byte1 NVRAM index low byte
+ * cbArg1, byte2 NVRAM index high byte
+ * cbArg2 4 bytes to write at LCD[index]
+ * cbArg3 4 bytes to write at LCD[index+4]
+ * cbArg4 4 bytes to write at LCD[index+8]
+ * cbRes1 Standard return codes (0, -1, -2)
+ *
+ * Populate Local Config Data from NVRAM
+ * cbArg1, byte0 = 0x12
+ * cbRes1 Standard return codes (0, -1, -2)
+ *
+ * Commit Local Config Data to NVRAM
+ * cbArg1, byte0 = 0x13
+ * cbRes1 Standard return codes (0, -1, -2)
+ */
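To make the bit tables above concrete, here is a hypothetical helper; it is not part of the patch, the function name is illustrative only, and BIT() comes from <linux/bitops.h> as used elsewhere in this file.

/* Sketch only: true if the "Return Wireless Info" cbRes2 word says
 * WLAN is both supported (bit 2) and installed (bit 8), per the
 * table documented above.
 */
static bool dell_wlan_present(u32 status)
{
	return (status & BIT(2)) && (status & BIT(8));
}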
static int dell_rfkill_set(void *data, bool blocked)
{
@@ -550,12 +629,21 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
(status & BIT(4)) >> 4);
seq_printf(s, "Bit 5 : Wireless keyboard supported: %lu\n",
(status & BIT(5)) >> 5);
+ seq_printf(s, "Bit 6 : UWB supported: %lu\n",
+ (status & BIT(6)) >> 6);
+ seq_printf(s, "Bit 7 : WiGig supported: %lu\n",
+ (status & BIT(7)) >> 7);
seq_printf(s, "Bit 8 : Wifi is installed: %lu\n",
(status & BIT(8)) >> 8);
seq_printf(s, "Bit 9 : Bluetooth is installed: %lu\n",
(status & BIT(9)) >> 9);
seq_printf(s, "Bit 10: WWAN is installed: %lu\n",
(status & BIT(10)) >> 10);
+ seq_printf(s, "Bit 11: UWB installed: %lu\n",
+ (status & BIT(11)) >> 11);
+ seq_printf(s, "Bit 12: WiGig installed: %lu\n",
+ (status & BIT(12)) >> 12);
+
seq_printf(s, "Bit 16: Hardware switch is on: %lu\n",
(status & BIT(16)) >> 16);
seq_printf(s, "Bit 17: Wifi is blocked: %lu\n",
@@ -564,6 +652,10 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
(status & BIT(18)) >> 18);
seq_printf(s, "Bit 19: WWAN is blocked: %lu\n",
(status & BIT(19)) >> 19);
+ seq_printf(s, "Bit 20: UWB is blocked: %lu\n",
+ (status & BIT(20)) >> 20);
+ seq_printf(s, "Bit 21: WiGig is blocked: %lu\n",
+ (status & BIT(21)) >> 21);
seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state);
seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n",
@@ -572,6 +664,10 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
(hwswitch_state & BIT(1)) >> 1);
seq_printf(s, "Bit 2 : WWAN controlled by switch: %lu\n",
(hwswitch_state & BIT(2)) >> 2);
+ seq_printf(s, "Bit 3 : UWB controlled by switch: %lu\n",
+ (hwswitch_state & BIT(3)) >> 3);
+ seq_printf(s, "Bit 4 : WiGig controlled by switch: %lu\n",
+ (hwswitch_state & BIT(4)) >> 4);
seq_printf(s, "Bit 7 : Wireless switch config locked: %lu\n",
(hwswitch_state & BIT(7)) >> 7);
seq_printf(s, "Bit 8 : Wifi locator enabled: %lu\n",
@@ -1972,12 +2068,11 @@ static int __init dell_init(void)
* Allocate buffer below 4GB for SMI data--only 32-bit physical addr
* is passed to SMI handler.
*/
- bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32);
- if (!bufferpage) {
+ buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+ if (!buffer) {
ret = -ENOMEM;
goto fail_buffer;
}
- buffer = page_address(bufferpage);
ret = dell_setup_rfkill();
@@ -2034,7 +2129,7 @@ static int __init dell_init(void)
fail_backlight:
dell_cleanup_rfkill();
fail_rfkill:
- free_page((unsigned long)bufferpage);
+ free_page((unsigned long)buffer);
fail_buffer:
platform_device_del(platform_device);
fail_platform_device2:
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
new file mode 100644
index 000000000000..d734763dab69
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -0,0 +1,767 @@
+/*
+ * intel_pmc_ipc.c: Driver for the Intel PMC IPC mechanism
+ *
+ * (C) Copyright 2014-2015 Intel Corporation
+ *
+ * This driver is based on the Intel SCU IPC driver (intel_scu_ipc.c) by
+ * Sreedhara DS <sreedhara.ds@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * The PMC, which runs on an ARC processor, communicates with other entities
+ * on the IA core through an IPC mechanism that relays messages between them.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/pm_qos.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/acpi.h>
+#include <asm/intel_pmc_ipc.h>
+#include <linux/mfd/lpc_ich.h>
+
+/*
+ * IPC registers
+ * An IA write to the IPC_CMD command register triggers an interrupt to the
+ * ARC. The ARC handles and services the interrupt, writing optional data to
+ * the IPC1 registers and updating the IPC_STS response register with status.
+ */
+#define IPC_CMD 0x0
+#define IPC_CMD_MSI 0x100
+#define IPC_CMD_SIZE 16
+#define IPC_CMD_SUBCMD 12
+#define IPC_STATUS 0x04
+#define IPC_STATUS_IRQ 0x4
+#define IPC_STATUS_ERR 0x2
+#define IPC_STATUS_BUSY 0x1
+#define IPC_SPTR 0x08
+#define IPC_DPTR 0x0C
+#define IPC_WRITE_BUFFER 0x80
+#define IPC_READ_BUFFER 0x90
+
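As a quick orientation (not part of the patch), the defines above imply the IPC_CMD word layout assembled later by intel_pmc_ipc_raw_cmd(): transfer size in bits 16-23, subcommand in bits 12-15, command code in the low bits. A sketch of that encoding:

/* Sketch only: mirrors the command-word composition performed by
 * intel_pmc_ipc_raw_cmd() further down in this file.
 */
static inline u32 pmc_ipc_cmd_word(u32 cmd, u32 sub, u32 inlen)
{
	return (inlen << IPC_CMD_SIZE) | (sub << IPC_CMD_SUBCMD) | cmd;
}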
+/*
+ * 16-byte buffer for sending data associated with IPC command.
+ */
+#define IPC_DATA_BUFFER_SIZE 16
+
+#define IPC_LOOP_CNT 3000000
+#define IPC_MAX_SEC 3
+
+#define IPC_TRIGGER_MODE_IRQ true
+
+/* exported resources from IFWI */
+#define PLAT_RESOURCE_IPC_INDEX 0
+#define PLAT_RESOURCE_IPC_SIZE 0x1000
+#define PLAT_RESOURCE_GCR_SIZE 0x1000
+#define PLAT_RESOURCE_PUNIT_DATA_INDEX 1
+#define PLAT_RESOURCE_PUNIT_INTER_INDEX 2
+#define PLAT_RESOURCE_ACPI_IO_INDEX 0
+
+/*
+ * BIOS does not create an ACPI device for each PMC function, but
+ * exports multiple resources from one ACPI device (IPC) for
+ * multiple functions. This driver is responsible for creating a
+ * platform device and exporting resources for those functions.
+ */
+#define TCO_DEVICE_NAME "iTCO_wdt"
+#define SMI_EN_OFFSET 0x30
+#define SMI_EN_SIZE 4
+#define TCO_BASE_OFFSET 0x60
+#define TCO_REGS_SIZE 16
+#define PUNIT_DEVICE_NAME "intel_punit_ipc"
+
+static const int iTCO_version = 3;
+
+static struct intel_pmc_ipc_dev {
+ struct device *dev;
+ void __iomem *ipc_base;
+ bool irq_mode;
+ int irq;
+ int cmd;
+ struct completion cmd_complete;
+
+ /* The following PMC BARs share the same ACPI device with the IPC */
+ void *acpi_io_base;
+ int acpi_io_size;
+ struct platform_device *tco_dev;
+
+ /* gcr */
+ void *gcr_base;
+ int gcr_size;
+
+ /* punit */
+ void *punit_base;
+ int punit_size;
+ void *punit_base2;
+ int punit_size2;
+ struct platform_device *punit_dev;
+} ipcdev;
+
+static char *ipc_err_sources[] = {
+ [IPC_ERR_NONE] =
+ "no error",
+ [IPC_ERR_CMD_NOT_SUPPORTED] =
+ "command not supported",
+ [IPC_ERR_CMD_NOT_SERVICED] =
+ "command not serviced",
+ [IPC_ERR_UNABLE_TO_SERVICE] =
+ "unable to service",
+ [IPC_ERR_CMD_INVALID] =
+ "command invalid",
+ [IPC_ERR_CMD_FAILED] =
+ "command failed",
+ [IPC_ERR_EMSECURITY] =
+ "Invalid Battery",
+ [IPC_ERR_UNSIGNEDKERNEL] =
+ "Unsigned kernel",
+};
+
+/* Prevent concurrent calls to the PMC */
+static DEFINE_MUTEX(ipclock);
+
+static inline void ipc_send_command(u32 cmd)
+{
+ ipcdev.cmd = cmd;
+ if (ipcdev.irq_mode) {
+ reinit_completion(&ipcdev.cmd_complete);
+ cmd |= IPC_CMD_MSI;
+ }
+ writel(cmd, ipcdev.ipc_base + IPC_CMD);
+}
+
+static inline u32 ipc_read_status(void)
+{
+ return readl(ipcdev.ipc_base + IPC_STATUS);
+}
+
+static inline void ipc_data_writel(u32 data, u32 offset)
+{
+ writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
+}
+
+static inline u8 ipc_data_readb(u32 offset)
+{
+ return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
+}
+
+static inline u32 ipc_data_readl(u32 offset)
+{
+ return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
+}
+
+static int intel_pmc_ipc_check_status(void)
+{
+ int status;
+ int ret = 0;
+
+ if (ipcdev.irq_mode) {
+ if (!wait_for_completion_timeout(&ipcdev.cmd_complete,
+ IPC_MAX_SEC * HZ))
+ ret = -ETIMEDOUT;
+ } else {
+ int loop_count = IPC_LOOP_CNT;
+
+ while ((ipc_read_status() & IPC_STATUS_BUSY) && --loop_count)
+ udelay(1);
+ if (loop_count == 0)
+ ret = -ETIMEDOUT;
+ }
+
+ status = ipc_read_status();
+ if (ret == -ETIMEDOUT) {
+ dev_err(ipcdev.dev,
+ "IPC timed out, TS=0x%x, CMD=0x%x\n",
+ status, ipcdev.cmd);
+ return ret;
+ }
+
+ if (status & IPC_STATUS_ERR) {
+ int i;
+
+ ret = -EIO;
+ i = (status >> IPC_CMD_SIZE) & 0xFF;
+ if (i < ARRAY_SIZE(ipc_err_sources))
+ dev_err(ipcdev.dev,
+ "IPC failed: %s, STS=0x%x, CMD=0x%x\n",
+ ipc_err_sources[i], status, ipcdev.cmd);
+ else
+ dev_err(ipcdev.dev,
+ "IPC failed: unknown, STS=0x%x, CMD=0x%x\n",
+ status, ipcdev.cmd);
+ if ((i == IPC_ERR_UNSIGNEDKERNEL) || (i == IPC_ERR_EMSECURITY))
+ ret = -EACCES;
+ }
+
+ return ret;
+}
+
+/*
+ * intel_pmc_ipc_simple_command() - send a simple IPC command with no data
+ * @cmd: command
+ * @sub: sub type
+ */
+int intel_pmc_ipc_simple_command(int cmd, int sub)
+{
+ int ret;
+
+ mutex_lock(&ipclock);
+ if (ipcdev.dev == NULL) {
+ mutex_unlock(&ipclock);
+ return -ENODEV;
+ }
+ ipc_send_command(sub << IPC_CMD_SUBCMD | cmd);
+ ret = intel_pmc_ipc_check_status();
+ mutex_unlock(&ipclock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
+
+/*
+ * intel_pmc_ipc_raw_cmd() - send an IPC command with raw pointer registers
+ * @cmd: command
+ * @sub: sub type
+ * @in: input data
+ * @inlen: input length in bytes
+ * @out: output data
+ * @outlen: output length in dwords
+ * @dptr: value written to the DPTR register
+ * @sptr: value written to the SPTR register
+ */
+int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+ u32 outlen, u32 dptr, u32 sptr)
+{
+ u32 wbuf[4] = { 0 };
+ int ret;
+ int i;
+
+ if (inlen > IPC_DATA_BUFFER_SIZE || outlen > IPC_DATA_BUFFER_SIZE / 4)
+ return -EINVAL;
+
+ mutex_lock(&ipclock);
+ if (ipcdev.dev == NULL) {
+ mutex_unlock(&ipclock);
+ return -ENODEV;
+ }
+ memcpy(wbuf, in, inlen);
+ writel(dptr, ipcdev.ipc_base + IPC_DPTR);
+ writel(sptr, ipcdev.ipc_base + IPC_SPTR);
+ /* The input data registers are 32 bits wide; inlen is in bytes */
+ for (i = 0; i < ((inlen + 3) / 4); i++)
+ ipc_data_writel(wbuf[i], 4 * i);
+ ipc_send_command((inlen << IPC_CMD_SIZE) |
+ (sub << IPC_CMD_SUBCMD) | cmd);
+ ret = intel_pmc_ipc_check_status();
+ if (!ret) {
+ /* out is read from 32-bit registers; outlen is in dwords */
+ for (i = 0; i < outlen; i++)
+ *out++ = ipc_data_readl(4 * i);
+ }
+ mutex_unlock(&ipclock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
+
+/*
+ * intel_pmc_ipc_command() - send an IPC command with data
+ * @cmd: command
+ * @sub: sub type
+ * @in: input data
+ * @inlen: input length in bytes
+ * @out: output data
+ * @outlen: output length in dwords
+ */
+int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+ u32 *out, u32 outlen)
+{
+ return intel_pmc_ipc_raw_cmd(cmd, sub, in, inlen, out, outlen, 0, 0);
+}
+EXPORT_SYMBOL_GPL(intel_pmc_ipc_command);
+
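A hedged caller sketch for the API exported above; it is not part of the patch, and the command and subcommand values are placeholders where real users pass PMC-defined codes.

/* Hypothetical caller: send a 4-byte payload and read one dword back.
 * Per the kernel-doc above, inlen is in bytes and outlen in dwords.
 */
static int example_pmc_query(void)
{
	u8 in[4] = { 0 };
	u32 out = 0;
	int ret;

	ret = intel_pmc_ipc_command(0 /* cmd, placeholder */,
				    0 /* sub, placeholder */,
				    in, sizeof(in), &out, 1);
	if (ret)
		return ret;
	pr_info("PMC replied: 0x%x\n", out);
	return 0;
}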
+static irqreturn_t ioc(int irq, void *dev_id)
+{
+ int status;
+
+ if (ipcdev.irq_mode) {
+ status = ipc_read_status();
+ writel(status | IPC_STATUS_IRQ, ipcdev.ipc_base + IPC_STATUS);
+ }
+ complete(&ipcdev.cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ resource_size_t pci_resource;
+ int ret;
+ int len;
+
+ ipcdev.dev = &pci_dev_get(pdev)->dev;
+ ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_regions(pdev, "intel_pmc_ipc");
+ if (ret)
+ return ret;
+
+ pci_resource = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ if (!pci_resource || !len) {
+ dev_err(&pdev->dev, "Failed to get resource\n");
+ return -ENOMEM;
+ }
+
+ init_completion(&ipcdev.cmd_complete);
+
+ if (request_irq(pdev->irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) {
+ dev_err(&pdev->dev, "Failed to request irq\n");
+ return -EBUSY;
+ }
+
+ ipcdev.ipc_base = ioremap_nocache(pci_resource, len);
+ if (!ipcdev.ipc_base) {
+ dev_err(&pdev->dev, "Failed to ioremap ipc base\n");
+ free_irq(pdev->irq, &ipcdev);
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+static void ipc_pci_remove(struct pci_dev *pdev)
+{
+ free_irq(pdev->irq, &ipcdev);
+ pci_release_regions(pdev);
+ pci_dev_put(pdev);
+ iounmap(ipcdev.ipc_base);
+ ipcdev.dev = NULL;
+}
+
+static const struct pci_device_id ipc_pci_ids[] = {
+ {PCI_VDEVICE(INTEL, 0x0a94), 0},
+ {PCI_VDEVICE(INTEL, 0x1a94), 0},
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, ipc_pci_ids);
+
+static struct pci_driver ipc_pci_driver = {
+ .name = "intel_pmc_ipc",
+ .id_table = ipc_pci_ids,
+ .probe = ipc_pci_probe,
+ .remove = ipc_pci_remove,
+};
+
+static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int subcmd;
+ int cmd;
+ int ret;
+
+ ret = sscanf(buf, "%d %d", &cmd, &subcmd);
+ if (ret != 2) {
+ dev_err(dev, "Error args\n");
+ return -EINVAL;
+ }
+
+ ret = intel_pmc_ipc_simple_command(cmd, subcmd);
+ if (ret) {
+ dev_err(dev, "command %d error with %d\n", cmd, ret);
+ return ret;
+ }
+ return (ssize_t)count;
+}
+
+static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int subcmd;
+ int ret;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val)
+ subcmd = 1;
+ else
+ subcmd = 0;
+ ret = intel_pmc_ipc_simple_command(PMC_IPC_NORTHPEAK_CTRL, subcmd);
+ if (ret) {
+ dev_err(dev, "command north %d error with %d\n", subcmd, ret);
+ return ret;
+ }
+ return (ssize_t)count;
+}
+
+static DEVICE_ATTR(simplecmd, S_IWUSR,
+ NULL, intel_pmc_ipc_simple_cmd_store);
+static DEVICE_ATTR(northpeak, S_IWUSR,
+ NULL, intel_pmc_ipc_northpeak_store);
+
+static struct attribute *intel_ipc_attrs[] = {
+ &dev_attr_northpeak.attr,
+ &dev_attr_simplecmd.attr,
+ NULL
+};
+
+static const struct attribute_group intel_ipc_group = {
+ .attrs = intel_ipc_attrs,
+};
+
+#define PUNIT_RESOURCE_INTER 1
+static struct resource punit_res[] = {
+ /* Punit */
+ {
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+#define TCO_RESOURCE_ACPI_IO 0
+#define TCO_RESOURCE_SMI_EN_IO 1
+#define TCO_RESOURCE_GCR_MEM 2
+static struct resource tco_res[] = {
+ /* ACPI - TCO */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* ACPI - SMI */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* GCS */
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct lpc_ich_info tco_info = {
+ .name = "Apollo Lake SoC",
+ .iTCO_version = 3,
+};
+
+static int ipc_create_punit_device(void)
+{
+ struct platform_device *pdev;
+ struct resource *res;
+ int ret;
+
+ pdev = platform_device_alloc(PUNIT_DEVICE_NAME, -1);
+ if (!pdev) {
+ dev_err(ipcdev.dev, "Failed to alloc punit platform device\n");
+ return -ENOMEM;
+ }
+
+ pdev->dev.parent = ipcdev.dev;
+
+ res = punit_res;
+ res->start = (resource_size_t)ipcdev.punit_base;
+ res->end = res->start + ipcdev.punit_size - 1;
+
+ res = punit_res + PUNIT_RESOURCE_INTER;
+ res->start = (resource_size_t)ipcdev.punit_base2;
+ res->end = res->start + ipcdev.punit_size2 - 1;
+
+ ret = platform_device_add_resources(pdev, punit_res,
+ ARRAY_SIZE(punit_res));
+ if (ret) {
+ dev_err(ipcdev.dev, "Failed to add platform punit resources\n");
+ goto err;
+ }
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ dev_err(ipcdev.dev, "Failed to add punit platform device\n");
+ goto err;
+ }
+ ipcdev.punit_dev = pdev;
+
+ return 0;
+err:
+ platform_device_put(pdev);
+ return ret;
+}
+
+static int ipc_create_tco_device(void)
+{
+ struct platform_device *pdev;
+ struct resource *res;
+ int ret;
+
+ pdev = platform_device_alloc(TCO_DEVICE_NAME, -1);
+ if (!pdev) {
+ dev_err(ipcdev.dev, "Failed to alloc tco platform device\n");
+ return -ENOMEM;
+ }
+
+ pdev->dev.parent = ipcdev.dev;
+
+ res = tco_res + TCO_RESOURCE_ACPI_IO;
+ res->start = (resource_size_t)ipcdev.acpi_io_base + TCO_BASE_OFFSET;
+ res->end = res->start + TCO_REGS_SIZE - 1;
+
+ res = tco_res + TCO_RESOURCE_SMI_EN_IO;
+ res->start = (resource_size_t)ipcdev.acpi_io_base + SMI_EN_OFFSET;
+ res->end = res->start + SMI_EN_SIZE - 1;
+
+ res = tco_res + TCO_RESOURCE_GCR_MEM;
+ res->start = (resource_size_t)ipcdev.gcr_base;
+ res->end = res->start + ipcdev.gcr_size - 1;
+
+ ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
+ if (ret) {
+ dev_err(ipcdev.dev, "Failed to add tco platform resources\n");
+ goto err;
+ }
+
+ ret = platform_device_add_data(pdev, &tco_info,
+ sizeof(struct lpc_ich_info));
+ if (ret) {
+ dev_err(ipcdev.dev, "Failed to add tco platform data\n");
+ goto err;
+ }
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ dev_err(ipcdev.dev, "Failed to add tco platform device\n");
+ goto err;
+ }
+ ipcdev.tco_dev = pdev;
+
+ return 0;
+err:
+ platform_device_put(pdev);
+ return ret;
+}
+
+static int ipc_create_pmc_devices(void)
+{
+ int ret;
+
+ ret = ipc_create_tco_device();
+ if (ret) {
+ dev_err(ipcdev.dev, "Failed to add tco platform device\n");
+ return ret;
+ }
+ ret = ipc_create_punit_device();
+ if (ret) {
+ dev_err(ipcdev.dev, "Failed to add punit platform device\n");
+ platform_device_unregister(ipcdev.tco_dev);
+ }
+ return ret;
+}
+
+static int ipc_plat_get_res(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *addr;
+ int size;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO,
+ PLAT_RESOURCE_ACPI_IO_INDEX);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get io resource\n");
+ return -ENXIO;
+ }
+ size = resource_size(res);
+ ipcdev.acpi_io_base = (void *)res->start;
+ ipcdev.acpi_io_size = size;
+ dev_info(&pdev->dev, "io res: %llx %x\n",
+ (long long)res->start, (int)resource_size(res));
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_PUNIT_DATA_INDEX);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get punit resource\n");
+ return -ENXIO;
+ }
+ size = resource_size(res);
+ ipcdev.punit_base = (void *)res->start;
+ ipcdev.punit_size = size;
+ dev_info(&pdev->dev, "punit data res: %llx %x\n",
+ (long long)res->start, (int)resource_size(res));
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_PUNIT_INTER_INDEX);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get punit inter resource\n");
+ return -ENXIO;
+ }
+ size = resource_size(res);
+ ipcdev.punit_base2 = (void *)res->start;
+ ipcdev.punit_size2 = size;
+ dev_info(&pdev->dev, "punit interface res: %llx %x\n",
+ (long long)res->start, (int)resource_size(res));
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_IPC_INDEX);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get ipc resource\n");
+ return -ENXIO;
+ }
+ size = PLAT_RESOURCE_IPC_SIZE;
+ if (!request_mem_region(res->start, size, pdev->name)) {
+ dev_err(&pdev->dev, "Failed to request ipc resource\n");
+ return -EBUSY;
+ }
+ addr = ioremap_nocache(res->start, size);
+ if (!addr) {
+ dev_err(&pdev->dev, "I/O memory remapping failed\n");
+ release_mem_region(res->start, size);
+ return -ENOMEM;
+ }
+ ipcdev.ipc_base = addr;
+
+ ipcdev.gcr_base = (void *)(res->start + size);
+ ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE;
+ dev_info(&pdev->dev, "ipc res: %llx %x\n",
+ (long long)res->start, (int)resource_size(res));
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ipc_acpi_ids[] = {
+ { "INT34D2", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids);
+#endif
+
+static int ipc_plat_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ ipcdev.dev = &pdev->dev;
+ ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
+ init_completion(&ipcdev.cmd_complete);
+
+ ipcdev.irq = platform_get_irq(pdev, 0);
+ if (ipcdev.irq < 0) {
+ dev_err(&pdev->dev, "Failed to get irq\n");
+ return -EINVAL;
+ }
+
+ ret = ipc_plat_get_res(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request resource\n");
+ return ret;
+ }
+
+ ret = ipc_create_pmc_devices();
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to create pmc devices\n");
+ goto err_device;
+ }
+
+ if (request_irq(ipcdev.irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) {
+ dev_err(&pdev->dev, "Failed to request irq\n");
+ ret = -EBUSY;
+ goto err_irq;
+ }
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to create sysfs group %d\n",
+ ret);
+ goto err_sys;
+ }
+
+ return 0;
+err_sys:
+ free_irq(ipcdev.irq, &ipcdev);
+err_irq:
+ platform_device_unregister(ipcdev.tco_dev);
+ platform_device_unregister(ipcdev.punit_dev);
+err_device:
+ iounmap(ipcdev.ipc_base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_IPC_INDEX);
+ if (res)
+ release_mem_region(res->start, PLAT_RESOURCE_IPC_SIZE);
+ return ret;
+}
+
+static int ipc_plat_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
+ free_irq(ipcdev.irq, &ipcdev);
+ platform_device_unregister(ipcdev.tco_dev);
+ platform_device_unregister(ipcdev.punit_dev);
+ iounmap(ipcdev.ipc_base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_IPC_INDEX);
+ if (res)
+ release_mem_region(res->start, PLAT_RESOURCE_IPC_SIZE);
+ ipcdev.dev = NULL;
+ return 0;
+}
+
+static struct platform_driver ipc_plat_driver = {
+ .remove = ipc_plat_remove,
+ .probe = ipc_plat_probe,
+ .driver = {
+ .name = "pmc-ipc-plat",
+ .acpi_match_table = ACPI_PTR(ipc_acpi_ids),
+ },
+};
+
+static int __init intel_pmc_ipc_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&ipc_plat_driver);
+ if (ret) {
+ pr_err("Failed to register PMC ipc platform driver\n");
+ return ret;
+ }
+ ret = pci_register_driver(&ipc_pci_driver);
+ if (ret) {
+ pr_err("Failed to register PMC ipc pci driver\n");
+ platform_driver_unregister(&ipc_plat_driver);
+ return ret;
+ }
+ return ret;
+}
+
+static void __exit intel_pmc_ipc_exit(void)
+{
+ pci_unregister_driver(&ipc_pci_driver);
+ platform_driver_unregister(&ipc_plat_driver);
+}
+
+MODULE_AUTHOR("Zha Qipeng <qipeng.zha@intel.com>");
+MODULE_DESCRIPTION("Intel PMC IPC driver");
+MODULE_LICENSE("GPL");
+
+/* Some modules are dependent on this, so init earlier */
+fs_initcall(intel_pmc_ipc_init);
+module_exit(intel_pmc_ipc_exit);
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index e36542564131..89aa976f0ab2 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -82,7 +82,7 @@ static int get_state(u32 *out, u8 instance)
tmp = 0;
}
- if (result.length > 0 && result.pointer)
+ if (result.length > 0)
kfree(result.pointer);
switch (instance) {
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 515f33882ab8..49c1720df59a 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -7,7 +7,6 @@
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*/
-#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -23,41 +22,25 @@ static const struct pnp_device_id pnp_dev_table[] = {
{"", 0}
};
-#ifdef CONFIG_ACPI
-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
-{
- u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
- return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
-}
-#else
-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
-{
- struct resource *res;
-
- res = io ? request_region(start, length, desc) :
- request_mem_region(start, length, desc);
- if (res) {
- res->flags &= ~IORESOURCE_BUSY;
- return true;
- }
- return false;
-}
-#endif
-
static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
{
char *regionid;
const char *pnpid = dev_name(&dev->dev);
resource_size_t start = r->start, end = r->end;
- bool reserved;
+ struct resource *res;
regionid = kmalloc(16, GFP_KERNEL);
if (!regionid)
return;
snprintf(regionid, 16, "pnp %s", pnpid);
- reserved = __reserve_range(start, end - start + 1, !!port, regionid);
- if (!reserved)
+ if (port)
+ res = request_region(start, end - start + 1, regionid);
+ else
+ res = request_mem_region(start, end - start + 1, regionid);
+ if (res)
+ res->flags &= ~IORESOURCE_BUSY;
+ else
kfree(regionid);
/*
@@ -66,7 +49,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
* have double reservations.
*/
dev_info(&dev->dev, "%pR %s reserved\n", r,
- reserved ? "has been" : "could not be");
+ res ? "has been" : "could not be");
}
static void reserve_resources_of_dev(struct pnp_dev *dev)
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 5e343bab9458..28c711f0ac6b 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -41,6 +41,19 @@ config STE_MODEM_RPROC
This can be either built-in or a loadable module.
If unsure say N.
+config WKUP_M3_RPROC
+ tristate "AMx3xx Wakeup M3 remoteproc support"
+ depends on SOC_AM33XX || SOC_AM43XX
+ select REMOTEPROC
+ help
+ Say y here to support Wakeup M3 remote processor on TI AM33xx
+ and AM43xx family of SoCs.
+
+ Required for Suspend-to-RAM on AM33xx and AM43xx SoCs. Also needed
+ for deep CPUIdle states on AM33xx SoCs. Allows for loading of the
+ firmware onto these remote processors.
+ If unsure say N.
+
config DA8XX_REMOTEPROC
tristate "DA8xx/OMAP-L13x remoteproc support"
depends on ARCH_DAVINCI_DA8XX
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index ac2ff75686d2..81b04d1e2e58 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -9,4 +9,5 @@ remoteproc-y += remoteproc_virtio.o
remoteproc-y += remoteproc_elf_loader.o
obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o
+obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index f8d6a0661c14..009e56f67de2 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -26,8 +26,7 @@
static char *da8xx_fw_name;
module_param(da8xx_fw_name, charp, S_IRUGO);
MODULE_PARM_DESC(da8xx_fw_name,
- "\n\t\tName of DSP firmware file in /lib/firmware"
- " (if not specified defaults to 'rproc-dsp-fw')");
+ "Name of DSP firmware file in /lib/firmware (if not specified defaults to 'rproc-dsp-fw')");
/*
* OMAP-L138 Technical References:
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 11cdb119e4f3..8b3130f22b42 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -44,6 +44,9 @@
#include "remoteproc_internal.h"
+static DEFINE_MUTEX(rproc_list_mutex);
+static LIST_HEAD(rproc_list);
+
typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
struct resource_table *table, int len);
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
@@ -132,32 +135,48 @@ static void rproc_disable_iommu(struct rproc *rproc)
iommu_detach_device(domain, dev);
iommu_domain_free(domain);
-
- return;
}
-/*
+/**
+ * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
+ * @rproc: handle of a remote processor
+ * @da: remoteproc device address to translate
+ * @len: length of the memory region @da is pointing to
+ *
* Some remote processors will ask us to allocate them physically contiguous
* memory regions (which we call "carveouts"), and map them to specific
- * device addresses (which are hardcoded in the firmware).
+ * device addresses (which are hardcoded in the firmware). They may also have
+ * dedicated memory regions internal to the processors, and use them either
+ * exclusively or alongside carveouts.
*
* They may then ask us to copy objects into specific device addresses (e.g.
* code/data sections) or expose us certain symbols in other device address
* (e.g. their trace buffer).
*
- * This function is an internal helper with which we can go over the allocated
- * carveouts and translate specific device address to kernel virtual addresses
- * so we can access the referenced memory.
+ * This function is a helper with which we can go over the allocated
+ * carveouts and translate specific device addresses to kernel virtual
+ * addresses so we can access the referenced memory. It also supports
+ * translations on the internal remoteproc memory regions through a
+ * platform implementation specific da_to_va op, if present.
+ *
+ * The function returns a valid kernel address on success or NULL on failure.
*
* Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
* but only on kernel direct mapped RAM memory. Instead, we're just using
- * here the output of the DMA API, which should be more correct.
+ * here the output of the DMA API for the carveouts, which should be more
+ * correct.
*/
void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
{
struct rproc_mem_entry *carveout;
void *ptr = NULL;
+ if (rproc->ops->da_to_va) {
+ ptr = rproc->ops->da_to_va(rproc, da, len);
+ if (ptr)
+ goto out;
+ }
+
list_for_each_entry(carveout, &rproc->carveouts, node) {
int offset = da - carveout->da;
@@ -174,6 +193,7 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
break;
}
+out:
return ptr;
}
EXPORT_SYMBOL(rproc_da_to_va);
@@ -411,10 +431,8 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
}
trace = kzalloc(sizeof(*trace), GFP_KERNEL);
- if (!trace) {
- dev_err(dev, "kzalloc trace failed\n");
+ if (!trace)
return -ENOMEM;
- }
/* set the trace buffer dma properties */
trace->len = rsc->len;
@@ -489,10 +507,8 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
}
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
- if (!mapping) {
- dev_err(dev, "kzalloc mapping failed\n");
+ if (!mapping)
return -ENOMEM;
- }
ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
if (ret) {
@@ -565,10 +581,8 @@ static int rproc_handle_carveout(struct rproc *rproc,
rsc->da, rsc->pa, rsc->len, rsc->flags);
carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
- if (!carveout) {
- dev_err(dev, "kzalloc carveout failed\n");
+ if (!carveout)
return -ENOMEM;
- }
va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
if (!va) {
@@ -768,7 +782,8 @@ static void rproc_resource_cleanup(struct rproc *rproc)
/* clean up carveout allocations */
list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
- dma_free_coherent(dev->parent, entry->len, entry->va, entry->dma);
+ dma_free_coherent(dev->parent, entry->len, entry->va,
+ entry->dma);
list_del(&entry->node);
kfree(entry);
}
@@ -808,9 +823,8 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/* look for the resource table */
table = rproc_find_rsc_table(rproc, fw, &tablesz);
- if (!table) {
+ if (!table)
goto clean_up;
- }
/* Verify that resource table in loaded fw is unchanged */
if (rproc->table_csum != crc32(0, table, tablesz)) {
@@ -911,7 +925,8 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
/* count the number of notify-ids */
rproc->max_notifyid = -1;
- ret = rproc_handle_resources(rproc, tablesz, rproc_count_vrings_handler);
+ ret = rproc_handle_resources(rproc, tablesz,
+ rproc_count_vrings_handler);
if (ret)
goto out;
@@ -1152,6 +1167,50 @@ out:
EXPORT_SYMBOL(rproc_shutdown);
/**
+ * rproc_get_by_phandle() - find a remote processor by phandle
+ * @phandle: phandle to the rproc
+ *
+ * Finds an rproc handle using the remote processor's phandle, and then
+ * returns a handle to the rproc.
+ *
+ * This function increments the remote processor's refcount, so always
+ * use rproc_put() to decrement it back once rproc isn't needed anymore.
+ *
+ * Returns the rproc handle on success, and NULL on failure.
+ */
+#ifdef CONFIG_OF
+struct rproc *rproc_get_by_phandle(phandle phandle)
+{
+ struct rproc *rproc = NULL, *r;
+ struct device_node *np;
+
+ np = of_find_node_by_phandle(phandle);
+ if (!np)
+ return NULL;
+
+ mutex_lock(&rproc_list_mutex);
+ list_for_each_entry(r, &rproc_list, node) {
+ if (r->dev.parent && r->dev.parent->of_node == np) {
+ rproc = r;
+ get_device(&rproc->dev);
+ break;
+ }
+ }
+ mutex_unlock(&rproc_list_mutex);
+
+ of_node_put(np);
+
+ return rproc;
+}
+#else
+struct rproc *rproc_get_by_phandle(phandle phandle)
+{
+ return NULL;
+}
+#endif
+EXPORT_SYMBOL(rproc_get_by_phandle);
+
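A client-side sketch of the lookup exported above; it is not part of the patch, the "rproc" property name and the -EPROBE_DEFER policy are assumptions, and rproc_boot()/rproc_put() are the existing remoteproc APIs.

/* Hypothetical client: resolve a phandle property to an rproc, boot
 * it, and drop the reference on failure. Needs <linux/of.h> and
 * <linux/remoteproc.h>.
 */
static int example_client_boot(struct device_node *np)
{
	struct rproc *rproc;
	u32 ph;
	int ret;

	if (of_property_read_u32(np, "rproc", &ph))
		return -ENODEV;

	rproc = rproc_get_by_phandle(ph);
	if (!rproc)
		return -EPROBE_DEFER;

	ret = rproc_boot(rproc);
	if (ret)
		rproc_put(rproc);
	return ret;
}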
+/**
* rproc_add() - register a remote processor
* @rproc: the remote processor handle to register
*
@@ -1180,6 +1239,11 @@ int rproc_add(struct rproc *rproc)
if (ret < 0)
return ret;
+ /* expose to rproc_get_by_phandle users */
+ mutex_lock(&rproc_list_mutex);
+ list_add(&rproc->node, &rproc_list);
+ mutex_unlock(&rproc_list_mutex);
+
dev_info(dev, "%s is available\n", rproc->name);
dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
@@ -1268,10 +1332,8 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
name_len = strlen(name) + strlen(template) - 2 + 1;
rproc = kzalloc(sizeof(struct rproc) + len + name_len, GFP_KERNEL);
- if (!rproc) {
- dev_err(dev, "%s: kzalloc failed\n", __func__);
+ if (!rproc)
return NULL;
- }
if (!firmware) {
p = (char *)rproc + sizeof(struct rproc) + len;
@@ -1369,6 +1431,11 @@ int rproc_del(struct rproc *rproc)
/* Free the copy of the resource table */
kfree(rproc->cached_table);
+ /* the rproc is downref'ed as soon as it's removed from the list */
+ mutex_lock(&rproc_list_mutex);
+ list_del(&rproc->node);
+ mutex_unlock(&rproc_list_mutex);
+
device_del(&rproc->dev);
return 0;
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 70701a50ddfa..8041b95cb058 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -35,7 +35,7 @@ struct rproc;
* @get_boot_addr: get boot address to entry point specified in firmware
*/
struct rproc_fw_ops {
- struct resource_table *(*find_rsc_table) (struct rproc *rproc,
+ struct resource_table *(*find_rsc_table)(struct rproc *rproc,
const struct firmware *fw,
int *tablesz);
struct resource_table *(*find_loaded_rsc_table)(struct rproc *rproc,
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c
index dd193f35a1ff..53dc17bdd54e 100644
--- a/drivers/remoteproc/ste_modem_rproc.c
+++ b/drivers/remoteproc/ste_modem_rproc.c
@@ -67,8 +67,7 @@ static int sproc_load_segments(struct rproc *rproc, const struct firmware *fw)
static const struct ste_toc_entry *sproc_find_rsc_entry(const void *data)
{
int i;
- const struct ste_toc *toc;
- toc = data;
+ const struct ste_toc *toc = data;
/* Search the table for the resource table */
for (i = 0; i < SPROC_MAX_TOC_ENTRIES &&
@@ -230,6 +229,7 @@ static int sproc_start(struct rproc *rproc)
static int sproc_stop(struct rproc *rproc)
{
struct sproc *sproc = rproc->priv;
+
sproc_dbg(sproc, "stop ste-modem\n");
return sproc->mdev->ops.power(sproc->mdev, false);
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
new file mode 100644
index 000000000000..edf81819cce1
--- /dev/null
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -0,0 +1,257 @@
+/*
+ * TI AMx3 Wakeup M3 Remote Processor driver
+ *
+ * Copyright (C) 2014-2015 Texas Instruments, Inc.
+ *
+ * Dave Gerlach <d-gerlach@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+
+#include <linux/platform_data/wkup_m3.h>
+
+#include "remoteproc_internal.h"
+
+#define WKUPM3_MEM_MAX 2
+
+/**
+ * struct wkup_m3_mem - WkupM3 internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address from Wakeup M3 view
+ * @size: Size of the memory region
+ */
+struct wkup_m3_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct wkup_m3_rproc - WkupM3 remote processor state
+ * @rproc: rproc handle
+ * @pdev: pointer to platform device
+ * @mem: WkupM3 memory information
+ */
+struct wkup_m3_rproc {
+ struct rproc *rproc;
+ struct platform_device *pdev;
+ struct wkup_m3_mem mem[WKUPM3_MEM_MAX];
+};
+
+static int wkup_m3_rproc_start(struct rproc *rproc)
+{
+ struct wkup_m3_rproc *wkupm3 = rproc->priv;
+ struct platform_device *pdev = wkupm3->pdev;
+ struct device *dev = &pdev->dev;
+ struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+
+ if (pdata->deassert_reset(pdev, pdata->reset_name)) {
+ dev_err(dev, "Unable to reset wkup_m3!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int wkup_m3_rproc_stop(struct rproc *rproc)
+{
+ struct wkup_m3_rproc *wkupm3 = rproc->priv;
+ struct platform_device *pdev = wkupm3->pdev;
+ struct device *dev = &pdev->dev;
+ struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+
+ if (pdata->assert_reset(pdev, pdata->reset_name)) {
+ dev_err(dev, "Unable to assert reset of wkup_m3!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct wkup_m3_rproc *wkupm3 = rproc->priv;
+ void *va = NULL;
+ int i;
+ u32 offset;
+
+ if (len <= 0)
+ return NULL;
+
+ for (i = 0; i < WKUPM3_MEM_MAX; i++) {
+ if (da >= wkupm3->mem[i].dev_addr && da + len <=
+ wkupm3->mem[i].dev_addr + wkupm3->mem[i].size) {
+ offset = da - wkupm3->mem[i].dev_addr;
+ /* __force to make sparse happy with type conversion */
+ va = (__force void *)(wkupm3->mem[i].cpu_addr + offset);
+ break;
+ }
+ }
+
+ return va;
+}
+
+static struct rproc_ops wkup_m3_rproc_ops = {
+ .start = wkup_m3_rproc_start,
+ .stop = wkup_m3_rproc_stop,
+ .da_to_va = wkup_m3_rproc_da_to_va,
+};
+
+static const struct of_device_id wkup_m3_rproc_of_match[] = {
+ { .compatible = "ti,am3352-wkup-m3", },
+ { .compatible = "ti,am4372-wkup-m3", },
+ {},
+};
+
+static int wkup_m3_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wkup_m3_platform_data *pdata = dev->platform_data;
+ /* umem always needs to be processed first */
+ const char *mem_names[WKUPM3_MEM_MAX] = { "umem", "dmem" };
+ struct wkup_m3_rproc *wkupm3;
+ const char *fw_name;
+ struct rproc *rproc;
+ struct resource *res;
+ const __be32 *addrp;
+ u32 l4_offset = 0;
+ u64 size;
+ int ret;
+ int i;
+
+ if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
+ pdata->reset_name)) {
+ dev_err(dev, "Platform data missing!\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_string(dev->of_node, "ti,pm-firmware",
+ &fw_name);
+ if (ret) {
+ dev_err(dev, "No firmware filename given\n");
+ return -ENODEV;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
+ goto err;
+ }
+
+ rproc = rproc_alloc(dev, "wkup_m3", &wkup_m3_rproc_ops,
+ fw_name, sizeof(*wkupm3));
+ if (!rproc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ wkupm3 = rproc->priv;
+ wkupm3->rproc = rproc;
+ wkupm3->pdev = pdev;
+
+ for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ wkupm3->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wkupm3->mem[i].cpu_addr)) {
+ dev_err(&pdev->dev, "devm_ioremap_resource failed for resource %d\n",
+ i);
+ ret = PTR_ERR(wkupm3->mem[i].cpu_addr);
+ goto err;
+ }
+ wkupm3->mem[i].bus_addr = res->start;
+ wkupm3->mem[i].size = resource_size(res);
+ addrp = of_get_address(dev->of_node, i, &size, NULL);
+ /*
+ * The wkupm3 has umem at address 0 in its view, so the device
+ * address for each memory region is computed as a relative
+ * offset of the bus address for umem, which therefore needs
+ * to be processed first.
+ */
+ if (!strcmp(mem_names[i], "umem"))
+ l4_offset = be32_to_cpu(*addrp);
+ wkupm3->mem[i].dev_addr = be32_to_cpu(*addrp) - l4_offset;
+ }
+
+ dev_set_drvdata(dev, rproc);
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed\n");
+ goto err_put_rproc;
+ }
+
+ return 0;
+
+err_put_rproc:
+ rproc_put(rproc);
+err:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int wkup_m3_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+
+ rproc_del(rproc);
+ rproc_put(rproc);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wkup_m3_rpm_suspend(struct device *dev)
+{
+ return -EBUSY;
+}
+
+static int wkup_m3_rpm_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops wkup_m3_rproc_pm_ops = {
+ SET_RUNTIME_PM_OPS(wkup_m3_rpm_suspend, wkup_m3_rpm_resume, NULL)
+};
+
+static struct platform_driver wkup_m3_rproc_driver = {
+ .probe = wkup_m3_rproc_probe,
+ .remove = wkup_m3_rproc_remove,
+ .driver = {
+ .name = "wkup_m3_rproc",
+ .of_match_table = wkup_m3_rproc_of_match,
+ .pm = &wkup_m3_rproc_pm_ops,
+ },
+};
+
+module_platform_driver(wkup_m3_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Wakeup M3 remote processor control driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 6f1fa1773e76..f8d8fdb26b72 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -65,6 +65,7 @@ struct virtio_ccw_device {
bool is_thinint;
bool going_away;
bool device_lost;
+ unsigned int config_ready;
void *airq_info;
};
@@ -833,8 +834,11 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
if (ret)
goto out_free;
- memcpy(vcdev->config, config_area, sizeof(vcdev->config));
- memcpy(buf, &vcdev->config[offset], len);
+ memcpy(vcdev->config, config_area, offset + len);
+ if (buf)
+ memcpy(buf, &vcdev->config[offset], len);
+ if (vcdev->config_ready < offset + len)
+ vcdev->config_ready = offset + len;
out_free:
kfree(config_area);
@@ -857,6 +861,9 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
if (!config_area)
goto out_free;
+ /* Make sure we don't overwrite fields. */
+ if (vcdev->config_ready < offset)
+ virtio_ccw_get_config(vdev, 0, NULL, offset);
memcpy(&vcdev->config[offset], buf, len);
/* Write the config area to the host. */
memcpy(config_area, vcdev->config, sizeof(vcdev->config));
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 4a484d60be0d..b749026aa592 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1191,7 +1191,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
struct qla_tgt_cmd *cmd =
container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
- if (cmd->tag == abts->exchange_addr_to_abort) {
+ if (se_cmd->tag == abts->exchange_addr_to_abort) {
lun = cmd->unpacked_lun;
found_lun = true;
break;
@@ -1728,9 +1728,8 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
if (unlikely(cmd->aborted)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
- "qla_target(%d): terminating exchange "
- "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
- se_cmd, cmd->tag);
+ "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
+ vha->vp_idx, cmd, se_cmd, se_cmd->tag);
cmd->state = QLA_TGT_STATE_ABORTED;
cmd->cmd_flags |= BIT_6;
@@ -1765,18 +1764,17 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
prm->residual = se_cmd->residual_count;
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
- "Residual underflow: %d (tag %d, "
- "op %x, bufflen %d, rq_result %x)\n", prm->residual,
- cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
- cmd->bufflen, prm->rq_result);
+ "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
+ prm->residual, se_cmd->tag,
+ se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->bufflen, prm->rq_result);
prm->rq_result |= SS_RESIDUAL_UNDER;
} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
prm->residual = se_cmd->residual_count;
ql_dbg(ql_dbg_io, vha, 0x305d,
- "Residual overflow: %d (tag %d, "
- "op %x, bufflen %d, rq_result %x)\n", prm->residual,
- cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
- cmd->bufflen, prm->rq_result);
+ "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
+ prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
+ se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
prm->rq_result |= SS_RESIDUAL_OVER;
}
@@ -1849,7 +1847,7 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
== 50) {
*xmit_type &= ~QLA_TGT_XMIT_STATUS;
ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
- "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
+ "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag);
}
#endif
/*
@@ -1873,7 +1871,7 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
"Cutting cmd %p (tag %d) buffer"
" tail to len %d, sg_cnt %d (cmd->bufflen %d,"
- " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
+ " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave,
cmd->bufflen, cmd->sg_cnt);
cmd->bufflen = tot_len;
@@ -1885,13 +1883,13 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
"Cutting cmd %p (tag %d) buffer head "
- "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
+ "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset,
cmd->bufflen);
if (offset == 0)
*xmit_type &= ~QLA_TGT_XMIT_DATA;
else if (qlt_set_data_offset(cmd, offset)) {
ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
- "qlt_set_data_offset() failed (tag %d)", cmd->tag);
+ "qlt_set_data_offset() failed (tag %d)", se_cmd->tag);
}
}
}
@@ -3194,7 +3192,7 @@ skip_term:
return;
} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
- "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
+ "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
} else {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
"qla_target(%d): A command in state (%d) should "
@@ -3266,7 +3264,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
goto out_term;
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
- cmd->tag = atio->u.isp24.exchange_addr;
+ cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
cmd->unpacked_lun = scsilun_to_int(
(struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
@@ -3893,9 +3891,8 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
resp = 1;
} else {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
- "qla_target(%d): SRR for in data for cmd "
- "without them (tag %d, SCSI status %d), "
- "reject", vha->vp_idx, cmd->tag,
+ "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
+ vha->vp_idx, se_cmd->tag,
cmd->se_cmd.scsi_status);
goto out_reject;
}
@@ -3929,10 +3926,8 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
}
} else {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
- "qla_target(%d): SRR for out data for cmd "
- "without them (tag %d, SCSI status %d), "
- "reject", vha->vp_idx, cmd->tag,
- cmd->se_cmd.scsi_status);
+ "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
+ vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
goto out_reject;
}
break;
@@ -4053,10 +4048,9 @@ restart:
cmd->sg = se_cmd->t_data_sg;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
- "SRR cmd %p (se_cmd %p, tag %d, op %x), "
- "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
- se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
- cmd->sg_cnt, cmd->offset);
+ "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
+ cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
+ se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);
qlt_handle_srr(vha, sctio, imm);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 332086776dfe..985d76dd706b 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -924,7 +924,6 @@ struct qla_tgt_cmd {
int sg_cnt; /* SG segments count */
int bufflen; /* cmd buffer length */
int offset;
- uint32_t tag;
uint32_t unpacked_lun;
enum dma_data_direction dma_data_direction;
uint32_t reset_count;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index e32d24ec7a11..d9a8c6084346 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -44,7 +44,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "qla_def.h"
@@ -54,9 +53,6 @@
static struct workqueue_struct *tcm_qla2xxx_free_wq;
static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
-static const struct target_core_fabric_ops tcm_qla2xxx_ops;
-static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops;
-
/*
* Parse WWN.
* If strict, we require lower-case hex and colon separators to be sure
@@ -191,23 +187,6 @@ static char *tcm_qla2xxx_npiv_get_fabric_name(void)
return "qla2xxx_npiv";
}
-static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
- struct tcm_qla2xxx_tpg, se_tpg);
- struct tcm_qla2xxx_lport *lport = tpg->lport;
- u8 proto_id;
-
- switch (lport->lport_proto_id) {
- case SCSI_PROTOCOL_FCP:
- default:
- proto_id = fc_get_fabric_proto_ident(se_tpg);
- break;
- }
-
- return proto_id;
-}
-
static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -224,78 +203,6 @@ static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
return tpg->lport_tpgt;
}
-static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static u32 tcm_qla2xxx_get_pr_transport_id(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code,
- unsigned char *buf)
-{
- struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
- struct tcm_qla2xxx_tpg, se_tpg);
- struct tcm_qla2xxx_lport *lport = tpg->lport;
- int ret = 0;
-
- switch (lport->lport_proto_id) {
- case SCSI_PROTOCOL_FCP:
- default:
- ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- break;
- }
-
- return ret;
-}
-
-static u32 tcm_qla2xxx_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
- struct tcm_qla2xxx_tpg, se_tpg);
- struct tcm_qla2xxx_lport *lport = tpg->lport;
- int ret = 0;
-
- switch (lport->lport_proto_id) {
- case SCSI_PROTOCOL_FCP:
- default:
- ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- break;
- }
-
- return ret;
-}
-
-static char *tcm_qla2xxx_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
- struct tcm_qla2xxx_tpg, se_tpg);
- struct tcm_qla2xxx_lport *lport = tpg->lport;
- char *tid = NULL;
-
- switch (lport->lport_proto_id) {
- case SCSI_PROTOCOL_FCP:
- default:
- tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- break;
- }
-
- return tid;
-}
-
static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -344,29 +251,6 @@ static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg)
return tpg->tpg_attrib.fabric_prot_type;
}
-static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
- struct se_portal_group *se_tpg)
-{
- struct tcm_qla2xxx_nacl *nacl;
-
- nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
- if (!nacl) {
- pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
- return NULL;
- }
-
- return &nacl->se_node_acl;
-}
-
-static void tcm_qla2xxx_release_fabric_acl(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
-{
- struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
- struct tcm_qla2xxx_nacl, se_node_acl);
- kfree(nacl);
-}
-
static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -430,7 +314,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
cmd->cmd_flags |= BIT_14;
}
- return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ return target_put_sess_cmd(se_cmd);
}
/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
@@ -534,19 +418,6 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
return;
}
-static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
-{
- struct qla_tgt_cmd *cmd;
-
- /* check for task mgmt cmd */
- if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- return 0xffffffff;
-
- cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
-
- return cmd->tag;
-}
-
static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
@@ -827,17 +698,6 @@ static void tcm_qla2xxx_release_session(struct kref *kref)
qlt_unreg_sess(se_sess->fabric_sess_ptr);
}
-static void tcm_qla2xxx_put_session(struct se_session *se_sess)
-{
- struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
- struct qla_hw_data *ha = sess->vha->hw;
- unsigned long flags;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
if (!sess)
@@ -853,53 +713,20 @@ static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
target_sess_cmd_list_set_waiting(sess->se_sess);
}
-static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
- struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
+static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl,
+ const char *name)
{
- struct se_node_acl *se_nacl, *se_nacl_new;
- struct tcm_qla2xxx_nacl *nacl;
+ struct tcm_qla2xxx_nacl *nacl =
+ container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
u64 wwnn;
- u32 qla2xxx_nexus_depth;
if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
- return ERR_PTR(-EINVAL);
-
- se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
- if (!se_nacl_new)
- return ERR_PTR(-ENOMEM);
-/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
- qla2xxx_nexus_depth = 1;
+ return -EINVAL;
- /*
- * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a NodeACL from demo mode -> explicit
- */
- se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
- name, qla2xxx_nexus_depth);
- if (IS_ERR(se_nacl)) {
- tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
- return se_nacl;
- }
- /*
- * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
- */
- nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
nacl->nport_wwnn = wwnn;
tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
- return se_nacl;
-}
-
-static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
-{
- struct se_portal_group *se_tpg = se_acl->se_tpg;
- struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
- struct tcm_qla2xxx_nacl, se_node_acl);
-
- core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
- kfree(nacl);
+ return 0;
}
/* Start items for tcm_qla2xxx_tpg_attrib_cit */
@@ -1175,8 +1002,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
tpg->tpg_attrib.cache_dynamic_acls = 1;
tpg->tpg_attrib.demo_mode_login_only = 1;
- ret = core_tpg_register(&tcm_qla2xxx_ops, wwn,
- &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
if (ret < 0) {
kfree(tpg);
return NULL;
@@ -1295,8 +1121,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
tpg->tpg_attrib.cache_dynamic_acls = 1;
tpg->tpg_attrib.demo_mode_login_only = 1;
- ret = core_tpg_register(&tcm_qla2xxx_npiv_ops, wwn,
- &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
if (ret < 0) {
kfree(tpg);
return NULL;
@@ -1988,14 +1813,10 @@ static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.module = THIS_MODULE,
.name = "qla2xxx",
+ .node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
.get_fabric_name = tcm_qla2xxx_get_fabric_name,
- .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
- .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
- .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
.tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
.tpg_check_demo_mode_write_protect =
@@ -2004,12 +1825,9 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
tcm_qla2xxx_check_prod_write_protect,
.tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only,
.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
- .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
- .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
- .put_session = tcm_qla2xxx_put_session,
.shutdown_session = tcm_qla2xxx_shutdown_session,
.close_session = tcm_qla2xxx_close_session,
.sess_get_index = tcm_qla2xxx_sess_get_index,
@@ -2017,7 +1835,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.write_pending = tcm_qla2xxx_write_pending,
.write_pending_status = tcm_qla2xxx_write_pending_status,
.set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
- .get_task_tag = tcm_qla2xxx_get_task_tag,
.get_cmd_state = tcm_qla2xxx_get_cmd_state,
.queue_data_in = tcm_qla2xxx_queue_data_in,
.queue_status = tcm_qla2xxx_queue_status,
@@ -2031,12 +1848,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.fabric_drop_wwn = tcm_qla2xxx_drop_lport,
.fabric_make_tpg = tcm_qla2xxx_make_tpg,
.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
- .fabric_post_link = NULL,
- .fabric_pre_unlink = NULL,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
- .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
- .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+ .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl,
.tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
.tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs,
@@ -2046,26 +1858,19 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.module = THIS_MODULE,
.name = "qla2xxx_npiv",
+ .node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
- .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
- .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
- .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
.tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
.tpg_check_prod_mode_write_protect =
tcm_qla2xxx_check_prod_write_protect,
.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
- .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
- .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
- .put_session = tcm_qla2xxx_put_session,
.shutdown_session = tcm_qla2xxx_shutdown_session,
.close_session = tcm_qla2xxx_close_session,
.sess_get_index = tcm_qla2xxx_sess_get_index,
@@ -2073,7 +1878,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.write_pending = tcm_qla2xxx_write_pending,
.write_pending_status = tcm_qla2xxx_write_pending_status,
.set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
- .get_task_tag = tcm_qla2xxx_get_task_tag,
.get_cmd_state = tcm_qla2xxx_get_cmd_state,
.queue_data_in = tcm_qla2xxx_queue_data_in,
.queue_status = tcm_qla2xxx_queue_status,
@@ -2087,12 +1891,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
.fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
- .fabric_post_link = NULL,
- .fabric_pre_unlink = NULL,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
- .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
- .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+ .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl,
.tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
.tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs,
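The large deletions in tcm_qla2xxx.c follow from the new fabric-ops contract: rather than every fabric supplying make/drop and alloc/release callbacks for its node-ACL wrapper, the driver now declares node_acl_size and implements fabric_init_nodeacl() (and, where needed, fabric_cleanup_nodeacl()); the core owns the allocation. A hedged sketch of the core-side pattern this implies -- the function name and error handling here are illustrative, not the exact target-core code:

#include <linux/slab.h>

static struct se_node_acl *example_alloc_node_acl(
        const struct target_core_fabric_ops *tfo, const char *name)
{
        struct se_node_acl *acl;

        /* node_acl_size covers the fabric struct embedding se_node_acl */
        acl = kzalloc(tfo->node_acl_size, GFP_KERNEL);
        if (!acl)
                return NULL;
        if (tfo->fabric_init_nodeacl &&
            tfo->fabric_init_nodeacl(acl, name) < 0) {
                kfree(acl);
                return NULL;
        }
        return acl;
}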
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 23295115c9fc..3bbf4cb6fd97 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -13,6 +13,8 @@
#include "qla_target.h"
struct tcm_qla2xxx_nacl {
+ struct se_node_acl se_node_acl;
+
/* From libfc struct fc_rport->port_id */
u32 nport_id;
/* Binary World Wide unique Node Name for remote FC Initiator Nport */
@@ -23,8 +25,6 @@ struct tcm_qla2xxx_nacl {
struct qla_tgt_sess *qla_tgt_sess;
/* Pointer to TCM FC nexus */
struct se_session *nport_nexus;
- /* Returned by tcm_qla2xxx_make_nodeacl() */
- struct se_node_acl se_node_acl;
};
struct tcm_qla2xxx_tpg_attrib {
@@ -57,8 +57,6 @@ struct tcm_qla2xxx_fc_loopid {
};
struct tcm_qla2xxx_lport {
- /* SCSI protocol the lport is providing */
- u8 lport_proto_id;
/* Binary World Wide unique Port Name for FC Target Lport */
u64 lport_wwpn;
/* Binary World Wide unique Port Name for FC NPIV Target Lport */
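Relocating se_node_acl to the top of struct tcm_qla2xxx_nacl is load-bearing: once the core allocates node_acl_size bytes and hands the buffer around as a struct se_node_acl *, the embedded member must sit at offset zero, and the fabric recovers its wrapper with container_of(). A minimal illustration with a hypothetical example_nacl, assuming the target-core headers providing struct se_node_acl are in scope:

#include <linux/kernel.h>       /* container_of() */
#include <linux/types.h>

struct example_nacl {
        struct se_node_acl se_node_acl; /* must remain the first member */
        u64 nport_wwnn;                 /* fabric-private state follows */
};

static struct example_nacl *to_example_nacl(struct se_node_acl *se_nacl)
{
        return container_of(se_nacl, struct example_nacl, se_node_acl);
}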
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index b562af816c0a..b04b05a0904e 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -260,7 +260,7 @@ static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
/* We have at least one power down mode */
cpumask_clear(&mask);
cpumask_set_cpu(cpu, &mask);
- qcom_scm_set_warm_boot_addr(cpu_resume, &mask);
+ qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask);
}
per_cpu(qcom_idle_ops, cpu) = fns;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index a3fba366cebe..4e68b62193ed 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -29,7 +29,6 @@
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
@@ -716,7 +715,7 @@ static int iscsit_add_reject_from_cmd(
*/
if (cmd->se_cmd.se_tfo != NULL) {
pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ target_put_sess_cmd(&cmd->se_cmd);
}
return -1;
}
@@ -1002,13 +1001,15 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
- target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
+ target_get_sess_cmd(&cmd->se_cmd, true);
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
if (cmd->sense_reason)
goto attach_cmd;
+ /* only used for printks or comparing with ->ref_task_tag */
+ cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
@@ -1068,7 +1069,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return -1;
else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
}
@@ -1084,7 +1085,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (!cmd->sense_reason)
return 0;
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
@@ -1115,7 +1116,6 @@ static int
iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
bool dump_payload)
{
- struct iscsi_conn *conn = cmd->conn;
int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
/*
* Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
@@ -1142,7 +1142,7 @@ after_immediate_data:
rc = iscsit_dump_data_payload(cmd->conn,
cmd->first_burst_len, 1);
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ target_put_sess_cmd(&cmd->se_cmd);
return rc;
} else if (cmd->unsolicited_data)
iscsit_set_unsoliticed_dataout(cmd);
@@ -1811,7 +1811,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
- target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
+ target_get_sess_cmd(&cmd->se_cmd, true);
sess_ref = true;
switch (function) {
@@ -1953,7 +1953,7 @@ attach:
*/
if (sess_ref) {
pr_debug("Handle TMR, using sess_ref=true check\n");
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ target_put_sess_cmd(&cmd->se_cmd);
}
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
@@ -2737,11 +2737,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
cmd->iov_data_count = iov_count;
cmd->tx_size = tx_size;
- /* sendpage is preferred but can't insert markers */
- if (!conn->conn_ops->IFMarker)
- ret = iscsit_fe_sendpage_sg(cmd, conn);
- else
- ret = iscsit_send_tx_data(cmd, conn, 0);
+ ret = iscsit_fe_sendpage_sg(cmd, conn);
iscsit_unmap_iovec(cmd);
@@ -4073,17 +4069,9 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
" opcode while ERL=0, closing iSCSI connection.\n");
return -1;
}
- if (!conn->conn_ops->OFMarker) {
- pr_err("Unable to recover from unknown"
- " opcode while OFMarker=No, closing iSCSI"
- " connection.\n");
- return -1;
- }
- if (iscsit_recover_from_unknown_opcode(conn) < 0) {
- pr_err("Unable to recover from unknown"
- " opcode, closing iSCSI connection.\n");
- return -1;
- }
+ pr_err("Unable to recover from unknown opcode while OFMarker=No,"
+ " closing iSCSI connection.\n");
+ ret = -1;
break;
}
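Most of the iscsi_target.c churn above is a single signature change: target_get_sess_cmd() and target_put_sess_cmd() drop their struct se_session * argument, since the command already carries its session as se_cmd->se_sess. Taken straight from the hunks, the rewrite at every call site is:

/* before: the session was passed redundantly */
target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

/* after: derived internally from se_cmd->se_sess */
target_put_sess_cmd(&cmd->se_cmd);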
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 469fce44ebad..c1898c84b3d2 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -24,7 +24,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <target/iscsi/iscsi_transport.h>
@@ -860,57 +859,19 @@ static struct configfs_attribute *lio_target_initiator_attrs[] = {
NULL,
};
-static struct se_node_acl *lio_tpg_alloc_fabric_acl(
- struct se_portal_group *se_tpg)
+static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
+ const char *name)
{
- struct iscsi_node_acl *acl;
-
- acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
- if (!acl) {
- pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
- return NULL;
- }
-
- return &acl->se_node_acl;
-}
-
-static struct se_node_acl *lio_target_make_nodeacl(
- struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
-{
- struct config_group *stats_cg;
- struct iscsi_node_acl *acl;
- struct se_node_acl *se_nacl_new, *se_nacl;
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
- u32 cmdsn_depth;
-
- se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
- if (!se_nacl_new)
- return ERR_PTR(-ENOMEM);
-
- cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth;
- /*
- * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a NodeACL from demo mode -> explicit
- */
- se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
- name, cmdsn_depth);
- if (IS_ERR(se_nacl))
- return se_nacl;
-
- acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
- stats_cg = &se_nacl->acl_fabric_stat_group;
+ struct iscsi_node_acl *acl =
+ container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+ struct config_group *stats_cg = &se_nacl->acl_fabric_stat_group;
stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
" stats_cg->default_groups\n");
- core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
- kfree(acl);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
@@ -918,13 +879,11 @@ static struct se_node_acl *lio_target_make_nodeacl(
config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
- return se_nacl;
+ return 0;
}
-static void lio_target_drop_nodeacl(
- struct se_node_acl *se_nacl)
+static void lio_target_cleanup_nodeacl(struct se_node_acl *se_nacl)
{
- struct se_portal_group *se_tpg = se_nacl->se_tpg;
struct iscsi_node_acl *acl = container_of(se_nacl,
struct iscsi_node_acl, se_node_acl);
struct config_item *df_item;
@@ -938,9 +897,6 @@ static void lio_target_drop_nodeacl(
config_item_put(df_item);
}
kfree(stats_cg->default_groups);
-
- core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
- kfree(acl);
}
/* End items for lio_target_acl_cit */
@@ -1463,8 +1419,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
if (!tpg)
return NULL;
- ret = core_tpg_register(&iscsi_ops, wwn, &tpg->tpg_se_tpg,
- tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
if (ret < 0)
return NULL;
@@ -1735,14 +1690,6 @@ static char *iscsi_get_fabric_name(void)
return "iSCSI";
}
-static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
-{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
-
- /* only used for printks or comparison with ->ref_task_tag */
- return (__force u32)cmd->init_task_tag;
-}
-
static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
@@ -1832,78 +1779,58 @@ static void lio_aborted_task(struct se_cmd *se_cmd)
cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
}
-static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+static inline struct iscsi_portal_group *iscsi_tpg(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+ return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+}
- return &tpg->tpg_tiqn->tiqn[0];
+static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+ return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
}
static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->tpgt;
+ return iscsi_tpg(se_tpg)->tpgt;
}
static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->tpg_attrib.default_cmdsn_depth;
+ return iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
}
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->tpg_attrib.generate_node_acls;
+ return iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
}
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->tpg_attrib.cache_dynamic_acls;
+ return iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
}
static int lio_tpg_check_demo_mode_write_protect(
struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->tpg_attrib.demo_mode_write_protect;
+ return iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
}
static int lio_tpg_check_prod_mode_write_protect(
struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->tpg_attrib.prod_mode_write_protect;
+ return iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
}
static int lio_tpg_check_prot_fabric_only(
struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
/*
* Only report fabric_prot_type if t10_pi has also been enabled
* for incoming ib_isert sessions.
*/
- if (!tpg->tpg_attrib.t10_pi)
+ if (!iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
return 0;
-
- return tpg->tpg_attrib.fabric_prot_type;
-}
-
-static void lio_tpg_release_fabric_acl(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_acl)
-{
- struct iscsi_node_acl *acl = container_of(se_acl,
- struct iscsi_node_acl, se_node_acl);
- kfree(acl);
+ return iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
}
/*
@@ -1948,9 +1875,7 @@ static void lio_tpg_close_session(struct se_session *se_sess)
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->tpg_tiqn->tiqn_index;
+ return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
}
static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
@@ -1967,7 +1892,7 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
static int lio_check_stop_free(struct se_cmd *se_cmd)
{
- return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ return target_put_sess_cmd(se_cmd);
}
static void lio_release_cmd(struct se_cmd *se_cmd)
@@ -1981,14 +1906,11 @@ static void lio_release_cmd(struct se_cmd *se_cmd)
const struct target_core_fabric_ops iscsi_ops = {
.module = THIS_MODULE,
.name = "iscsi",
+ .node_acl_size = sizeof(struct iscsi_node_acl),
.get_fabric_name = iscsi_get_fabric_name,
- .get_fabric_proto_ident = iscsi_get_fabric_proto_ident,
.tpg_get_wwn = lio_tpg_get_endpoint_wwn,
.tpg_get_tag = lio_tpg_get_tag,
.tpg_get_default_depth = lio_tpg_get_default_depth,
- .tpg_get_pr_transport_id = iscsi_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = iscsi_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = iscsi_parse_pr_out_transport_id,
.tpg_check_demo_mode = lio_tpg_check_demo_mode,
.tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache,
.tpg_check_demo_mode_write_protect =
@@ -1996,8 +1918,6 @@ const struct target_core_fabric_ops iscsi_ops = {
.tpg_check_prod_mode_write_protect =
lio_tpg_check_prod_mode_write_protect,
.tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only,
- .tpg_alloc_fabric_acl = lio_tpg_alloc_fabric_acl,
- .tpg_release_fabric_acl = lio_tpg_release_fabric_acl,
.tpg_get_inst_index = lio_tpg_get_inst_index,
.check_stop_free = lio_check_stop_free,
.release_cmd = lio_release_cmd,
@@ -2008,7 +1928,6 @@ const struct target_core_fabric_ops iscsi_ops = {
.write_pending = lio_write_pending,
.write_pending_status = lio_write_pending_status,
.set_default_node_attributes = lio_set_default_node_attributes,
- .get_task_tag = iscsi_get_task_tag,
.get_cmd_state = iscsi_get_cmd_state,
.queue_data_in = lio_queue_data_in,
.queue_status = lio_queue_status,
@@ -2020,8 +1939,8 @@ const struct target_core_fabric_ops iscsi_ops = {
.fabric_drop_tpg = lio_target_tiqn_deltpg,
.fabric_make_np = lio_target_call_addnptotpg,
.fabric_drop_np = lio_target_call_delnpfromtpg,
- .fabric_make_nodeacl = lio_target_make_nodeacl,
- .fabric_drop_nodeacl = lio_target_drop_nodeacl,
+ .fabric_init_nodeacl = lio_target_init_nodeacl,
+ .fabric_cleanup_nodeacl = lio_target_cleanup_nodeacl,
.tfc_discovery_attrs = lio_target_discovery_auth_attrs,
.tfc_wwn_attrs = lio_target_wwn_attrs,
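The lio_tpg_* accessor rewrites above all hang off one new helper: iscsi_tpg() recovers the struct iscsi_portal_group from its embedded tpg_se_tpg via container_of(), replacing the untyped se_tpg->se_tpg_fabric_ptr back-pointer that core_tpg_register() no longer stores. The before/after shape, lifted from the hunks:

/* before: untyped back-pointer stashed at TPG registration time */
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
return tpg->tpgt;

/* after: typed recovery from the embedded se_portal_group */
return iscsi_tpg(se_tpg)->tpgt;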
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 959a14c9dd5d..210f6e4830e3 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -956,56 +956,3 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
iscsit_handle_connection_cleanup(conn);
}
-
-/*
- * This is the simple function that makes the magic of
- * sync and steering happen in the follow paradoxical order:
- *
- * 0) Receive conn->of_marker (bytes left until next OFMarker)
- * bytes into an offload buffer. When we pass the exact number
- * of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
- * rx_data() will automatically receive the identical u32 marker
- * values and store it in conn->of_marker_offset;
- * 1) Now conn->of_marker_offset will contain the offset to the start
- * of the next iSCSI PDU. Dump these remaining bytes into another
- * offload buffer.
- * 2) We are done!
- * Next byte in the TCP stream will contain the next iSCSI PDU!
- * Cool Huh?!
- */
-int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
-{
- /*
- * Make sure the remaining bytes to the next marker are a sane value.
- */
- if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
- pr_err("Remaining bytes to OFMarker: %u exceeds"
- " OFMarkInt bytes: %u.\n", conn->of_marker,
- conn->conn_ops->OFMarkInt * 4);
- return -1;
- }
-
- pr_debug("Advancing %u bytes in TCP stream to get to the"
- " next OFMarker.\n", conn->of_marker);
-
- if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
- return -1;
-
- /*
- * Make sure the offset marker we retrieved is a valid value.
- */
- if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
- conn->conn_ops->MaxRecvDataSegmentLength)) {
- pr_err("OfMarker offset value: %u exceeds limit.\n",
- conn->of_marker_offset);
- return -1;
- }
-
- pr_debug("Discarding %u bytes of TCP stream to get to the"
- " next iSCSI Opcode.\n", conn->of_marker_offset);
-
- if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
- return -1;
-
- return 0;
-}
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index 21acc9a06376..a9e2f9497fb2 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -10,6 +10,5 @@ extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
-extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
#endif /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 70d799dfab03..3d0fe4ff5590 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -410,8 +410,6 @@ static int iscsi_login_zero_tsih_s2(
if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
return -1;
- if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
- return -1;
/*
* Set RDMAExtensions=Yes by default for iSER enabled network portals
*/
@@ -477,59 +475,6 @@ check_prot:
return 0;
}
-/*
- * Remove PSTATE_NEGOTIATE for the four FIM related keys.
- * The Initiator node will be able to enable FIM by proposing them itself.
- */
-int iscsi_login_disable_FIM_keys(
- struct iscsi_param_list *param_list,
- struct iscsi_conn *conn)
-{
- struct iscsi_param *param;
-
- param = iscsi_find_param_from_key("OFMarker", param_list);
- if (!param) {
- pr_err("iscsi_find_param_from_key() for"
- " OFMarker failed\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
- }
- param->state &= ~PSTATE_NEGOTIATE;
-
- param = iscsi_find_param_from_key("OFMarkInt", param_list);
- if (!param) {
- pr_err("iscsi_find_param_from_key() for"
- " IFMarker failed\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
- }
- param->state &= ~PSTATE_NEGOTIATE;
-
- param = iscsi_find_param_from_key("IFMarker", param_list);
- if (!param) {
- pr_err("iscsi_find_param_from_key() for"
- " IFMarker failed\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
- }
- param->state &= ~PSTATE_NEGOTIATE;
-
- param = iscsi_find_param_from_key("IFMarkInt", param_list);
- if (!param) {
- pr_err("iscsi_find_param_from_key() for"
- " IFMarker failed\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
- }
- param->state &= ~PSTATE_NEGOTIATE;
-
- return 0;
-}
-
static int iscsi_login_non_zero_tsih_s1(
struct iscsi_conn *conn,
unsigned char *buf)
@@ -616,7 +561,7 @@ static int iscsi_login_non_zero_tsih_s2(
if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
return -1;
- return iscsi_login_disable_FIM_keys(conn->param_list, conn);
+ return 0;
}
int iscsi_login_post_auth_non_zero_tsih(
@@ -765,7 +710,6 @@ int iscsi_post_login_handler(
conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
- iscsit_set_sync_and_steering_values(conn);
/*
* SCSI Initiator -> SCSI Target Port Mapping
*/
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 29d098324b7f..1c7358081533 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -16,6 +16,5 @@ extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
bool, bool);
extern int iscsi_target_login_thread(void *);
-extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
#endif /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index d4f9e9645697..e8a52f7d6204 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -34,13 +34,6 @@ int iscsi_login_rx_data(
iov.iov_len = length;
iov.iov_base = buf;
- /*
- * Initial Marker-less Interval.
- * Add the values regardless of IFMarker/OFMarker, considering
- * it may not be negotiated yet.
- */
- conn->of_marker += length;
-
rx_got = rx_data(conn, &iov, 1, length);
if (rx_got != length) {
pr_err("rx_data returned %d, expecting %d.\n",
@@ -72,13 +65,6 @@ int iscsi_login_tx_data(
iov_cnt++;
}
- /*
- * Initial Marker-less Interval.
- * Add the values regardless of IFMarker/OFMarker, considering
- * it may not be negotiated yet.
- */
- conn->if_marker += length;
-
tx_sent = tx_data(conn, &iov[0], iov_cnt, length);
if (tx_sent != length) {
pr_err("tx_data returned %d, expecting %d.\n",
@@ -97,12 +83,6 @@ void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
"CRC32C" : "None");
pr_debug("MaxRecvDataSegmentLength: %u\n",
conn_ops->MaxRecvDataSegmentLength);
- pr_debug("OFMarker: %s\n", (conn_ops->OFMarker) ? "Yes" : "No");
- pr_debug("IFMarker: %s\n", (conn_ops->IFMarker) ? "Yes" : "No");
- if (conn_ops->OFMarker)
- pr_debug("OFMarkInt: %u\n", conn_ops->OFMarkInt);
- if (conn_ops->IFMarker)
- pr_debug("IFMarkInt: %u\n", conn_ops->IFMarkInt);
}
void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
@@ -194,10 +174,6 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para
case TYPERANGE_DIGEST:
param->type = TYPE_VALUE_LIST | TYPE_STRING;
break;
- case TYPERANGE_MARKINT:
- param->type = TYPE_NUMBER_RANGE;
- param->type_range |= TYPERANGE_1_TO_65535;
- break;
case TYPERANGE_ISCSINAME:
case TYPERANGE_SESSIONTYPE:
case TYPERANGE_TARGETADDRESS:
@@ -422,13 +398,13 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
- TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+ TYPERANGE_UTF8, USE_INITIAL_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
- TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+ TYPERANGE_UTF8, USE_INITIAL_ONLY);
if (!param)
goto out;
/*
@@ -524,9 +500,9 @@ int iscsi_set_keys_to_negotiate(
} else if (!strcmp(param->name, OFMARKER)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, IFMARKINT)) {
- SET_PSTATE_NEGOTIATE(param);
+ SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, OFMARKINT)) {
- SET_PSTATE_NEGOTIATE(param);
+ SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, RDMAEXTENSIONS)) {
if (iser)
SET_PSTATE_NEGOTIATE(param);
@@ -906,91 +882,6 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt
return 0;
}
-static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
-{
- char *left_val_ptr = NULL, *right_val_ptr = NULL;
- char *tilde_ptr = NULL;
- u32 left_val, right_val, local_left_val;
-
- if (strcmp(param->name, IFMARKINT) &&
- strcmp(param->name, OFMARKINT)) {
- pr_err("Only parameters \"%s\" or \"%s\" may contain a"
- " numerical range value.\n", IFMARKINT, OFMARKINT);
- return -1;
- }
-
- if (IS_PSTATE_PROPOSER(param))
- return 0;
-
- tilde_ptr = strchr(value, '~');
- if (!tilde_ptr) {
- pr_err("Unable to locate numerical range indicator"
- " \"~\" for \"%s\".\n", param->name);
- return -1;
- }
- *tilde_ptr = '\0';
-
- left_val_ptr = value;
- right_val_ptr = value + strlen(left_val_ptr) + 1;
-
- if (iscsi_check_numerical_value(param, left_val_ptr) < 0)
- return -1;
- if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
- return -1;
-
- left_val = simple_strtoul(left_val_ptr, NULL, 0);
- right_val = simple_strtoul(right_val_ptr, NULL, 0);
- *tilde_ptr = '~';
-
- if (right_val < left_val) {
- pr_err("Numerical range for parameter \"%s\" contains"
- " a right value which is less than the left.\n",
- param->name);
- return -1;
- }
-
- /*
- * For now, enforce reasonable defaults for [I,O]FMarkInt.
- */
- tilde_ptr = strchr(param->value, '~');
- if (!tilde_ptr) {
- pr_err("Unable to locate numerical range indicator"
- " \"~\" for \"%s\".\n", param->name);
- return -1;
- }
- *tilde_ptr = '\0';
-
- left_val_ptr = param->value;
- right_val_ptr = param->value + strlen(left_val_ptr) + 1;
-
- local_left_val = simple_strtoul(left_val_ptr, NULL, 0);
- *tilde_ptr = '~';
-
- if (param->set_param) {
- if ((left_val < local_left_val) ||
- (right_val < local_left_val)) {
- pr_err("Passed value range \"%u~%u\" is below"
- " minimum left value \"%u\" for key \"%s\","
- " rejecting.\n", left_val, right_val,
- local_left_val, param->name);
- return -1;
- }
- } else {
- if ((left_val < local_left_val) &&
- (right_val < local_left_val)) {
- pr_err("Received value range \"%u~%u\" is"
- " below minimum left value \"%u\" for key"
- " \"%s\", rejecting.\n", left_val, right_val,
- local_left_val, param->name);
- SET_PSTATE_REJECT(param);
- if (iscsi_update_param_value(param, REJECT) < 0)
- return -1;
- }
- }
-
- return 0;
-}
-
static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
{
if (IS_PSTATE_PROPOSER(param))
@@ -1027,33 +918,6 @@ static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *val
return 0;
}
-/*
- * This function is used to pick a value range number, currently just
- * returns the lesser of both right values.
- */
-static char *iscsi_get_value_from_number_range(
- struct iscsi_param *param,
- char *value)
-{
- char *end_ptr, *tilde_ptr1 = NULL, *tilde_ptr2 = NULL;
- u32 acceptor_right_value, proposer_right_value;
-
- tilde_ptr1 = strchr(value, '~');
- if (!tilde_ptr1)
- return NULL;
- *tilde_ptr1++ = '\0';
- proposer_right_value = simple_strtoul(tilde_ptr1, &end_ptr, 0);
-
- tilde_ptr2 = strchr(param->value, '~');
- if (!tilde_ptr2)
- return NULL;
- *tilde_ptr2++ = '\0';
- acceptor_right_value = simple_strtoul(tilde_ptr2, &end_ptr, 0);
-
- return (acceptor_right_value >= proposer_right_value) ?
- tilde_ptr1 : tilde_ptr2;
-}
-
static char *iscsi_check_valuelist_for_support(
struct iscsi_param *param,
char *value)
@@ -1103,7 +967,7 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
struct iscsi_conn *conn)
{
u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
- char *negoitated_value = NULL;
+ char *negotiated_value = NULL;
if (IS_PSTATE_ACCEPTOR(param)) {
pr_err("Received key \"%s\" twice, protocol error.\n",
@@ -1203,24 +1067,16 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
pr_debug("Updated %s to target MXDSL value: %s\n",
param->name, param->value);
}
-
- } else if (IS_TYPE_NUMBER_RANGE(param)) {
- negoitated_value = iscsi_get_value_from_number_range(
- param, value);
- if (!negoitated_value)
- return -1;
- if (iscsi_update_param_value(param, negoitated_value) < 0)
- return -1;
} else if (IS_TYPE_VALUE_LIST(param)) {
- negoitated_value = iscsi_check_valuelist_for_support(
+ negotiated_value = iscsi_check_valuelist_for_support(
param, value);
- if (!negoitated_value) {
+ if (!negotiated_value) {
pr_err("Proposer's value list \"%s\" contains"
" no valid values from Acceptor's value list"
" \"%s\".\n", value, param->value);
return -1;
}
- if (iscsi_update_param_value(param, negoitated_value) < 0)
+ if (iscsi_update_param_value(param, negotiated_value) < 0)
return -1;
} else if (IS_PHASE_DECLARATIVE(param)) {
if (iscsi_update_param_value(param, value) < 0)
@@ -1239,47 +1095,7 @@ static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
return -1;
}
- if (IS_TYPE_NUMBER_RANGE(param)) {
- u32 left_val = 0, right_val = 0, recieved_value = 0;
- char *left_val_ptr = NULL, *right_val_ptr = NULL;
- char *tilde_ptr = NULL;
-
- if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
- if (iscsi_update_param_value(param, value) < 0)
- return -1;
- return 0;
- }
-
- tilde_ptr = strchr(value, '~');
- if (tilde_ptr) {
- pr_err("Illegal \"~\" in response for \"%s\".\n",
- param->name);
- return -1;
- }
- tilde_ptr = strchr(param->value, '~');
- if (!tilde_ptr) {
- pr_err("Unable to locate numerical range"
- " indicator \"~\" for \"%s\".\n", param->name);
- return -1;
- }
- *tilde_ptr = '\0';
-
- left_val_ptr = param->value;
- right_val_ptr = param->value + strlen(left_val_ptr) + 1;
- left_val = simple_strtoul(left_val_ptr, NULL, 0);
- right_val = simple_strtoul(right_val_ptr, NULL, 0);
- recieved_value = simple_strtoul(value, NULL, 0);
-
- *tilde_ptr = '~';
-
- if ((recieved_value < left_val) ||
- (recieved_value > right_val)) {
- pr_err("Illegal response \"%s=%u\", value must"
- " be between %u and %u.\n", param->name,
- recieved_value, left_val, right_val);
- return -1;
- }
- } else if (IS_TYPE_VALUE_LIST(param)) {
+ if (IS_TYPE_VALUE_LIST(param)) {
char *comma_ptr = NULL, *tmp_ptr = NULL;
comma_ptr = strchr(value, ',');
@@ -1361,9 +1177,6 @@ static int iscsi_check_value(struct iscsi_param *param, char *value)
} else if (IS_TYPE_NUMBER(param)) {
if (iscsi_check_numerical_value(param, value) < 0)
return -1;
- } else if (IS_TYPE_NUMBER_RANGE(param)) {
- if (iscsi_check_numerical_range_value(param, value) < 0)
- return -1;
} else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
if (iscsi_check_string_or_list_value(param, value) < 0)
return -1;
@@ -1483,8 +1296,6 @@ static int iscsi_enforce_integrity_rules(
char *tmpptr;
u8 DataSequenceInOrder = 0;
u8 ErrorRecoveryLevel = 0, SessionType = 0;
- u8 IFMarker = 0, OFMarker = 0;
- u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
u32 FirstBurstLength = 0, MaxBurstLength = 0;
struct iscsi_param *param = NULL;
@@ -1503,28 +1314,12 @@ static int iscsi_enforce_integrity_rules(
if (!strcmp(param->name, MAXBURSTLENGTH))
MaxBurstLength = simple_strtoul(param->value,
&tmpptr, 0);
- if (!strcmp(param->name, IFMARKER))
- if (!strcmp(param->value, YES))
- IFMarker = 1;
- if (!strcmp(param->name, OFMARKER))
- if (!strcmp(param->value, YES))
- OFMarker = 1;
- if (!strcmp(param->name, IFMARKINT))
- if (!strcmp(param->value, REJECT))
- IFMarkInt_Reject = 1;
- if (!strcmp(param->name, OFMARKINT))
- if (!strcmp(param->value, REJECT))
- OFMarkInt_Reject = 1;
}
list_for_each_entry(param, &param_list->param_list, p_list) {
if (!(param->phase & phase))
continue;
- if (!SessionType && (!IS_PSTATE_ACCEPTOR(param) &&
- (strcmp(param->name, IFMARKER) &&
- strcmp(param->name, OFMARKER) &&
- strcmp(param->name, IFMARKINT) &&
- strcmp(param->name, OFMARKINT))))
+ if (!SessionType && !IS_PSTATE_ACCEPTOR(param))
continue;
if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
@@ -1556,38 +1351,6 @@ static int iscsi_enforce_integrity_rules(
param->name, param->value);
}
}
- if (!strcmp(param->name, IFMARKER) && IFMarkInt_Reject) {
- if (iscsi_update_param_value(param, NO) < 0)
- return -1;
- IFMarker = 0;
- pr_debug("Reset \"%s\" to \"%s\".\n",
- param->name, param->value);
- }
- if (!strcmp(param->name, OFMARKER) && OFMarkInt_Reject) {
- if (iscsi_update_param_value(param, NO) < 0)
- return -1;
- OFMarker = 0;
- pr_debug("Reset \"%s\" to \"%s\".\n",
- param->name, param->value);
- }
- if (!strcmp(param->name, IFMARKINT) && !IFMarker) {
- if (!strcmp(param->value, REJECT))
- continue;
- param->state &= ~PSTATE_NEGOTIATE;
- if (iscsi_update_param_value(param, IRRELEVANT) < 0)
- return -1;
- pr_debug("Reset \"%s\" to \"%s\".\n",
- param->name, param->value);
- }
- if (!strcmp(param->name, OFMARKINT) && !OFMarker) {
- if (!strcmp(param->value, REJECT))
- continue;
- param->state &= ~PSTATE_NEGOTIATE;
- if (iscsi_update_param_value(param, IRRELEVANT) < 0)
- return -1;
- pr_debug("Reset \"%s\" to \"%s\".\n",
- param->name, param->value);
- }
}
return 0;
@@ -1824,24 +1587,6 @@ void iscsi_set_connection_parameters(
*/
pr_debug("MaxRecvDataSegmentLength: %u\n",
ops->MaxRecvDataSegmentLength);
- } else if (!strcmp(param->name, OFMARKER)) {
- ops->OFMarker = !strcmp(param->value, YES);
- pr_debug("OFMarker: %s\n",
- param->value);
- } else if (!strcmp(param->name, IFMARKER)) {
- ops->IFMarker = !strcmp(param->value, YES);
- pr_debug("IFMarker: %s\n",
- param->value);
- } else if (!strcmp(param->name, OFMARKINT)) {
- ops->OFMarkInt =
- simple_strtoul(param->value, &tmpptr, 0);
- pr_debug("OFMarkInt: %s\n",
- param->value);
- } else if (!strcmp(param->name, IFMARKINT)) {
- ops->IFMarkInt =
- simple_strtoul(param->value, &tmpptr, 0);
- pr_debug("IFMarkInt: %s\n",
- param->value);
} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
ops->InitiatorRecvDataSegmentLength =
simple_strtoul(param->value, &tmpptr, 0);
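The parameter hunks above retire sync-and-steering negotiation: IFMarkInt and OFMarkInt lose their numerical-range type (TYPERANGE_MARKINT disappears in the header diff that follows), their initial value becomes Reject, and iscsi_set_keys_to_negotiate() now puts them in PSTATE_REJECT. Condensed, the key handling collapses to something like this sketch:

/* markers are unsupported: always answer marker-interval keys with Reject */
if (!strcmp(param->name, IFMARKINT) || !strcmp(param->name, OFMARKINT))
        SET_PSTATE_REJECT(param);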
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index a47046a752aa..a0751e3f0813 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -138,8 +138,8 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define INITIAL_SESSIONTYPE NORMAL
#define INITIAL_IFMARKER NO
#define INITIAL_OFMARKER NO
-#define INITIAL_IFMARKINT "2048~65535"
-#define INITIAL_OFMARKINT "2048~65535"
+#define INITIAL_IFMARKINT REJECT
+#define INITIAL_OFMARKINT REJECT
/*
* Initial values for iSER parameters following RFC-5046 Section 6
@@ -239,10 +239,9 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define TYPERANGE_AUTH 0x0200
#define TYPERANGE_DIGEST 0x0400
#define TYPERANGE_ISCSINAME 0x0800
-#define TYPERANGE_MARKINT 0x1000
-#define TYPERANGE_SESSIONTYPE 0x2000
-#define TYPERANGE_TARGETADDRESS 0x4000
-#define TYPERANGE_UTF8 0x8000
+#define TYPERANGE_SESSIONTYPE 0x1000
+#define TYPERANGE_TARGETADDRESS 0x2000
+#define TYPERANGE_UTF8 0x4000
#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2)
#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600)
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index fe9a582ca6af..cf59c397007b 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -120,7 +120,7 @@ u8 iscsit_tmr_task_reassign(
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
- int ret, ref_lun;
+ u64 ret, ref_lun;
pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
@@ -164,7 +164,7 @@ u8 iscsit_tmr_task_reassign(
ref_lun = scsilun_to_int(&hdr->lun);
if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
pr_err("Unable to perform connection recovery for"
- " differing ref_lun: %d ref_cmd orig_fe_lun: %u\n",
+ " differing ref_lun: %llu ref_cmd orig_fe_lun: %llu\n",
ref_lun, ref_cmd->se_cmd.orig_fe_lun);
return ISCSI_TMF_RSP_REJECTED;
}
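The iscsi_target_tmr.c hunk rides the tree-wide move to 64-bit LUNs: the unpacked LUN from scsilun_to_int() and se_cmd.orig_fe_lun are both 64-bit now, so ref_lun widens and the mismatch message prints with %llu. The resulting comparison, as in the hunk above:

u64 ref_lun = scsilun_to_int(&hdr->lun);        /* 64-bit unpacked LUN */

if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
        pr_err("Unable to perform connection recovery for"
               " differing ref_lun: %llu ref_cmd orig_fe_lun: %llu\n",
               ref_lun, ref_cmd->se_cmd.orig_fe_lun);
        return ISCSI_TMF_RSP_REJECTED;
}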
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 5e3295fe404d..968068ffcb1c 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -18,7 +18,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_erl0.h"
@@ -67,9 +66,12 @@ int iscsit_load_discovery_tpg(void)
pr_err("Unable to allocate struct iscsi_portal_group\n");
return -1;
}
-
- ret = core_tpg_register(&iscsi_ops, NULL, &tpg->tpg_se_tpg,
- tpg, TRANSPORT_TPG_TYPE_DISCOVERY);
+ /*
+ * Save iscsi_ops pointer for special case discovery TPG that
+ * doesn't exist as se_wwn->wwn_group within configfs.
+ */
+ tpg->tpg_se_tpg.se_tpg_tfo = &iscsi_ops;
+ ret = core_tpg_register(NULL, &tpg->tpg_se_tpg, -1);
if (ret < 0) {
kfree(tpg);
return -1;
@@ -280,8 +282,6 @@ int iscsit_tpg_del_portal_group(
return -EPERM;
}
- core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
-
if (tpg->param_list) {
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index b18edda3e8af..a2bff0702eb2 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -22,7 +22,6 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include <target/iscsi/iscsi_transport.h>
#include <target/iscsi/iscsi_target_core.h>
@@ -746,7 +745,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
__iscsit_free_cmd(cmd, true, shutdown);
- target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ target_put_sess_cmd(se_cmd);
}
break;
case ISCSI_OP_REJECT:
@@ -762,7 +761,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
if (!rc && shutdown && se_cmd->se_sess) {
__iscsit_free_cmd(cmd, true, shutdown);
- target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ target_put_sess_cmd(se_cmd);
}
break;
}
@@ -809,54 +808,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
spin_unlock_bh(&sess->session_usage_lock);
}
-/*
- * Setup conn->if_marker and conn->of_marker values based upon
- * the initial marker-less interval. (see iSCSI v19 A.2)
- */
-int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
-{
- int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
- /*
- * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
- */
- u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
- u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
-
- if (conn->conn_ops->OFMarker) {
- /*
- * Account for the first Login Command received not
- * via iscsi_recv_msg().
- */
- conn->of_marker += ISCSI_HDR_LEN;
- if (conn->of_marker <= OFMarkInt) {
- conn->of_marker = (OFMarkInt - conn->of_marker);
- } else {
- login_ofmarker_count = (conn->of_marker / OFMarkInt);
- next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
- (login_ofmarker_count * MARKER_SIZE);
- conn->of_marker = (next_marker - conn->of_marker);
- }
- conn->of_marker_offset = 0;
- pr_debug("Setting OFMarker value to %u based on Initial"
- " Markerless Interval.\n", conn->of_marker);
- }
-
- if (conn->conn_ops->IFMarker) {
- if (conn->if_marker <= IFMarkInt) {
- conn->if_marker = (IFMarkInt - conn->if_marker);
- } else {
- login_ifmarker_count = (conn->if_marker / IFMarkInt);
- next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
- (login_ifmarker_count * MARKER_SIZE);
- conn->if_marker = (next_marker - conn->if_marker);
- }
- pr_debug("Setting IFMarker value to %u based on Initial"
- " Markerless Interval.\n", conn->if_marker);
- }
-
- return 0;
-}
-
struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
struct iscsi_conn *conn;
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 1ab754a671ff..995f1cb29d0e 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -34,7 +34,6 @@ extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
extern int iscsit_check_session_usage_count(struct iscsi_session *);
extern void iscsit_dec_session_usage_count(struct iscsi_session *);
extern void iscsit_inc_session_usage_count(struct iscsi_session *);
-extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *);
extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 51f0c895c6a5..a556bdebd775 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -35,14 +35,11 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
#include "tcm_loop.h"
#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
-static const struct target_core_fabric_ops loop_ops;
-
static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;
@@ -165,6 +162,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
transfer_length = scsi_bufflen(sc);
}
+ se_cmd->tag = tl_cmd->sc_cmd_tag;
rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
transfer_length, TCM_SIMPLE_TAG,
@@ -217,7 +215,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
* to struct scsi_device
*/
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
- int lun, int task, enum tcm_tmreq_table tmr)
+ u64 lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd = NULL;
struct se_session *se_sess;
@@ -409,7 +407,7 @@ static int tcm_loop_driver_probe(struct device *dev)
sh->max_id = 2;
sh->max_lun = 0;
sh->max_channel = 0;
- sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
+ sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
@@ -520,147 +518,26 @@ static char *tcm_loop_get_fabric_name(void)
return "loopback";
}
-static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
- struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
- /*
- * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
- * time based on the protocol dependent prefix of the passed configfs group.
- *
- * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
- * ProtocolID using target_core_fabric_lib.c symbols.
- */
- switch (tl_hba->tl_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_fabric_proto_ident(se_tpg);
- case SCSI_PROTOCOL_FCP:
- return fc_get_fabric_proto_ident(se_tpg);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_fabric_proto_ident(se_tpg);
- default:
- pr_err("Unknown tl_proto_id: 0x%02x, using"
- " SAS emulation\n", tl_hba->tl_proto_id);
- break;
- }
-
- return sas_get_fabric_proto_ident(se_tpg);
+ return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}
static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* Return the passed NAA identifier for the SAS Target Port
*/
- return &tl_tpg->tl_hba->tl_wwn_address[0];
+ return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}
static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
*/
- return tl_tpg->tl_tpgt;
-}
-
-static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static u32 tcm_loop_get_pr_transport_id(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code,
- unsigned char *buf)
-{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
- struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
-
- switch (tl_hba->tl_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- case SCSI_PROTOCOL_FCP:
- return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- default:
- pr_err("Unknown tl_proto_id: 0x%02x, using"
- " SAS emulation\n", tl_hba->tl_proto_id);
- break;
- }
-
- return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
-}
-
-static u32 tcm_loop_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
- struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
-
- switch (tl_hba->tl_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- case SCSI_PROTOCOL_FCP:
- return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- default:
- pr_err("Unknown tl_proto_id: 0x%02x, using"
- " SAS emulation\n", tl_hba->tl_proto_id);
- break;
- }
-
- return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
-}
-
-/*
- * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
- * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
- */
-static char *tcm_loop_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
- struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
-
- switch (tl_hba->tl_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- case SCSI_PROTOCOL_FCP:
- return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- default:
- pr_err("Unknown tl_proto_id: 0x%02x, using"
- " SAS emulation\n", tl_hba->tl_proto_id);
- break;
- }
-
- return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
+ return tl_tpg(se_tpg)->tl_tpgt;
}
/*
@@ -703,30 +580,6 @@ static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
return tl_tpg->tl_fabric_prot_type;
}
-static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
- struct se_portal_group *se_tpg)
-{
- struct tcm_loop_nacl *tl_nacl;
-
- tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
- if (!tl_nacl) {
- pr_err("Unable to allocate struct tcm_loop_nacl\n");
- return NULL;
- }
-
- return &tl_nacl->se_node_acl;
-}
-
-static void tcm_loop_tpg_release_fabric_acl(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
-{
- struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
- struct tcm_loop_nacl, se_node_acl);
-
- kfree(tl_nacl);
-}
-
static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
return 1;
@@ -742,14 +595,6 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
return;
}
-static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
-{
- struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
- struct tcm_loop_cmd, tl_se_cmd);
-
- return tl_cmd->sc_cmd_tag;
-}
-
static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
@@ -902,7 +747,7 @@ static void tcm_loop_port_unlink(
se_lun->unpacked_lun);
if (!sd) {
pr_err("Unable to locate struct scsi_device for %d:%d:"
- "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
+ "%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
return;
}
/*
@@ -1234,8 +1079,7 @@ static struct se_portal_group *tcm_loop_make_naa_tpg(
/*
* Register the tl_tpg as an emulated SAS TCM Target Endpoint
*/
- ret = core_tpg_register(&loop_ops, wwn, &tl_tpg->tl_se_tpg, tl_tpg,
- TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
if (ret < 0)
return ERR_PTR(-ENOMEM);
@@ -1386,13 +1230,8 @@ static const struct target_core_fabric_ops loop_ops = {
.module = THIS_MODULE,
.name = "loopback",
.get_fabric_name = tcm_loop_get_fabric_name,
- .get_fabric_proto_ident = tcm_loop_get_fabric_proto_ident,
.tpg_get_wwn = tcm_loop_get_endpoint_wwn,
.tpg_get_tag = tcm_loop_get_tag,
- .tpg_get_default_depth = tcm_loop_get_default_depth,
- .tpg_get_pr_transport_id = tcm_loop_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = tcm_loop_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = tcm_loop_parse_pr_out_transport_id,
.tpg_check_demo_mode = tcm_loop_check_demo_mode,
.tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache,
.tpg_check_demo_mode_write_protect =
@@ -1400,8 +1239,6 @@ static const struct target_core_fabric_ops loop_ops = {
.tpg_check_prod_mode_write_protect =
tcm_loop_check_prod_mode_write_protect,
.tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
- .tpg_alloc_fabric_acl = tcm_loop_tpg_alloc_fabric_acl,
- .tpg_release_fabric_acl = tcm_loop_tpg_release_fabric_acl,
.tpg_get_inst_index = tcm_loop_get_inst_index,
.check_stop_free = tcm_loop_check_stop_free,
.release_cmd = tcm_loop_release_cmd,
@@ -1411,7 +1248,6 @@ static const struct target_core_fabric_ops loop_ops = {
.write_pending = tcm_loop_write_pending,
.write_pending_status = tcm_loop_write_pending_status,
.set_default_node_attributes = tcm_loop_set_default_node_attributes,
- .get_task_tag = tcm_loop_get_task_tag,
.get_cmd_state = tcm_loop_get_cmd_state,
.queue_data_in = tcm_loop_queue_data_in,
.queue_status = tcm_loop_queue_status,
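The tcm_loop changes above follow two conversions that recur throughout this series: the per-command tag now lands directly in se_cmd->tag at submission time (replacing the get_task_tag() callback), and the fabric-private TPG is recovered from the embedded se_portal_group via container_of() instead of the removed se_tpg_fabric_ptr back-pointer. Below is a minimal user-space sketch of that container_of() accessor pattern; the struct and field names mirror the diff, but the stub types and main() are illustrative only, not kernel code:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_portal_group { int proto_id; };

struct tcm_loop_tpg {
	unsigned short tl_tpgt;
	struct se_portal_group tl_se_tpg;	/* embedded generic TPG */
};

/* Recover the fabric-private struct from the embedded generic one. */
static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

int main(void)
{
	struct tcm_loop_tpg tpg = { .tl_tpgt = 5 };
	struct se_portal_group *se_tpg = &tpg.tl_se_tpg;

	printf("tpgt=%hu\n", tl_tpg(se_tpg)->tl_tpgt);
	return 0;
}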
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 1e72ff77cac9..4346462094a1 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -2,11 +2,6 @@
#define TL_WWN_ADDR_LEN 256
#define TL_TPGS_PER_HBA 32
-/*
- * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
- */
-#define TL_SCSI_MAX_CMD_LEN 32
-
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;
@@ -33,10 +28,6 @@ struct tcm_loop_nexus {
struct se_session *se_sess;
};
-struct tcm_loop_nacl {
- struct se_node_acl se_node_acl;
-};
-
#define TCM_TRANSPORT_ONLINE 0
#define TCM_TRANSPORT_OFFLINE 1
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index ce81f17ad1ba..0edf320fb685 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -36,7 +36,6 @@
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <asm/unaligned.h>
@@ -109,13 +108,13 @@ static struct sbp_session *sbp_session_find_by_guid(
}
static struct sbp_login_descriptor *sbp_login_find_by_lun(
- struct sbp_session *session, struct se_lun *lun)
+ struct sbp_session *session, u32 unpacked_lun)
{
struct sbp_login_descriptor *login, *found = NULL;
spin_lock_bh(&session->lock);
list_for_each_entry(login, &session->login_list, link) {
- if (login->lun == lun)
+ if (login->login_lun == unpacked_lun)
found = login;
}
spin_unlock_bh(&session->lock);
@@ -125,7 +124,7 @@ static struct sbp_login_descriptor *sbp_login_find_by_lun(
static int sbp_login_count_all_by_lun(
struct sbp_tpg *tpg,
- struct se_lun *lun,
+ u32 unpacked_lun,
int exclusive)
{
struct se_session *se_sess;
@@ -139,7 +138,7 @@ static int sbp_login_count_all_by_lun(
spin_lock_bh(&sess->lock);
list_for_each_entry(login, &sess->login_list, link) {
- if (login->lun != lun)
+ if (login->login_lun != unpacked_lun)
continue;
if (!exclusive || login->exclusive)
@@ -175,23 +174,23 @@ static struct sbp_login_descriptor *sbp_login_find_by_id(
return found;
}
-static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
+static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
struct se_portal_group *se_tpg = &tpg->se_tpg;
struct se_lun *se_lun;
- if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
- return ERR_PTR(-EINVAL);
-
- spin_lock(&se_tpg->tpg_lun_lock);
- se_lun = se_tpg->tpg_lun_list[lun];
-
- if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
- se_lun = ERR_PTR(-ENODEV);
-
- spin_unlock(&se_tpg->tpg_lun_lock);
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
+ if (se_lun->unpacked_lun == login_lun) {
+ rcu_read_unlock();
+ *err = 0;
+ return login_lun;
+ }
+ }
+ rcu_read_unlock();
- return se_lun;
+ *err = -ENODEV;
+ return login_lun;
}
static struct sbp_session *sbp_session_create(
@@ -295,17 +294,16 @@ static void sbp_management_request_login(
{
struct sbp_tport *tport = agent->tport;
struct sbp_tpg *tpg = tport->tpg;
- struct se_lun *se_lun;
- int ret;
- u64 guid;
struct sbp_session *sess;
struct sbp_login_descriptor *login;
struct sbp_login_response_block *response;
- int login_response_len;
+ u64 guid;
+ u32 unpacked_lun;
+ int login_response_len, ret;
- se_lun = sbp_get_lun_from_tpg(tpg,
- LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
- if (IS_ERR(se_lun)) {
+ unpacked_lun = sbp_get_lun_from_tpg(tpg,
+ LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
+ if (ret) {
pr_notice("login to unknown LUN: %d\n",
LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
@@ -326,11 +324,11 @@ static void sbp_management_request_login(
}
pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
- se_lun->unpacked_lun, guid);
+ unpacked_lun, guid);
sess = sbp_session_find_by_guid(tpg, guid);
if (sess) {
- login = sbp_login_find_by_lun(sess, se_lun);
+ login = sbp_login_find_by_lun(sess, unpacked_lun);
if (login) {
pr_notice("initiator already logged-in\n");
@@ -358,7 +356,7 @@ static void sbp_management_request_login(
* reject with access_denied if any logins present
*/
if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
- sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
+ sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
pr_warn("refusing exclusive login with other active logins\n");
req->status.status = cpu_to_be32(
@@ -371,7 +369,7 @@ static void sbp_management_request_login(
* check exclusive bit in any existing login descriptor
* reject with access_denied if any exclusive logins present
*/
- if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
+ if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
pr_warn("refusing login while another exclusive login present\n");
req->status.status = cpu_to_be32(
@@ -384,7 +382,7 @@ static void sbp_management_request_login(
* check we haven't exceeded the number of allowed logins
* reject with resources_unavailable if we have
*/
- if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
+ if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
tport->max_logins_per_lun) {
pr_warn("max number of logins reached\n");
@@ -440,7 +438,7 @@ static void sbp_management_request_login(
}
login->sess = sess;
- login->lun = se_lun;
+ login->login_lun = unpacked_lun;
login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
login->login_id = atomic_inc_return(&login_id);
@@ -602,7 +600,7 @@ static void sbp_management_request_logout(
}
pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
- login->lun->unpacked_lun, login->login_id);
+ login->login_lun, login->login_id);
if (req->node_addr != login->sess->node_id) {
pr_warn("logout from different node ID\n");
@@ -1228,12 +1226,14 @@ static void sbp_handle_command(struct sbp_target_request *req)
goto err;
}
- unpacked_lun = req->login->lun->unpacked_lun;
+ unpacked_lun = req->login->login_lun;
sbp_calc_data_length_direction(req, &data_length, &data_dir);
pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
req->orb_pointer, unpacked_lun, data_length, data_dir);
+ /* only used for printk until we do TMRs */
+ req->se_cmd.tag = req->orb_pointer;
if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
req->sense_buf, unpacked_lun, data_length,
TCM_SIMPLE_TAG, data_dir, 0))
@@ -1707,33 +1707,6 @@ static u16 sbp_get_tag(struct se_portal_group *se_tpg)
return tpg->tport_tpgt;
}
-static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
- struct sbp_nacl *nacl;
-
- nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
- if (!nacl) {
- pr_err("Unable to allocate struct sbp_nacl\n");
- return NULL;
- }
-
- return &nacl->se_node_acl;
-}
-
-static void sbp_release_fabric_acl(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
-{
- struct sbp_nacl *nacl =
- container_of(se_nacl, struct sbp_nacl, se_node_acl);
- kfree(nacl);
-}
-
static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
return 1;
@@ -1795,15 +1768,6 @@ static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
return;
}
-static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
-{
- struct sbp_target_request *req = container_of(se_cmd,
- struct sbp_target_request, se_cmd);
-
- /* only used for printk until we do TMRs */
- return (u32)req->orb_pointer;
-}
-
static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
@@ -1859,106 +1823,23 @@ static int sbp_check_stop_free(struct se_cmd *se_cmd)
return 1;
}
-/*
- * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
- */
-static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- /*
- * Return a IEEE 1394 SCSI Protocol identifier for loopback operations
- * This is defined in section 7.5.1 Table 362 in spc4r17
- */
- return SCSI_PROTOCOL_SBP;
-}
-
-static u32 sbp_get_pr_transport_id(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code,
- unsigned char *buf)
-{
- int ret;
-
- /*
- * Set PROTOCOL IDENTIFIER to 3h for SBP
- */
- buf[0] = SCSI_PROTOCOL_SBP;
- /*
- * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
- * over IEEE 1394
- */
- ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
- if (ret < 0)
- pr_debug("sbp transport_id: invalid hex string\n");
-
- /*
- * The IEEE 1394 Transport ID is a hardcoded 24-byte length
- */
- return 24;
-}
-
-static u32 sbp_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- *format_code = 0;
- /*
- * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
- * over IEEE 1394
- *
- * The SBP Transport ID is a hardcoded 24-byte length
- */
- return 24;
-}
-
-/*
- * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
- * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
- */
-static char *sbp_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- /*
- * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
- * for initiator ports using SCSI over SBP Serial SCSI Protocol
- *
- * The TransportID for a IEEE 1394 Initiator Port is of fixed size of
- * 24 bytes, and IEEE 1394 does not contain a I_T nexus identifier,
- * so we return the **port_nexus_ptr set to NULL.
- */
- *port_nexus_ptr = NULL;
- *out_tid_len = 24;
-
- return (char *)&buf[8];
-}
-
static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
- int i, count = 0;
-
- spin_lock(&tpg->tpg_lun_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- struct se_lun *se_lun = tpg->tpg_lun_list[i];
-
- if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
- continue;
+ struct se_lun *lun;
+ int count = 0;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
count++;
- }
- spin_unlock(&tpg->tpg_lun_lock);
+ rcu_read_unlock();
return count;
}
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
- int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
+ struct se_lun *lun;
+ int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
u32 *data;
if (tport->unit_directory.data) {
@@ -2020,28 +1901,23 @@ static int sbp_update_unit_directory(struct sbp_tport *tport)
/* unit unique ID (leaf is just after LUNs) */
data[idx++] = 0x8d000000 | (num_luns + 1);
- spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
struct se_device *dev;
int type;
-
- if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
- continue;
-
- spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
-
- dev = se_lun->lun_se_dev;
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ dev = rcu_dereference_raw(lun->lun_se_dev);
type = dev->transport->get_device_type(dev);
/* logical_unit_number */
data[idx++] = 0x14000000 |
((type << 16) & 0x1f0000) |
- (se_lun->unpacked_lun & 0xffff);
-
- spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
+ (lun->unpacked_lun & 0xffff);
}
- spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
+ rcu_read_unlock();
/* unit unique ID leaf */
data[idx++] = 2 << 16;
@@ -2100,48 +1976,13 @@ static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
return snprintf(buf, len, "%016llx", wwn);
}
-static struct se_node_acl *sbp_make_nodeacl(
- struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
+static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
- struct se_node_acl *se_nacl, *se_nacl_new;
- struct sbp_nacl *nacl;
u64 guid = 0;
- u32 nexus_depth = 1;
if (sbp_parse_wwn(name, &guid) < 0)
- return ERR_PTR(-EINVAL);
-
- se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
- if (!se_nacl_new)
- return ERR_PTR(-ENOMEM);
-
- /*
- * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a NodeACL from demo mode -> explicit
- */
- se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
- name, nexus_depth);
- if (IS_ERR(se_nacl)) {
- sbp_release_fabric_acl(se_tpg, se_nacl_new);
- return se_nacl;
- }
-
- nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
- nacl->guid = guid;
- sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);
-
- return se_nacl;
-}
-
-static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
-{
- struct sbp_nacl *nacl =
- container_of(se_acl, struct sbp_nacl, se_node_acl);
-
- core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
- kfree(nacl);
+ return -EINVAL;
+ return 0;
}
static int sbp_post_link_lun(
@@ -2214,8 +2055,7 @@ static struct se_portal_group *sbp_make_tpg(
goto out_free_tpg;
}
- ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, tpg,
- TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
if (ret < 0)
goto out_unreg_mgt_agt;
@@ -2505,19 +2345,12 @@ static const struct target_core_fabric_ops sbp_ops = {
.module = THIS_MODULE,
.name = "sbp",
.get_fabric_name = sbp_get_fabric_name,
- .get_fabric_proto_ident = sbp_get_fabric_proto_ident,
.tpg_get_wwn = sbp_get_fabric_wwn,
.tpg_get_tag = sbp_get_tag,
- .tpg_get_default_depth = sbp_get_default_depth,
- .tpg_get_pr_transport_id = sbp_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = sbp_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = sbp_parse_pr_out_transport_id,
.tpg_check_demo_mode = sbp_check_true,
.tpg_check_demo_mode_cache = sbp_check_true,
.tpg_check_demo_mode_write_protect = sbp_check_false,
.tpg_check_prod_mode_write_protect = sbp_check_false,
- .tpg_alloc_fabric_acl = sbp_alloc_fabric_acl,
- .tpg_release_fabric_acl = sbp_release_fabric_acl,
.tpg_get_inst_index = sbp_tpg_get_inst_index,
.release_cmd = sbp_release_cmd,
.shutdown_session = sbp_shutdown_session,
@@ -2526,7 +2359,6 @@ static const struct target_core_fabric_ops sbp_ops = {
.write_pending = sbp_write_pending,
.write_pending_status = sbp_write_pending_status,
.set_default_node_attributes = sbp_set_default_node_attrs,
- .get_task_tag = sbp_get_task_tag,
.get_cmd_state = sbp_get_cmd_state,
.queue_data_in = sbp_queue_data_in,
.queue_status = sbp_queue_status,
@@ -2542,8 +2374,7 @@ static const struct target_core_fabric_ops sbp_ops = {
.fabric_pre_unlink = sbp_pre_unlink_lun,
.fabric_make_np = NULL,
.fabric_drop_np = NULL,
- .fabric_make_nodeacl = sbp_make_nodeacl,
- .fabric_drop_nodeacl = sbp_drop_nodeacl,
+ .fabric_init_nodeacl = sbp_init_nodeacl,
.tfc_wwn_attrs = sbp_wwn_attrs,
.tfc_tpg_base_attrs = sbp_tpg_base_attrs,
diff --git a/drivers/target/sbp/sbp_target.h b/drivers/target/sbp/sbp_target.h
index 6d0d74a2c545..73bcb1208832 100644
--- a/drivers/target/sbp/sbp_target.h
+++ b/drivers/target/sbp/sbp_target.h
@@ -125,7 +125,7 @@ struct sbp_login_descriptor {
struct sbp_session *sess;
struct list_head link;
- struct se_lun *lun;
+ u32 login_lun;
u64 status_fifo_addr;
int exclusive;
@@ -151,15 +151,6 @@ struct sbp_session {
u64 reconnect_expires;
};
-struct sbp_nacl {
- /* Initiator EUI-64 */
- u64 guid;
- /* ASCII formatted GUID for SBP Initiator port */
- char iport_name[SBP_NAMELEN];
- /* Returned by sbp_make_nodeacl() */
- struct se_node_acl se_node_acl;
-};
-
struct sbp_tpg {
/* Target portal group tag for TCM */
u16 tport_tpgt;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 8ca373774276..49aba4a31747 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -34,7 +34,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
@@ -43,11 +42,13 @@
static sense_reason_t core_alua_check_transition(int state, int valid,
int *primary);
static int core_alua_set_tg_pt_secondary_state(
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port, int explicit, int offline);
+ struct se_lun *lun, int explicit, int offline);
static char *core_alua_dump_state(int state);
+static void __target_attach_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *tg_pt_gp);
+
static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;
@@ -145,9 +146,8 @@ sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ struct se_lun *lun;
unsigned char *buf;
u32 rd_len = 0, off;
int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
@@ -222,9 +222,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
rd_len += 8;
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
- tg_pt_gp_mem_list) {
- port = tg_pt_gp_mem->tg_pt;
+ list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+ lun_tg_pt_gp_link) {
/*
* Start Target Port descriptor format
*
@@ -234,8 +233,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
/*
* Set RELATIVE TARGET PORT IDENTIFIER
*/
- buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
- buf[off++] = (port->sep_rtpi & 0xff);
+ buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
+ buf[off++] = (lun->lun_rtpi & 0xff);
rd_len += 4;
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
@@ -259,15 +258,11 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
* this CDB was received upon to determine this value individually
* for ALUA target port group.
*/
- port = cmd->se_lun->lun_sep;
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (tg_pt_gp_mem) {
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (tg_pt_gp)
- buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- }
+ spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
+ tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
+ if (tg_pt_gp)
+ buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
+ spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
}
transport_kunmap_data_sg(cmd);
@@ -284,10 +279,9 @@ sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_port *port, *l_port = cmd->se_lun->lun_sep;
+ struct se_lun *l_lun = cmd->se_lun;
struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
unsigned char *buf;
unsigned char *ptr;
sense_reason_t rc = TCM_NO_SENSE;
@@ -295,9 +289,6 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
int alua_access_state, primary = 0, valid_states;
u16 tg_pt_id, rtpi;
- if (!l_port)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
if (cmd->data_length < 4) {
pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
" small\n", cmd->data_length);
@@ -312,29 +303,24 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
- l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
- if (!l_tg_pt_gp_mem) {
- pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- rc = TCM_UNSUPPORTED_SCSI_OPCODE;
- goto out;
- }
- spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
+ spin_lock(&l_lun->lun_tg_pt_gp_lock);
+ l_tg_pt_gp = l_lun->lun_tg_pt_gp;
if (!l_tg_pt_gp) {
- spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+ spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+ pr_err("Unable to access l_lun->tg_pt_gp\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
- spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
+ spin_unlock(&l_lun->lun_tg_pt_gp_lock);
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICIT_ALUA is disabled\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+ spin_unlock(&l_lun->lun_tg_pt_gp_lock);
ptr = &buf[4]; /* Skip over RESERVED area in header */
@@ -396,7 +382,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
if (!core_alua_do_port_transition(tg_pt_gp,
- dev, l_port, nacl,
+ dev, l_lun, nacl,
alua_access_state, 1))
found = true;
@@ -406,6 +392,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} else {
+ struct se_lun *lun;
+
/*
* Extract the RELATIVE TARGET PORT IDENTIFIER to identify
* the Target Port in question for the incoming
@@ -417,17 +405,16 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* for the struct se_device storage object.
*/
spin_lock(&dev->se_port_lock);
- list_for_each_entry(port, &dev->dev_sep_list,
- sep_list) {
- if (port->sep_rtpi != rtpi)
+ list_for_each_entry(lun, &dev->dev_sep_list,
+ lun_dev_link) {
+ if (lun->lun_rtpi != rtpi)
continue;
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-
+ // XXX: racy unlock
spin_unlock(&dev->se_port_lock);
if (!core_alua_set_tg_pt_secondary_state(
- tg_pt_gp_mem, port, 1, 1))
+ lun, 1, 1))
found = true;
spin_lock(&dev->se_port_lock);
@@ -696,9 +683,7 @@ target_alua_state_check(struct se_cmd *cmd)
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
struct se_lun *lun = cmd->se_lun;
- struct se_port *port = lun->lun_sep;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
@@ -706,33 +691,27 @@ target_alua_state_check(struct se_cmd *cmd)
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0;
- if (!port)
- return 0;
/*
* First, check for a struct se_port specific secondary ALUA target port
* access state: OFFLINE
*/
- if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
+ if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
return TCM_CHECK_CONDITION_NOT_READY;
}
- /*
- * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
- * ALUA target port group, to obtain current ALUA access state.
- * Otherwise look for the underlying struct se_device association with
- * a ALUA logical unit group.
- */
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
+
+ if (!lun->lun_tg_pt_gp)
return 0;
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ tg_pt_gp = lun->lun_tg_pt_gp;
out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ // XXX: keeps using tg_pt_gp without reference after unlock
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
/*
* Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
* statement so the compiler knows explicitly to check this case first.
@@ -764,7 +743,7 @@ target_alua_state_check(struct se_cmd *cmd)
break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
- * handled above with struct se_port->sep_tg_pt_secondary_offline=1
+ * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
*/
case ALUA_ACCESS_STATE_OFFLINE:
default:
@@ -906,10 +885,6 @@ int core_alua_check_nonop_delay(
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);
-/*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
- *
- */
static int core_alua_write_tpg_metadata(
const char *path,
unsigned char *md_buf,
@@ -965,22 +940,15 @@ static int core_alua_update_tpg_primary_metadata(
return rc;
}
-static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
+static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
- struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
- struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
- struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct se_dev_entry *se_deve;
+ struct se_lun *lun;
struct se_lun_acl *lacl;
- struct se_port *port;
- struct t10_alua_tg_pt_gp_member *mem;
- bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
- ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
- tg_pt_gp_mem_list) {
- port = mem->tg_pt;
+ list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+ lun_tg_pt_gp_link) {
/*
* After an implicit target port asymmetric access state
* change, a device server shall establish a unit attention
@@ -995,38 +963,58 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
* every I_T nexus other than the I_T nexus on which the SET
* TARGET PORT GROUPS command
*/
- atomic_inc_mb(&mem->tg_pt_gp_mem_ref_cnt);
+ if (!percpu_ref_tryget_live(&lun->lun_ref))
+ continue;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
- spin_lock_bh(&port->sep_alua_lock);
- list_for_each_entry(se_deve, &port->sep_alua_list,
- alua_port_list) {
- lacl = se_deve->se_lun_acl;
+ spin_lock(&lun->lun_deve_lock);
+ list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
+ lacl = rcu_dereference_check(se_deve->se_lun_acl,
+ lockdep_is_held(&lun->lun_deve_lock));
+
/*
- * se_deve->se_lun_acl pointer may be NULL for a
- * entry created without explicit Node+MappedLUN ACLs
+ * spc4r37 p.242:
+ * After an explicit target port asymmetric access
+ * state change, a device server shall establish a
+ * unit attention condition with the additional sense
+ * code set to ASYMMETRIC ACCESS STATE CHANGED for
+ * the initiator port associated with every I_T nexus
+ * other than the I_T nexus on which the SET TARGET
+ * PORT GROUPS command was received.
*/
- if (!lacl)
- continue;
-
if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
- (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
- (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
- (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
- (tg_pt_gp->tg_pt_gp_alua_port == port))
+ (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_lun == lun))
continue;
- core_scsi3_ua_allocate(lacl->se_lun_nacl,
- se_deve->mapped_lun, 0x2A,
+ /*
+ * se_deve->se_lun_acl pointer may be NULL for an
+ * entry created without explicit Node+MappedLUN ACLs
+ */
+ if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
+ continue;
+
+ core_scsi3_ua_allocate(se_deve, 0x2A,
ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
}
- spin_unlock_bh(&port->sep_alua_lock);
+ spin_unlock(&lun->lun_deve_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- atomic_dec_mb(&mem->tg_pt_gp_mem_ref_cnt);
+ percpu_ref_put(&lun->lun_ref);
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
+ struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+ bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
+
/*
* Update the ALUA metadata buf that has been allocated in
* core_alua_do_port_transition(), this metadata will be written
@@ -1056,6 +1044,9 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
tg_pt_gp->tg_pt_gp_id,
core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
+
+ core_alua_queue_state_change_ua(tg_pt_gp);
+
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1108,6 +1099,8 @@ static int core_alua_do_transition_tg_pt(
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+ core_alua_queue_state_change_ua(tg_pt_gp);
+
/*
* Check for the optional ALUA primary state transition delay
*/
@@ -1142,7 +1135,7 @@ static int core_alua_do_transition_tg_pt(
int core_alua_do_port_transition(
struct t10_alua_tg_pt_gp *l_tg_pt_gp,
struct se_device *l_dev,
- struct se_port *l_port,
+ struct se_lun *l_lun,
struct se_node_acl *l_nacl,
int new_state,
int explicit)
@@ -1172,7 +1165,7 @@ int core_alua_do_port_transition(
* core_alua_do_transition_tg_pt() will always return
* success.
*/
- l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
+ l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
new_state, explicit);
@@ -1211,10 +1204,10 @@ int core_alua_do_port_transition(
continue;
if (l_tg_pt_gp == tg_pt_gp) {
- tg_pt_gp->tg_pt_gp_alua_port = l_port;
+ tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
} else {
- tg_pt_gp->tg_pt_gp_alua_port = NULL;
+ tg_pt_gp->tg_pt_gp_alua_lun = NULL;
tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
}
atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
@@ -1251,22 +1244,20 @@ int core_alua_do_port_transition(
return rc;
}
-/*
- * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
- */
-static int core_alua_update_tpg_secondary_metadata(
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port)
+static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
+ struct se_portal_group *se_tpg = lun->lun_tpg;
unsigned char *md_buf;
- struct se_portal_group *se_tpg = port->sep_tpg;
char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
int len, rc;
+ mutex_lock(&lun->lun_tg_pt_md_mutex);
+
md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
if (!md_buf) {
pr_err("Unable to allocate buf for ALUA metadata\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_unlock;
}
memset(path, 0, ALUA_METADATA_PATH_LEN);
@@ -1281,32 +1272,33 @@ static int core_alua_update_tpg_secondary_metadata(
len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
- atomic_read(&port->sep_tg_pt_secondary_offline),
- port->sep_tg_pt_secondary_stat);
+ atomic_read(&lun->lun_tg_pt_secondary_offline),
+ lun->lun_tg_pt_secondary_stat);
- snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
+ snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu",
se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
- port->sep_lun->unpacked_lun);
+ lun->unpacked_lun);
rc = core_alua_write_tpg_metadata(path, md_buf, len);
kfree(md_buf);
+out_unlock:
+ mutex_unlock(&lun->lun_tg_pt_md_mutex);
return rc;
}
static int core_alua_set_tg_pt_secondary_state(
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port,
+ struct se_lun *lun,
int explicit,
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
int trans_delay_msecs;
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ tg_pt_gp = lun->lun_tg_pt_gp;
if (!tg_pt_gp) {
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
pr_err("Unable to complete secondary state"
" transition\n");
return -EINVAL;
@@ -1314,14 +1306,14 @@ static int core_alua_set_tg_pt_secondary_state(
trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
/*
* Set the secondary ALUA target port access state to OFFLINE
- * or release the previously secondary state for struct se_port
+ * or release the previously secondary state for struct se_lun
*/
if (offline)
- atomic_set(&port->sep_tg_pt_secondary_offline, 1);
+ atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
else
- atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+ atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
- port->sep_tg_pt_secondary_stat = (explicit) ?
+ lun->lun_tg_pt_secondary_stat = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
@@ -1330,7 +1322,7 @@ static int core_alua_set_tg_pt_secondary_state(
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
/*
* Do the optional transition delay after we set the secondary
* ALUA access state.
@@ -1341,11 +1333,8 @@ static int core_alua_set_tg_pt_secondary_state(
* See if we need to update the ALUA fabric port metadata for
* secondary state and status
*/
- if (port->sep_tg_pt_secondary_write_md) {
- mutex_lock(&port->sep_tg_pt_md_mutex);
- core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
- mutex_unlock(&port->sep_tg_pt_md_mutex);
- }
+ if (lun->lun_tg_pt_secondary_write_md)
+ core_alua_update_tpg_secondary_metadata(lun);
return 0;
}
@@ -1699,7 +1688,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
return NULL;
}
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
- INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
+ INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
@@ -1793,32 +1782,11 @@ again:
return 0;
}
-struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
- struct se_port *port)
-{
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-
- tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
- GFP_KERNEL);
- if (!tg_pt_gp_mem) {
- pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
- return ERR_PTR(-ENOMEM);
- }
- INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
- spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
-
- tg_pt_gp_mem->tg_pt = port;
- port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
-
- return tg_pt_gp_mem;
-}
-
void core_alua_free_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+ struct se_lun *lun, *next;
/*
* Once we have reached this point, config_item_put() has already
@@ -1849,30 +1817,24 @@ void core_alua_free_tg_pt_gp(
* struct se_port.
*/
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
- &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
- if (tg_pt_gp_mem->tg_pt_gp_assoc) {
- list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
- tg_pt_gp->tg_pt_gp_members--;
- tg_pt_gp_mem->tg_pt_gp_assoc = 0;
- }
+ list_for_each_entry_safe(lun, next,
+ &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
+ list_del_init(&lun->lun_tg_pt_gp_link);
+ tg_pt_gp->tg_pt_gp_members--;
+
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
/*
- * tg_pt_gp_mem is associated with a single
- * se_port->sep_alua_tg_pt_gp_mem, and is released via
- * core_alua_free_tg_pt_gp_mem().
- *
* If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
* assume we want to re-associate a given tg_pt_gp_mem with
* default_tg_pt_gp.
*/
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ spin_lock(&lun->lun_tg_pt_gp_lock);
if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
- __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+ __target_attach_tg_pt_gp(lun,
dev->t10_alua.default_tg_pt_gp);
} else
- tg_pt_gp_mem->tg_pt_gp = NULL;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ lun->lun_tg_pt_gp = NULL;
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
}
@@ -1881,35 +1843,6 @@ void core_alua_free_tg_pt_gp(
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}
-void core_alua_free_tg_pt_gp_mem(struct se_port *port)
-{
- struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- return;
-
- while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
- cpu_relax();
-
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (tg_pt_gp) {
- spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- if (tg_pt_gp_mem->tg_pt_gp_assoc) {
- list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
- tg_pt_gp->tg_pt_gp_members--;
- tg_pt_gp_mem->tg_pt_gp_assoc = 0;
- }
- spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
- tg_pt_gp_mem->tg_pt_gp = NULL;
- }
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-
- kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
-}
-
static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
struct se_device *dev, const char *name)
{
@@ -1943,50 +1876,65 @@ static void core_alua_put_tg_pt_gp_from_name(
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
-/*
- * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
- */
-void __core_alua_attach_tg_pt_gp_mem(
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct t10_alua_tg_pt_gp *tg_pt_gp)
+static void __target_attach_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
{
+ struct se_dev_entry *se_deve;
+
+ assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
- tg_pt_gp_mem->tg_pt_gp_assoc = 1;
- list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
- &tg_pt_gp->tg_pt_gp_mem_list);
+ lun->lun_tg_pt_gp = tg_pt_gp;
+ list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
tg_pt_gp->tg_pt_gp_members++;
+ spin_lock(&lun->lun_deve_lock);
+ list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
+ core_scsi3_ua_allocate(se_deve, 0x3f,
+ ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
+ spin_unlock(&lun->lun_deve_lock);
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
-/*
- * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
- */
-static void __core_alua_drop_tg_pt_gp_mem(
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct t10_alua_tg_pt_gp *tg_pt_gp)
+void target_attach_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
{
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ __target_attach_tg_pt_gp(lun, tg_pt_gp);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
+}
+
+static void __target_detach_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
- tg_pt_gp_mem->tg_pt_gp = NULL;
- tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+ list_del_init(&lun->lun_tg_pt_gp_link);
tg_pt_gp->tg_pt_gp_members--;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+ lun->lun_tg_pt_gp = NULL;
}
-ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
+void target_detach_tg_pt_gp(struct se_lun *lun)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ tg_pt_gp = lun->lun_tg_pt_gp;
+ if (tg_pt_gp)
+ __target_detach_tg_pt_gp(lun, tg_pt_gp);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
+}
+
+ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
{
struct config_item *tg_pt_ci;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0;
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- return len;
-
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ tg_pt_gp = lun->lun_tg_pt_gp;
if (tg_pt_gp) {
tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
@@ -1998,34 +1946,33 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
&tg_pt_gp->tg_pt_gp_alua_access_state)),
core_alua_dump_status(
tg_pt_gp->tg_pt_gp_alua_access_status),
- (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
+ atomic_read(&lun->lun_tg_pt_secondary_offline) ?
"Offline" : "None",
- core_alua_dump_status(port->sep_tg_pt_secondary_stat));
+ core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
}
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
return len;
}
ssize_t core_alua_store_tg_pt_gp_info(
- struct se_port *port,
+ struct se_lun *lun,
const char *page,
size_t count)
{
- struct se_portal_group *tpg;
- struct se_lun *lun;
- struct se_device *dev = port->sep_lun->lun_se_dev;
+ struct se_portal_group *tpg = lun->lun_tpg;
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char buf[TG_PT_GROUP_NAME_BUF];
int move = 0;
- tpg = port->sep_tpg;
- lun = port->sep_lun;
-
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- return 0;
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+ (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+ return -ENODEV;
if (count > TG_PT_GROUP_NAME_BUF) {
pr_err("ALUA Target Port Group alias too large!\n");
@@ -2049,8 +1996,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
return -ENODEV;
}
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ tg_pt_gp = lun->lun_tg_pt_gp;
if (tg_pt_gp) {
/*
* Clearing an existing tg_pt_gp association, and replacing
@@ -2068,24 +2015,19 @@ ssize_t core_alua_store_tg_pt_gp_info(
&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id);
- __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
- __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+ __target_detach_tg_pt_gp(lun, tg_pt_gp);
+ __target_attach_tg_pt_gp(lun,
dev->t10_alua.default_tg_pt_gp);
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
return count;
}
- /*
- * Removing existing association of tg_pt_gp_mem with tg_pt_gp
- */
- __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+ __target_detach_tg_pt_gp(lun, tg_pt_gp);
move = 1;
}
- /*
- * Associate tg_pt_gp_mem with tg_pt_gp_new.
- */
- __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ __target_attach_tg_pt_gp(lun, tg_pt_gp_new);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
" Target Port Group: alua/%s, ID: %hu\n", (move) ?
"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
@@ -2268,11 +2210,8 @@ ssize_t core_alua_store_preferred_bit(
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
- if (!lun->lun_sep)
- return -ENODEV;
-
return sprintf(page, "%d\n",
- atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
+ atomic_read(&lun->lun_tg_pt_secondary_offline));
}
ssize_t core_alua_store_offline_bit(
@@ -2280,11 +2219,16 @@ ssize_t core_alua_store_offline_bit(
const char *page,
size_t count)
{
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
unsigned long tmp;
int ret;
- if (!lun->lun_sep)
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+ (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
ret = kstrtoul(page, 0, &tmp);
@@ -2297,14 +2241,8 @@ ssize_t core_alua_store_offline_bit(
tmp);
return -EINVAL;
}
- tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem) {
- pr_err("Unable to locate *tg_pt_gp_mem\n");
- return -EINVAL;
- }
- ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
- lun->lun_sep, 0, (int)tmp);
+ ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
if (ret < 0)
return -EINVAL;
@@ -2315,7 +2253,7 @@ ssize_t core_alua_show_secondary_status(
struct se_lun *lun,
char *page)
{
- return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
+ return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
}
ssize_t core_alua_store_secondary_status(
@@ -2338,7 +2276,7 @@ ssize_t core_alua_store_secondary_status(
tmp);
return -EINVAL;
}
- lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
+ lun->lun_tg_pt_secondary_stat = (int)tmp;
return count;
}
@@ -2347,8 +2285,7 @@ ssize_t core_alua_show_secondary_write_metadata(
struct se_lun *lun,
char *page)
{
- return sprintf(page, "%d\n",
- lun->lun_sep->sep_tg_pt_secondary_write_md);
+ return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
}
ssize_t core_alua_store_secondary_write_metadata(
@@ -2369,7 +2306,7 @@ ssize_t core_alua_store_secondary_write_metadata(
" %lu\n", tmp);
return -EINVAL;
}
- lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
+ lun->lun_tg_pt_secondary_write_md = (int)tmp;
return count;
}
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 0a7d65e80404..9b250f9b33bf 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -85,7 +85,6 @@
extern struct kmem_cache *t10_alua_lu_gp_cache;
extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
-extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
extern struct kmem_cache *t10_alua_lba_map_cache;
extern struct kmem_cache *t10_alua_lba_map_mem_cache;
@@ -94,7 +93,7 @@ extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
- struct se_device *, struct se_port *,
+ struct se_device *, struct se_lun *,
struct se_node_acl *, int, int);
extern char *core_alua_dump_status(int);
extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
@@ -117,14 +116,11 @@ extern void core_alua_drop_lu_gp_dev(struct se_device *);
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct se_device *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
-extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
- struct se_port *);
extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
-extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
-extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
- struct t10_alua_tg_pt_gp *);
-extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
-extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
+extern void target_detach_tg_pt_gp(struct se_lun *);
+extern void target_attach_tg_pt_gp(struct se_lun *, struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_lun *, const char *,
size_t);
extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e7b0430a0575..0b0de3647478 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -41,7 +41,6 @@
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "target_core_internal.h"
@@ -51,15 +50,26 @@
#include "target_core_xcopy.h"
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
-static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \
+static void target_core_setup_##_name##_cit(struct target_backend *tb) \
{ \
- struct target_backend_cits *tbc = &sa->tb_cits; \
- struct config_item_type *cit = &tbc->tb_##_name##_cit; \
+ struct config_item_type *cit = &tb->tb_##_name##_cit; \
\
cit->ct_item_ops = _item_ops; \
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
- cit->ct_owner = sa->owner; \
+ cit->ct_owner = tb->ops->owner; \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
+}
+
+#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \
+static void target_core_setup_##_name##_cit(struct target_backend *tb) \
+{ \
+ struct config_item_type *cit = &tb->tb_##_name##_cit; \
+ \
+ cit->ct_item_ops = _item_ops; \
+ cit->ct_group_ops = _group_ops; \
+ cit->ct_attrs = tb->ops->tb_##_name##_attrs; \
+ cit->ct_owner = tb->ops->owner; \
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
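The reworked TB_CIT_SETUP macros take the new struct target_backend, and the _DRV variant sources the attribute table from the backend's ops rather than a per-subsystem cit template. A stand-alone sketch of the token-pasting pattern (the stub struct layout and field names here are illustrative, not the kernel's):

#include <stdio.h>

struct config_item_type { const char *name; void *attrs; };
struct backend_ops { void *tb_dev_attrs; };
struct target_backend {
	struct backend_ops *ops;
	struct config_item_type tb_dev_cit;
};

/* One invocation stamps out one setup function per config_item_type,
 * pulling the attribute table out of the backend's ops. */
#define TB_CIT_SETUP_DRV(_name)						\
static void setup_##_name##_cit(struct target_backend *tb)		\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
	cit->name = #_name;						\
	cit->attrs = tb->ops->tb_##_name##_attrs;			\
	printf("Setup generic %s\n", #_name);				\
}

TB_CIT_SETUP_DRV(dev)	/* expands to setup_dev_cit() */

int main(void)
{
	struct backend_ops ops = { .tb_dev_attrs = NULL };
	struct target_backend tb = { .ops = &ops };

	setup_dev_cit(&tb);
	return 0;
}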
@@ -92,7 +102,7 @@ static ssize_t target_core_attr_show(struct config_item *item,
char *page)
{
return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
- " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
+ " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
utsname()->sysname, utsname()->machine);
}
@@ -116,7 +126,7 @@ static struct target_fabric_configfs *target_core_get_fabric(
mutex_lock(&g_tf_lock);
list_for_each_entry(tf, &g_tf_list, tf_list) {
- if (!strcmp(tf->tf_name, name)) {
+ if (!strcmp(tf->tf_ops->name, name)) {
atomic_inc(&tf->tf_access_cnt);
mutex_unlock(&g_tf_lock);
return tf;
@@ -193,29 +203,24 @@ static struct config_group *target_core_register_fabric(
return ERR_PTR(-EINVAL);
}
pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
- " %s\n", tf->tf_name);
+ " %s\n", tf->tf_ops->name);
/*
* On a successful target_core_get_fabric() look, the returned
* struct target_fabric_configfs *tf will contain a usage reference.
*/
pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
- &tf->tf_cit_tmpl.tfc_wwn_cit);
+ &tf->tf_wwn_cit);
tf->tf_group.default_groups = tf->tf_default_groups;
tf->tf_group.default_groups[0] = &tf->tf_disc_group;
tf->tf_group.default_groups[1] = NULL;
- config_group_init_type_name(&tf->tf_group, name,
- &tf->tf_cit_tmpl.tfc_wwn_cit);
+ config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
- &tf->tf_cit_tmpl.tfc_discovery_cit);
+ &tf->tf_discovery_cit);
pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name);
- tf->tf_fabric = &tf->tf_group.cg_item;
- pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
- " for %s\n", name);
-
return &tf->tf_group;
}
@@ -236,13 +241,9 @@ static void target_core_deregister_fabric(
" tf list\n", config_item_name(item));
pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
- " %s\n", tf->tf_name);
+ " %s\n", tf->tf_ops->name);
atomic_dec(&tf->tf_access_cnt);
- pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
- " tf->tf_fabric for %s\n", tf->tf_name);
- tf->tf_fabric = NULL;
-
pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
" %s\n", config_item_name(item));
@@ -318,10 +319,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
pr_err("Missing tfo->get_fabric_name()\n");
return -EINVAL;
}
- if (!tfo->get_fabric_proto_ident) {
- pr_err("Missing tfo->get_fabric_proto_ident()\n");
- return -EINVAL;
- }
if (!tfo->tpg_get_wwn) {
pr_err("Missing tfo->tpg_get_wwn()\n");
return -EINVAL;
@@ -330,18 +327,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
pr_err("Missing tfo->tpg_get_tag()\n");
return -EINVAL;
}
- if (!tfo->tpg_get_default_depth) {
- pr_err("Missing tfo->tpg_get_default_depth()\n");
- return -EINVAL;
- }
- if (!tfo->tpg_get_pr_transport_id) {
- pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
- return -EINVAL;
- }
- if (!tfo->tpg_get_pr_transport_id_len) {
- pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
- return -EINVAL;
- }
if (!tfo->tpg_check_demo_mode) {
pr_err("Missing tfo->tpg_check_demo_mode()\n");
return -EINVAL;
@@ -358,14 +343,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
return -EINVAL;
}
- if (!tfo->tpg_alloc_fabric_acl) {
- pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
- return -EINVAL;
- }
- if (!tfo->tpg_release_fabric_acl) {
- pr_err("Missing tfo->tpg_release_fabric_acl()\n");
- return -EINVAL;
- }
if (!tfo->tpg_get_inst_index) {
pr_err("Missing tfo->tpg_get_inst_index()\n");
return -EINVAL;
@@ -398,10 +375,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
pr_err("Missing tfo->set_default_node_attributes()\n");
return -EINVAL;
}
- if (!tfo->get_task_tag) {
- pr_err("Missing tfo->get_task_tag()\n");
- return -EINVAL;
- }
if (!tfo->get_cmd_state) {
pr_err("Missing tfo->get_cmd_state()\n");
return -EINVAL;
@@ -464,15 +437,7 @@ int target_register_template(const struct target_core_fabric_ops *fo)
INIT_LIST_HEAD(&tf->tf_list);
atomic_set(&tf->tf_access_cnt, 0);
-
- /*
- * Setup the default generic struct config_item_type's (cits) in
- * struct target_fabric_configfs->tf_cit_tmpl
- */
- tf->tf_module = fo->module;
- snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name);
-
- tf->tf_ops = *fo;
+ tf->tf_ops = fo;
target_fabric_setup_cits(tf);
mutex_lock(&g_tf_lock);
@@ -489,7 +454,7 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
mutex_lock(&g_tf_lock);
list_for_each_entry(t, &g_tf_list, tf_list) {
- if (!strcmp(t->tf_name, fo->name)) {
+ if (!strcmp(t->tf_ops->name, fo->name)) {
BUG_ON(atomic_read(&t->tf_access_cnt));
list_del(&t->tf_list);
kfree(t);
@@ -505,16 +470,605 @@ EXPORT_SYMBOL(target_unregister_template);
//############################################################################*/
/* Start functions for struct config_item_type tb_dev_attrib_cit */
+#define DEF_TB_DEV_ATTRIB_SHOW(_name) \
+static ssize_t show_##_name(struct se_dev_attrib *da, char *page) \
+{ \
+ return snprintf(page, PAGE_SIZE, "%u\n", da->_name); \
+}
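+
+/*
+ * For example, DEF_TB_DEV_ATTRIB_SHOW(emulate_tas) expands to roughly:
+ *
+ *	static ssize_t show_emulate_tas(struct se_dev_attrib *da, char *page)
+ *	{
+ *		return snprintf(page, PAGE_SIZE, "%u\n", da->emulate_tas);
+ *	}
+ */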
+
+DEF_TB_DEV_ATTRIB_SHOW(emulate_model_alias);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_dpo);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_fua_write);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_fua_read);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_write_cache);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_tas);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_tpu);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_tpws);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_caw);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_3pc);
+DEF_TB_DEV_ATTRIB_SHOW(pi_prot_type);
+DEF_TB_DEV_ATTRIB_SHOW(hw_pi_prot_type);
+DEF_TB_DEV_ATTRIB_SHOW(pi_prot_format);
+DEF_TB_DEV_ATTRIB_SHOW(enforce_pr_isids);
+DEF_TB_DEV_ATTRIB_SHOW(is_nonrot);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_rest_reord);
+DEF_TB_DEV_ATTRIB_SHOW(force_pr_aptpl);
+DEF_TB_DEV_ATTRIB_SHOW(hw_block_size);
+DEF_TB_DEV_ATTRIB_SHOW(block_size);
+DEF_TB_DEV_ATTRIB_SHOW(hw_max_sectors);
+DEF_TB_DEV_ATTRIB_SHOW(optimal_sectors);
+DEF_TB_DEV_ATTRIB_SHOW(hw_queue_depth);
+DEF_TB_DEV_ATTRIB_SHOW(queue_depth);
+DEF_TB_DEV_ATTRIB_SHOW(max_unmap_lba_count);
+DEF_TB_DEV_ATTRIB_SHOW(max_unmap_block_desc_count);
+DEF_TB_DEV_ATTRIB_SHOW(unmap_granularity);
+DEF_TB_DEV_ATTRIB_SHOW(unmap_granularity_alignment);
+DEF_TB_DEV_ATTRIB_SHOW(max_write_same_len);
+
+#define DEF_TB_DEV_ATTRIB_STORE_U32(_name) \
+static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\
+ size_t count) \
+{ \
+ u32 val; \
+ int ret; \
+ \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret < 0) \
+ return ret; \
+ da->_name = val; \
+ return count; \
+}
+
+DEF_TB_DEV_ATTRIB_STORE_U32(max_unmap_lba_count);
+DEF_TB_DEV_ATTRIB_STORE_U32(max_unmap_block_desc_count);
+DEF_TB_DEV_ATTRIB_STORE_U32(unmap_granularity);
+DEF_TB_DEV_ATTRIB_STORE_U32(unmap_granularity_alignment);
+DEF_TB_DEV_ATTRIB_STORE_U32(max_write_same_len);
+
+#define DEF_TB_DEV_ATTRIB_STORE_BOOL(_name) \
+static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\
+ size_t count) \
+{ \
+ bool flag; \
+ int ret; \
+ \
+ ret = strtobool(page, &flag); \
+ if (ret < 0) \
+ return ret; \
+ da->_name = flag; \
+ return count; \
+}
+
+DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_fua_write);
+DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_caw);
+DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_3pc);
+DEF_TB_DEV_ATTRIB_STORE_BOOL(enforce_pr_isids);
+DEF_TB_DEV_ATTRIB_STORE_BOOL(is_nonrot);
+
+#define DEF_TB_DEV_ATTRIB_STORE_STUB(_name) \
+static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\
+ size_t count) \
+{ \
+ printk_once(KERN_WARNING \
+ "ignoring deprecated ##_name## attribute\n"); \
+ return count; \
+}
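+
+/*
+ * Note that ## token pasting has no effect inside a string literal, hence
+ * __stringify(_name) above; e.g. store_emulate_dpo prints
+ * "ignoring deprecated emulate_dpo attribute" once.
+ */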
+
+DEF_TB_DEV_ATTRIB_STORE_STUB(emulate_dpo);
+DEF_TB_DEV_ATTRIB_STORE_STUB(emulate_fua_read);
+
+static void dev_set_t10_wwn_model_alias(struct se_device *dev)
+{
+ const char *configname;
+
+ configname = config_item_name(&dev->dev_group.cg_item);
+ if (strlen(configname) >= 16) {
+ pr_warn("dev[%p]: Backstore name '%s' is too long for "
+ "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
+ configname);
+ }
+ snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
+}
+
+static ssize_t store_emulate_model_alias(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change model alias"
+ " while export_count is %d\n",
+ dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag) {
+ dev_set_t10_wwn_model_alias(dev);
+ } else {
+ strncpy(&dev->t10_wwn.model[0],
+ dev->transport->inquiry_prod, 16);
+ }
+ da->emulate_model_alias = flag;
+ return count;
+}
+
+static ssize_t store_emulate_write_cache(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ bool flag;
+ int ret;
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag && da->da_dev->transport->get_write_cache) {
+ pr_err("emulate_write_cache not supported for this device\n");
+ return -EINVAL;
+ }
+
+ da->emulate_write_cache = flag;
+ pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+ da->da_dev, flag);
+ return count;
+}
+
+static ssize_t store_emulate_ua_intlck_ctrl(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != 0 && val != 1 && val != 2) {
+ pr_err("Illegal value %d\n", val);
+ return -EINVAL;
+ }
+
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device"
+ " UA_INTRLCK_CTRL while export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
+ return -EINVAL;
+ }
+ da->emulate_ua_intlck_ctrl = val;
+ pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+ da->da_dev, val);
+ return count;
+}
+
+static ssize_t store_emulate_tas(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ bool flag;
+ int ret;
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device TAS while"
+ " export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
+ return -EINVAL;
+ }
+ da->emulate_tas = flag;
+ pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+ da->da_dev, flag ? "Enabled" : "Disabled");
+
+ return count;
+}
+
+static ssize_t store_emulate_tpu(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ bool flag;
+ int ret;
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We expect this value to be non-zero when generic Block Layer
+ * Discard support is detected in iblock_create_virtdevice().
+ */
+ if (flag && !da->max_unmap_block_desc_count) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ da->emulate_tpu = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+ da->da_dev, flag);
+ return count;
+}
+
+static ssize_t store_emulate_tpws(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ bool flag;
+ int ret;
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We expect this value to be non-zero when generic Block Layer
+ * Discard support is detected in iblock_create_virtdevice().
+ */
+ if (flag && !da->max_unmap_block_desc_count) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ da->emulate_tpws = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+ da->da_dev, flag);
+ return count;
+}
+
+static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ int old_prot = da->pi_prot_type, ret;
+ struct se_device *dev = da->da_dev;
+ u32 flag;
+
+ ret = kstrtou32(page, 0, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
+ pr_err("Illegal value %d for pi_prot_type\n", flag);
+ return -EINVAL;
+ }
+ if (flag == 2) {
+ pr_err("DIF TYPE2 protection currently not supported\n");
+ return -ENOSYS;
+ }
+ if (da->hw_pi_prot_type) {
+ pr_warn("DIF protection enabled on underlying hardware,"
+ " ignoring\n");
+ return count;
+ }
+ if (!dev->transport->init_prot || !dev->transport->free_prot) {
+ /* 0 is only allowed value for non-supporting backends */
+ if (flag == 0)
+ return count;
+
+ pr_err("DIF protection not supported by backend: %s\n",
+ dev->transport->name);
+ return -ENOSYS;
+ }
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("DIF protection requires device to be configured\n");
+ return -ENODEV;
+ }
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device PROT type while"
+ " export_count is %d\n", dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ da->pi_prot_type = flag;
+
+ if (flag && !old_prot) {
+ ret = dev->transport->init_prot(dev);
+ if (ret) {
+ da->pi_prot_type = old_prot;
+ return ret;
+ }
+
+ } else if (!flag && old_prot) {
+ dev->transport->free_prot(dev);
+ }
+
+ pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
+ return count;
+}
+
+static ssize_t store_pi_prot_format(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (!flag)
+ return count;
+
+ if (!dev->transport->format_prot) {
+ pr_err("DIF protection format not supported by backend %s\n",
+ dev->transport->name);
+ return -ENOSYS;
+ }
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("DIF protection format requires device to be configured\n");
+ return -ENODEV;
+ }
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to format SE Device PROT type while"
+ " export_count is %d\n", dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ ret = dev->transport->format_prot(dev);
+ if (ret)
+ return ret;
+
+ pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
+ return count;
+}
+
+static ssize_t store_force_pr_aptpl(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ bool flag;
+ int ret;
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to set force_pr_aptpl while"
+ " export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
+ return -EINVAL;
+ }
+
+ da->force_pr_aptpl = flag;
+ pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
+ return count;
+}
+
+static ssize_t store_emulate_rest_reord(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ bool flag;
+ int ret;
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag != 0) {
+ printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
+ " reordering not implemented\n", da->da_dev);
+ return -ENOSYS;
+ }
+ da->emulate_rest_reord = flag;
+ pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
+ da->da_dev, flag);
+ return count;
+}
+
+/*
+ * Note: this can only be called on an unexported SE Device Object.
+ */
+static ssize_t store_queue_depth(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ struct se_device *dev = da->da_dev;
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device TCQ while"
+ " export_count is %d\n",
+ dev, dev->export_count);
+ return -EINVAL;
+ }
+ if (!val) {
+ pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
+ return -EINVAL;
+ }
+
+ if (val > dev->dev_attrib.queue_depth) {
+ if (val > dev->dev_attrib.hw_queue_depth) {
+ pr_err("dev[%p]: Passed queue_depth:"
+ " %u exceeds TCM/SE_Device MAX"
+ " TCQ: %u\n", dev, val,
+ dev->dev_attrib.hw_queue_depth);
+ return -EINVAL;
+ }
+ }
+ da->queue_depth = dev->queue_depth = val;
+ pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
+ return count;
+}
+
+static ssize_t store_optimal_sectors(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device"
+ " optimal_sectors while export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
+ return -EINVAL;
+ }
+ if (val > da->hw_max_sectors) {
+ pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
+ " greater than hw_max_sectors: %u\n",
+ da->da_dev, val, da->hw_max_sectors);
+ return -EINVAL;
+ }
+
+ da->optimal_sectors = val;
+ pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
+ da->da_dev, val);
+ return count;
+}
+
+static ssize_t store_block_size(struct se_dev_attrib *da,
+ const char *page, size_t count)
+{
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device block_size"
+ " while export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
+ return -EINVAL;
+ }
+
+ if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
+ pr_err("dev[%p]: Illegal value for block_device: %u"
+ " for SE device, must be 512, 1024, 2048 or 4096\n",
+ da->da_dev, val);
+ return -EINVAL;
+ }
+
+ da->block_size = val;
+ if (da->max_bytes_per_io)
+ da->hw_max_sectors = da->max_bytes_per_io / val;
+
+ pr_debug("dev[%p]: SE Device block_size changed to %u\n",
+ da->da_dev, val);
+ return count;
+}
+
+CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
+#define TB_DEV_ATTR(_backend, _name, _mode) \
+static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ show_##_name, \
+ store_##_name);
+
+#define TB_DEV_ATTR_RO(_backend, _name) \
+static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ show_##_name);
+
+TB_DEV_ATTR(target_core, emulate_model_alias, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_dpo, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_fua_write, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_fua_read, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_write_cache, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_tas, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_tpu, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_tpws, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_caw, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_3pc, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, pi_prot_type, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR_RO(target_core, hw_pi_prot_type);
+TB_DEV_ATTR(target_core, pi_prot_format, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, enforce_pr_isids, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, is_nonrot, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_rest_reord, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, force_pr_aptpl, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR_RO(target_core, hw_block_size);
+TB_DEV_ATTR(target_core, block_size, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR_RO(target_core, hw_max_sectors);
+TB_DEV_ATTR(target_core, optimal_sectors, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR_RO(target_core, hw_queue_depth);
+TB_DEV_ATTR(target_core, queue_depth, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, max_unmap_lba_count, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, unmap_granularity, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, max_write_same_len, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
+/*
+ * dev_attrib attributes for devices using the target core SBC/SPC
+ * interpreter. Any backend using spc_parse_cdb should be using
+ * these.
+ */
+struct configfs_attribute *sbc_attrib_attrs[] = {
+ &target_core_dev_attrib_emulate_model_alias.attr,
+ &target_core_dev_attrib_emulate_dpo.attr,
+ &target_core_dev_attrib_emulate_fua_write.attr,
+ &target_core_dev_attrib_emulate_fua_read.attr,
+ &target_core_dev_attrib_emulate_write_cache.attr,
+ &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &target_core_dev_attrib_emulate_tas.attr,
+ &target_core_dev_attrib_emulate_tpu.attr,
+ &target_core_dev_attrib_emulate_tpws.attr,
+ &target_core_dev_attrib_emulate_caw.attr,
+ &target_core_dev_attrib_emulate_3pc.attr,
+ &target_core_dev_attrib_pi_prot_type.attr,
+ &target_core_dev_attrib_hw_pi_prot_type.attr,
+ &target_core_dev_attrib_pi_prot_format.attr,
+ &target_core_dev_attrib_enforce_pr_isids.attr,
+ &target_core_dev_attrib_is_nonrot.attr,
+ &target_core_dev_attrib_emulate_rest_reord.attr,
+ &target_core_dev_attrib_force_pr_aptpl.attr,
+ &target_core_dev_attrib_hw_block_size.attr,
+ &target_core_dev_attrib_block_size.attr,
+ &target_core_dev_attrib_hw_max_sectors.attr,
+ &target_core_dev_attrib_optimal_sectors.attr,
+ &target_core_dev_attrib_hw_queue_depth.attr,
+ &target_core_dev_attrib_queue_depth.attr,
+ &target_core_dev_attrib_max_unmap_lba_count.attr,
+ &target_core_dev_attrib_max_unmap_block_desc_count.attr,
+ &target_core_dev_attrib_unmap_granularity.attr,
+ &target_core_dev_attrib_unmap_granularity_alignment.attr,
+ &target_core_dev_attrib_max_write_same_len.attr,
+ NULL,
+};
+EXPORT_SYMBOL(sbc_attrib_attrs);
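+
+/*
+ * Usage sketch (hypothetical, but matching the target_backend_ops layout
+ * this series introduces): a backend driving the SBC/SPC interpreter just
+ * points at the array above, e.g.
+ *
+ *	static const struct target_backend_ops iblock_ops = {
+ *		.name			= "iblock",
+ *		.owner			= THIS_MODULE,
+ *		...
+ *		.tb_dev_attrib_attrs	= sbc_attrib_attrs,
+ *	};
+ */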
+
+TB_DEV_ATTR_RO(target_pt, hw_pi_prot_type);
+TB_DEV_ATTR_RO(target_pt, hw_block_size);
+TB_DEV_ATTR_RO(target_pt, hw_max_sectors);
+TB_DEV_ATTR_RO(target_pt, hw_queue_depth);
+
+/*
+ * Minimal dev_attrib attributes for devices passing through CDBs.
+ * In this case we only provide a few read-only attributes for
+ * backwards compatibility.
+ */
+struct configfs_attribute *passthrough_attrib_attrs[] = {
+ &target_pt_dev_attrib_hw_pi_prot_type.attr,
+ &target_pt_dev_attrib_hw_block_size.attr,
+ &target_pt_dev_attrib_hw_max_sectors.attr,
+ &target_pt_dev_attrib_hw_queue_depth.attr,
+ NULL,
+};
+EXPORT_SYMBOL(passthrough_attrib_attrs);
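+
+/*
+ * Likewise, a CDB passthrough backend such as pscsi would be expected to
+ * set TRANSPORT_FLAG_PASSTHROUGH in its ops and use:
+ *
+ *	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
+ */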
+
static struct configfs_item_operations target_core_dev_attrib_ops = {
.show_attribute = target_core_dev_attrib_attr_show,
.store_attribute = target_core_dev_attrib_attr_store,
};
-TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL);
+TB_CIT_SETUP_DRV(dev_attrib, &target_core_dev_attrib_ops, NULL);
/* End functions for struct config_item_type tb_dev_attrib_cit */
@@ -862,7 +1416,6 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
struct se_device *dev, char *page)
{
struct se_node_acl *se_nacl;
- struct se_lun *lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg;
const struct target_core_fabric_ops *tfo;
@@ -877,7 +1430,6 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
- lun = pr_reg->pr_reg_tg_pt_lun;
tfo = se_tpg->se_tpg_tfo;
len += sprintf(page+len, "SPC-3 Reservation: %s"
@@ -885,9 +1437,9 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
tfo->tpg_get_wwn(se_tpg));
len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
" Identifier Tag: %hu %s Portal Group Tag: %hu"
- " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
+ " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
- tfo->get_fabric_name(), lun->unpacked_lun);
+ tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun);
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
@@ -1012,12 +1564,12 @@ static match_table_t tokens = {
{Opt_res_type, "res_type=%d"},
{Opt_res_scope, "res_scope=%d"},
{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
- {Opt_mapped_lun, "mapped_lun=%d"},
+ {Opt_mapped_lun, "mapped_lun=%lld"},
{Opt_target_fabric, "target_fabric=%s"},
{Opt_target_node, "target_node=%s"},
{Opt_tpgt, "tpgt=%d"},
{Opt_port_rtpi, "port_rtpi=%d"},
- {Opt_target_lun, "target_lun=%d"},
+ {Opt_target_lun, "target_lun=%lld"},
{Opt_err, NULL}
};
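+
+/*
+ * For reference, an APTPL metadata write parsed by this table looks like
+ * (illustrative values, limited to the tokens shown above):
+ *
+ *	res_holder=1,res_type=3,mapped_lun=0,target_fabric=iSCSI,
+ *	target_node=iqn.2003-01.org.example:sn.1,tpgt=1,target_lun=0
+ */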
@@ -1032,10 +1584,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
substring_t args[MAX_OPT_ARGS];
unsigned long long tmp_ll;
u64 sa_res_key = 0;
- u32 mapped_lun = 0, target_lun = 0;
+ u64 mapped_lun = 0, target_lun = 0;
int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
- u16 port_rpti = 0, tpgt = 0;
- u8 type = 0, scope;
+ u16 tpgt = 0;
+ u8 type = 0;
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0;
@@ -1115,7 +1667,6 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
break;
case Opt_res_scope:
match_int(args, &arg);
- scope = (u8)arg;
break;
case Opt_res_all_tg_pt:
match_int(args, &arg);
@@ -1123,7 +1674,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
break;
case Opt_mapped_lun:
match_int(args, &arg);
- mapped_lun = (u32)arg;
+ mapped_lun = (u64)arg;
break;
/*
* PR APTPL Metadata for Target Port
@@ -1155,11 +1706,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
break;
case Opt_port_rtpi:
match_int(args, &arg);
- port_rpti = (u16)arg;
break;
case Opt_target_lun:
match_int(args, &arg);
- target_lun = (u32)arg;
+ target_lun = (u64)arg;
break;
default:
break;
@@ -1223,13 +1773,13 @@ TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);
static ssize_t target_core_show_dev_info(void *p, char *page)
{
struct se_device *dev = p;
- struct se_subsystem_api *t = dev->transport;
int bl = 0;
ssize_t read_bytes = 0;
transport_dump_dev_state(dev, page, &bl);
read_bytes += bl;
- read_bytes += t->show_configfs_dev_params(dev, page+read_bytes);
+ read_bytes += dev->transport->show_configfs_dev_params(dev,
+ page+read_bytes);
return read_bytes;
}
@@ -1247,9 +1797,8 @@ static ssize_t target_core_store_dev_control(
size_t count)
{
struct se_device *dev = p;
- struct se_subsystem_api *t = dev->transport;
- return t->set_configfs_dev_params(dev, page, count);
+ return dev->transport->set_configfs_dev_params(dev, page, count);
}
static struct target_core_configfs_attribute target_core_attr_dev_control = {
@@ -2339,21 +2888,16 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- struct se_port *port;
- struct se_portal_group *tpg;
struct se_lun *lun;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0, cur_len;
unsigned char buf[TG_PT_GROUP_NAME_BUF];
memset(buf, 0, TG_PT_GROUP_NAME_BUF);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
- tg_pt_gp_mem_list) {
- port = tg_pt_gp_mem->tg_pt;
- tpg = port->sep_tpg;
- lun = port->sep_lun;
+ list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+ lun_tg_pt_gp_link) {
+ struct se_portal_group *tpg = lun->lun_tpg;
cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
"/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -2526,9 +3070,9 @@ static struct config_group *target_core_make_subdev(
const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct se_subsystem_api *t;
struct config_item *hba_ci = &group->cg_item;
struct se_hba *hba = item_to_hba(hba_ci);
+ struct target_backend *tb = hba->backend;
struct se_device *dev;
struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
struct config_group *dev_stat_grp = NULL;
@@ -2537,10 +3081,6 @@ static struct config_group *target_core_make_subdev(
ret = mutex_lock_interruptible(&hba->hba_access_mutex);
if (ret)
return ERR_PTR(ret);
- /*
- * Locate the struct se_subsystem_api from parent's struct se_hba.
- */
- t = hba->transport;
dev = target_alloc_device(hba, name);
if (!dev)
@@ -2553,17 +3093,17 @@ static struct config_group *target_core_make_subdev(
if (!dev_cg->default_groups)
goto out_free_device;
- config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit);
+ config_group_init_type_name(dev_cg, name, &tb->tb_dev_cit);
config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
- &t->tb_cits.tb_dev_attrib_cit);
+ &tb->tb_dev_attrib_cit);
config_group_init_type_name(&dev->dev_pr_group, "pr",
- &t->tb_cits.tb_dev_pr_cit);
+ &tb->tb_dev_pr_cit);
config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
- &t->tb_cits.tb_dev_wwn_cit);
+ &tb->tb_dev_wwn_cit);
config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
- "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit);
+ "alua", &tb->tb_dev_alua_tg_pt_gps_cit);
config_group_init_type_name(&dev->dev_stat_grps.stat_group,
- "statistics", &t->tb_cits.tb_dev_stat_cit);
+ "statistics", &tb->tb_dev_stat_cit);
dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
dev_cg->default_groups[1] = &dev->dev_pr_group;
@@ -2693,8 +3233,8 @@ static ssize_t target_core_hba_show_attr_hba_info(
char *page)
{
return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
- hba->hba_id, hba->transport->name,
- TARGET_CORE_CONFIGFS_VERSION);
+ hba->hba_id, hba->backend->ops->name,
+ TARGET_CORE_VERSION);
}
SE_HBA_ATTR_RO(hba_info);
@@ -2713,11 +3253,10 @@ static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
const char *page, size_t count)
{
- struct se_subsystem_api *transport = hba->transport;
unsigned long mode_flag;
int ret;
- if (transport->pmode_enable_hba == NULL)
+ if (hba->backend->ops->pmode_enable_hba == NULL)
return -EINVAL;
ret = kstrtoul(page, 0, &mode_flag);
@@ -2731,7 +3270,7 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
return -EINVAL;
}
- ret = transport->pmode_enable_hba(hba, mode_flag);
+ ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
if (ret < 0)
return -EINVAL;
if (ret > 0)
@@ -2857,16 +3396,15 @@ static struct config_item_type target_core_cit = {
/* Stop functions for struct config_item_type target_core_hba_cit */
-void target_core_setup_sub_cits(struct se_subsystem_api *sa)
+void target_setup_backend_cits(struct target_backend *tb)
{
- target_core_setup_dev_cit(sa);
- target_core_setup_dev_attrib_cit(sa);
- target_core_setup_dev_pr_cit(sa);
- target_core_setup_dev_wwn_cit(sa);
- target_core_setup_dev_alua_tg_pt_gps_cit(sa);
- target_core_setup_dev_stat_cit(sa);
+ target_core_setup_dev_cit(tb);
+ target_core_setup_dev_attrib_cit(tb);
+ target_core_setup_dev_pr_cit(tb);
+ target_core_setup_dev_wwn_cit(tb);
+ target_core_setup_dev_alua_tg_pt_gps_cit(tb);
+ target_core_setup_dev_stat_cit(tb);
}
-EXPORT_SYMBOL(target_core_setup_sub_cits);
static int __init target_core_init_configfs(void)
{
@@ -2968,7 +3506,7 @@ static int __init target_core_init_configfs(void)
goto out_global;
}
pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
- " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
+ " Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
/*
* Register built-in RAMDISK subsystem logic for virtual LUN 0
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 417f88b498c7..09e682b1c549 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -56,40 +56,37 @@ static struct se_hba *lun0_hba;
struct se_device *g_lun0_dev;
sense_reason_t
-transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
struct se_lun *se_lun = NULL;
struct se_session *se_sess = se_cmd->se_sess;
- struct se_device *dev;
- unsigned long flags;
-
- if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
- return TCM_NON_EXISTENT_LUN;
-
- spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
- se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
- if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- struct se_dev_entry *deve = se_cmd->se_deve;
+ struct se_node_acl *nacl = se_sess->se_node_acl;
+ struct se_dev_entry *deve;
- deve->total_cmds++;
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, unpacked_lun);
+ if (deve) {
+ atomic_long_inc(&deve->total_cmds);
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
- " Access for 0x%08x\n",
+ " Access for 0x%08llx\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
- spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+ rcu_read_unlock();
return TCM_WRITE_PROTECTED;
}
if (se_cmd->data_direction == DMA_TO_DEVICE)
- deve->write_bytes += se_cmd->data_length;
+ atomic_long_add(se_cmd->data_length,
+ &deve->write_bytes);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- deve->read_bytes += se_cmd->data_length;
+ atomic_long_add(se_cmd->data_length,
+ &deve->read_bytes);
- se_lun = deve->se_lun;
- se_cmd->se_lun = deve->se_lun;
+ se_lun = rcu_dereference(deve->se_lun);
+ se_cmd->se_lun = rcu_dereference(deve->se_lun);
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
@@ -97,7 +94,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
percpu_ref_get(&se_lun->lun_ref);
se_cmd->lun_ref_active = true;
}
- spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+ rcu_read_unlock();
if (!se_lun) {
/*
@@ -107,7 +104,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
*/
if (unpacked_lun != 0) {
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08x\n",
+ " Access for 0x%08llx\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
return TCM_NON_EXISTENT_LUN;
@@ -119,64 +116,66 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
(se_cmd->data_direction != DMA_NONE))
return TCM_WRITE_PROTECTED;
- se_lun = &se_sess->se_tpg->tpg_virt_lun0;
- se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+ se_lun = se_sess->se_tpg->tpg_virt_lun0;
+ se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
percpu_ref_get(&se_lun->lun_ref);
se_cmd->lun_ref_active = true;
}
+ /*
+ * RCU reference protected by percpu se_lun->lun_ref taken above that
+ * must drop to zero (including initial reference) before this se_lun
+ * pointer can be kfree_rcu() by the final se_lun->lun_group put via
+ * target_core_fabric_configfs.c:target_fabric_port_release
+ */
+ se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+ atomic_long_inc(&se_cmd->se_dev->num_cmds);
- /* Directly associate cmd with se_dev */
- se_cmd->se_dev = se_lun->lun_se_dev;
-
- dev = se_lun->lun_se_dev;
- atomic_long_inc(&dev->num_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
- atomic_long_add(se_cmd->data_length, &dev->write_bytes);
+ atomic_long_add(se_cmd->data_length,
+ &se_cmd->se_dev->write_bytes);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- atomic_long_add(se_cmd->data_length, &dev->read_bytes);
+ atomic_long_add(se_cmd->data_length,
+ &se_cmd->se_dev->read_bytes);
return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
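+
+/*
+ * The lock-free conversion above is why the per-entry statistics moved to
+ * atomic_long_t: lookups now run under rcu_read_lock() instead of
+ * device_list_lock, so counters like deve->total_cmds can no longer rely
+ * on a spinlock for exclusion and are updated with atomic_long_inc() /
+ * atomic_long_add() instead.
+ */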
-int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
struct se_session *se_sess = se_cmd->se_sess;
+ struct se_node_acl *nacl = se_sess->se_node_acl;
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
unsigned long flags;
- if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
- return -ENODEV;
-
- spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
- se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
- deve = se_cmd->se_deve;
-
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- se_tmr->tmr_lun = deve->se_lun;
- se_cmd->se_lun = deve->se_lun;
- se_lun = deve->se_lun;
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, unpacked_lun);
+ if (deve) {
+ se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
+ se_cmd->se_lun = rcu_dereference(deve->se_lun);
+ se_lun = rcu_dereference(deve->se_lun);
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
}
- spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+ rcu_read_unlock();
if (!se_lun) {
pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08x\n",
+ " Access for 0x%08llx\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
return -ENODEV;
}
-
- /* Directly associate cmd with se_dev */
- se_cmd->se_dev = se_lun->lun_se_dev;
- se_tmr->tmr_dev = se_lun->lun_se_dev;
+ /*
+ * XXX: Add percpu se_lun->lun_ref reference count for TMR
+ */
+ se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+ se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
@@ -186,9 +185,24 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
+bool target_lun_is_rdonly(struct se_cmd *cmd)
+{
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_dev_entry *deve;
+ bool ret;
+
+ rcu_read_lock();
+ deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
+ ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(target_lun_is_rdonly);
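+
+/*
+ * Minimal usage sketch: with cmd->orig_fe_lun already set by the LUN
+ * lookup above, a caller can gate WRITEs on the mapped entry, e.g.
+ *
+ *	if (cmd->data_direction == DMA_TO_DEVICE &&
+ *	    target_lun_is_rdonly(cmd))
+ *		return TCM_WRITE_PROTECTED;
+ */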
+
/*
* This function is called from core_scsi3_emulate_pro_register_and_move()
- * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
+ * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
* when a matching rtpi is found.
*/
struct se_dev_entry *core_get_se_deve_from_rtpi(
@@ -197,231 +211,238 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
{
struct se_dev_entry *deve;
struct se_lun *lun;
- struct se_port *port;
struct se_portal_group *tpg = nacl->se_tpg;
- u32 i;
-
- spin_lock_irq(&nacl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = nacl->device_list[i];
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
-
- lun = deve->se_lun;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+ lun = rcu_dereference(deve->se_lun);
if (!lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
tpg->se_tpg_tfo->get_fabric_name());
continue;
}
- port = lun->lun_sep;
- if (!port) {
- pr_err("%s device entries device pointer is"
- " NULL, but Initiator has access.\n",
- tpg->se_tpg_tfo->get_fabric_name());
- continue;
- }
- if (port->sep_rtpi != rtpi)
+ if (lun->lun_rtpi != rtpi)
continue;
- atomic_inc_mb(&deve->pr_ref_count);
- spin_unlock_irq(&nacl->device_list_lock);
+ kref_get(&deve->pr_kref);
+ rcu_read_unlock();
return deve;
}
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return NULL;
}
-int core_free_device_list_for_node(
+void core_free_device_list_for_node(
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
struct se_dev_entry *deve;
- struct se_lun *lun;
- u32 i;
-
- if (!nacl->device_list)
- return 0;
-
- spin_lock_irq(&nacl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = nacl->device_list[i];
-
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
-
- if (!deve->se_lun) {
- pr_err("%s device entries device pointer is"
- " NULL, but Initiator has access.\n",
- tpg->se_tpg_tfo->get_fabric_name());
- continue;
- }
- lun = deve->se_lun;
- spin_unlock_irq(&nacl->device_list_lock);
- core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
- spin_lock_irq(&nacl->device_list_lock);
+ mutex_lock(&nacl->lun_entry_mutex);
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+ struct se_lun *lun = rcu_dereference_check(deve->se_lun,
+ lockdep_is_held(&nacl->lun_entry_mutex));
+ core_disable_device_list_for_node(lun, deve, nacl, tpg);
}
- spin_unlock_irq(&nacl->device_list_lock);
-
- array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
- nacl->device_list = NULL;
-
- return 0;
+ mutex_unlock(&nacl->lun_entry_mutex);
}
void core_update_device_list_access(
- u32 mapped_lun,
+ u64 mapped_lun,
u32 lun_access,
struct se_node_acl *nacl)
{
struct se_dev_entry *deve;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[mapped_lun];
- if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
- } else {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+ mutex_lock(&nacl->lun_entry_mutex);
+ deve = target_nacl_find_deve(nacl, mapped_lun);
+ if (deve) {
+ if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+ } else {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+ }
}
- spin_unlock_irq(&nacl->device_list_lock);
+ mutex_unlock(&nacl->lun_entry_mutex);
}
-/* core_enable_device_list_for_node():
- *
- *
+/*
+ * Called with rcu_read_lock or nacl->lun_entry_mutex held.
*/
+struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
+{
+ struct se_dev_entry *deve;
+
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+ if (deve->mapped_lun == mapped_lun)
+ return deve;
+
+ return NULL;
+}
+EXPORT_SYMBOL(target_nacl_find_deve);
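+
+/*
+ * Callers follow the usual RCU lookup pattern, e.g.:
+ *
+ *	rcu_read_lock();
+ *	deve = target_nacl_find_deve(nacl, mapped_lun);
+ *	if (deve)
+ *		lun = rcu_dereference(deve->se_lun);
+ *	rcu_read_unlock();
+ */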
+
+void target_pr_kref_release(struct kref *kref)
+{
+ struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
+ pr_kref);
+ complete(&deve->pr_comp);
+}
+
+static void
+target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
+ bool skip_new)
+{
+ struct se_dev_entry *tmp;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
+ if (skip_new && tmp == new)
+ continue;
+ core_scsi3_ua_allocate(tmp, 0x3F,
+ ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
+ }
+ rcu_read_unlock();
+}
+
int core_enable_device_list_for_node(
struct se_lun *lun,
struct se_lun_acl *lun_acl,
- u32 mapped_lun,
+ u64 mapped_lun,
u32 lun_access,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
- struct se_port *port = lun->lun_sep;
- struct se_dev_entry *deve;
-
- spin_lock_irq(&nacl->device_list_lock);
-
- deve = nacl->device_list[mapped_lun];
-
- /*
- * Check if the call is handling demo mode -> explicit LUN ACL
- * transition. This transition must be for the same struct se_lun
- * + mapped_lun that was setup in demo mode..
- */
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- if (deve->se_lun_acl != NULL) {
- pr_err("struct se_dev_entry->se_lun_acl"
- " already set for demo mode -> explicit"
- " LUN ACL transition\n");
- spin_unlock_irq(&nacl->device_list_lock);
+ struct se_dev_entry *orig, *new;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ pr_err("Unable to allocate se_dev_entry memory\n");
+ return -ENOMEM;
+ }
+
+ atomic_set(&new->ua_count, 0);
+ spin_lock_init(&new->ua_lock);
+ INIT_LIST_HEAD(&new->ua_list);
+ INIT_LIST_HEAD(&new->lun_link);
+
+ new->mapped_lun = mapped_lun;
+ kref_init(&new->pr_kref);
+ init_completion(&new->pr_comp);
+
+ if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
+ new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+ else
+ new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+
+ new->creation_time = get_jiffies_64();
+ new->attach_count++;
+
+ mutex_lock(&nacl->lun_entry_mutex);
+ orig = target_nacl_find_deve(nacl, mapped_lun);
+ if (orig && orig->se_lun) {
+ struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
+ lockdep_is_held(&nacl->lun_entry_mutex));
+
+ if (orig_lun != lun) {
+ pr_err("Existing orig->se_lun doesn't match new lun"
+ " for dynamic -> explicit NodeACL conversion:"
+ " %s\n", nacl->initiatorname);
+ mutex_unlock(&nacl->lun_entry_mutex);
+ kfree(new);
return -EINVAL;
}
- if (deve->se_lun != lun) {
- pr_err("struct se_dev_entry->se_lun does"
- " match passed struct se_lun for demo mode"
- " -> explicit LUN ACL transition\n");
- spin_unlock_irq(&nacl->device_list_lock);
- return -EINVAL;
- }
- deve->se_lun_acl = lun_acl;
+ BUG_ON(orig->se_lun_acl != NULL);
- if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
- } else {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
- }
+ rcu_assign_pointer(new->se_lun, lun);
+ rcu_assign_pointer(new->se_lun_acl, lun_acl);
+ hlist_del_rcu(&orig->link);
+ hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
+ mutex_unlock(&nacl->lun_entry_mutex);
- spin_unlock_irq(&nacl->device_list_lock);
- return 0;
- }
+ spin_lock(&lun->lun_deve_lock);
+ list_del(&orig->lun_link);
+ list_add_tail(&new->lun_link, &lun->lun_deve_list);
+ spin_unlock(&lun->lun_deve_lock);
+
+ kref_put(&orig->pr_kref, target_pr_kref_release);
+ wait_for_completion(&orig->pr_comp);
- deve->se_lun = lun;
- deve->se_lun_acl = lun_acl;
- deve->mapped_lun = mapped_lun;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
-
- if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
- } else {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+ target_luns_data_has_changed(nacl, new, true);
+ kfree_rcu(orig, rcu_head);
+ return 0;
}
- deve->creation_time = get_jiffies_64();
- deve->attach_count++;
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_assign_pointer(new->se_lun, lun);
+ rcu_assign_pointer(new->se_lun_acl, lun_acl);
+ hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
+ mutex_unlock(&nacl->lun_entry_mutex);
- spin_lock_bh(&port->sep_alua_lock);
- list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
- spin_unlock_bh(&port->sep_alua_lock);
+ spin_lock(&lun->lun_deve_lock);
+ list_add_tail(&new->lun_link, &lun->lun_deve_list);
+ spin_unlock(&lun->lun_deve_lock);
+ target_luns_data_has_changed(nacl, new, true);
return 0;
}
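+
+/*
+ * The demo-mode -> explicit conversion above follows the RCU replace
+ * pattern: the old entry is unlinked with hlist_del_rcu(), the replacement
+ * published with hlist_add_head_rcu(), and the old one freed only via
+ * kfree_rcu() after a grace period, so a concurrent lookup under
+ * rcu_read_lock() never touches freed memory.
+ */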
-/* core_disable_device_list_for_node():
- *
- *
+/*
+ * Called with se_node_acl->lun_entry_mutex held.
*/
-int core_disable_device_list_for_node(
+void core_disable_device_list_for_node(
struct se_lun *lun,
- struct se_lun_acl *lun_acl,
- u32 mapped_lun,
- u32 lun_access,
+ struct se_dev_entry *orig,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
- struct se_port *port = lun->lun_sep;
- struct se_dev_entry *deve = nacl->device_list[mapped_lun];
-
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
/*
* If the MappedLUN entry is being disabled, the entry in
- * port->sep_alua_list must be removed now before clearing the
+ * lun->lun_deve_list must be removed now before clearing the
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly converted to MappedLUNs ->
- * struct se_lun_acl, but we remove deve->alua_port_list from
- * port->sep_alua_list. This also means that active UAs and
+ * struct se_lun_acl, but we remove deve->lun_link from
+ * lun->lun_deve_list. This also means that active UAs and
* NodeACL context specific PR metadata for demo-mode
* MappedLUN *deve will be released below..
*/
- spin_lock_bh(&port->sep_alua_lock);
- list_del(&deve->alua_port_list);
- spin_unlock_bh(&port->sep_alua_lock);
+ spin_lock(&lun->lun_deve_lock);
+ list_del(&orig->lun_link);
+ spin_unlock(&lun->lun_deve_lock);
/*
- * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
- * PR operation to complete.
+ * Disable struct se_dev_entry LUN ACL mapping
*/
- while (atomic_read(&deve->pr_ref_count) != 0)
- cpu_relax();
-
- spin_lock_irq(&nacl->device_list_lock);
+ core_scsi3_ua_release_all(orig);
+
+ hlist_del_rcu(&orig->link);
+ clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
+ rcu_assign_pointer(orig->se_lun, NULL);
+ rcu_assign_pointer(orig->se_lun_acl, NULL);
+ orig->lun_flags = 0;
+ orig->creation_time = 0;
+ orig->attach_count--;
/*
- * Disable struct se_dev_entry LUN ACL mapping
+ * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
+ * or REGISTER_AND_MOVE PR operation to complete.
*/
- core_scsi3_ua_release_all(deve);
- deve->se_lun = NULL;
- deve->se_lun_acl = NULL;
- deve->lun_flags = 0;
- deve->creation_time = 0;
- deve->attach_count--;
- spin_unlock_irq(&nacl->device_list_lock);
-
- core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
- return 0;
+ kref_put(&orig->pr_kref, target_pr_kref_release);
+ wait_for_completion(&orig->pr_comp);
+
+ kfree_rcu(orig, rcu_head);
+
+ core_scsi3_free_pr_reg_from_nacl(dev, nacl);
+ target_luns_data_has_changed(nacl, NULL, false);
}
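+
+/*
+ * Teardown ordering sketch: kref_put() drops the initial pr_kref reference
+ * taken in kref_init(), and wait_for_completion(&orig->pr_comp) blocks
+ * until any remaining SPEC_I_PT / REGISTER_AND_MOVE holders also drop
+ * theirs (target_pr_kref_release() completes pr_comp), after which the
+ * entry is safe to hand to kfree_rcu().
+ */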
/* core_clear_lun_from_tpg():
@@ -432,53 +453,35 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
struct se_node_acl *nacl;
struct se_dev_entry *deve;
- u32 i;
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_lock(&tpg->acl_node_mutex);
list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
- spin_unlock_irq(&tpg->acl_node_lock);
- spin_lock_irq(&nacl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = nacl->device_list[i];
- if (lun != deve->se_lun)
- continue;
- spin_unlock_irq(&nacl->device_list_lock);
+ mutex_lock(&nacl->lun_entry_mutex);
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+ struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
+ lockdep_is_held(&nacl->lun_entry_mutex));
- core_disable_device_list_for_node(lun, NULL,
- deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
- nacl, tpg);
+ if (lun != tmp_lun)
+ continue;
- spin_lock_irq(&nacl->device_list_lock);
+ core_disable_device_list_for_node(lun, deve, nacl, tpg);
}
- spin_unlock_irq(&nacl->device_list_lock);
-
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&nacl->lun_entry_mutex);
}
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
}
-static struct se_port *core_alloc_port(struct se_device *dev)
+int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
- struct se_port *port, *port_tmp;
-
- port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
- if (!port) {
- pr_err("Unable to allocate struct se_port\n");
- return ERR_PTR(-ENOMEM);
- }
- INIT_LIST_HEAD(&port->sep_alua_list);
- INIT_LIST_HEAD(&port->sep_list);
- atomic_set(&port->sep_tg_pt_secondary_offline, 0);
- spin_lock_init(&port->sep_alua_lock);
- mutex_init(&port->sep_tg_pt_md_mutex);
+ struct se_lun *tmp;
spin_lock(&dev->se_port_lock);
- if (dev->dev_port_count == 0x0000ffff) {
+ if (dev->export_count == 0x0000ffff) {
pr_warn("Reached dev->dev_port_count =="
" 0x0000ffff\n");
spin_unlock(&dev->se_port_lock);
- return ERR_PTR(-ENOSPC);
+ return -ENOSPC;
}
again:
/*
@@ -493,133 +496,23 @@ again:
* 2h Relative port 2, historically known as port B
* 3h to FFFFh Relative port 3 through 65 535
*/
- port->sep_rtpi = dev->dev_rpti_counter++;
- if (!port->sep_rtpi)
+ lun->lun_rtpi = dev->dev_rpti_counter++;
+ if (!lun->lun_rtpi)
goto again;
- list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
+ list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
/*
* Make sure RELATIVE TARGET PORT IDENTIFIER is unique
* for 16-bit wrap..
*/
- if (port->sep_rtpi == port_tmp->sep_rtpi)
+ if (lun->lun_rtpi == tmp->lun_rtpi)
goto again;
}
spin_unlock(&dev->se_port_lock);
- return port;
-}
-
-static void core_export_port(
- struct se_device *dev,
- struct se_portal_group *tpg,
- struct se_port *port,
- struct se_lun *lun)
-{
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
-
- spin_lock(&dev->se_port_lock);
- spin_lock(&lun->lun_sep_lock);
- port->sep_tpg = tpg;
- port->sep_lun = lun;
- lun->lun_sep = port;
- spin_unlock(&lun->lun_sep_lock);
-
- list_add_tail(&port->sep_list, &dev->dev_sep_list);
- spin_unlock(&dev->se_port_lock);
-
- if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
- !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
- tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
- if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
- pr_err("Unable to allocate t10_alua_tg_pt"
- "_gp_member_t\n");
- return;
- }
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- dev->t10_alua.default_tg_pt_gp);
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- pr_debug("%s/%s: Adding to default ALUA Target Port"
- " Group: alua/default_tg_pt_gp\n",
- dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
- }
-
- dev->dev_port_count++;
- port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
-}
-
-/*
- * Called with struct se_device->se_port_lock spinlock held.
- */
-static void core_release_port(struct se_device *dev, struct se_port *port)
- __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
-{
- /*
- * Wait for any port reference for PR ALL_TG_PT=1 operation
- * to complete in __core_scsi3_alloc_registration()
- */
- spin_unlock(&dev->se_port_lock);
- if (atomic_read(&port->sep_tg_pt_ref_cnt))
- cpu_relax();
- spin_lock(&dev->se_port_lock);
-
- core_alua_free_tg_pt_gp_mem(port);
-
- list_del(&port->sep_list);
- dev->dev_port_count--;
- kfree(port);
-}
-
-int core_dev_export(
- struct se_device *dev,
- struct se_portal_group *tpg,
- struct se_lun *lun)
-{
- struct se_hba *hba = dev->se_hba;
- struct se_port *port;
-
- port = core_alloc_port(dev);
- if (IS_ERR(port))
- return PTR_ERR(port);
-
- lun->lun_se_dev = dev;
-
- spin_lock(&hba->device_lock);
- dev->export_count++;
- spin_unlock(&hba->device_lock);
-
- core_export_port(dev, tpg, port, lun);
return 0;
}
-void core_dev_unexport(
- struct se_device *dev,
- struct se_portal_group *tpg,
- struct se_lun *lun)
-{
- struct se_hba *hba = dev->se_hba;
- struct se_port *port = lun->lun_sep;
-
- spin_lock(&lun->lun_sep_lock);
- if (lun->lun_se_dev == NULL) {
- spin_unlock(&lun->lun_sep_lock);
- return;
- }
- spin_unlock(&lun->lun_sep_lock);
-
- spin_lock(&dev->se_port_lock);
- core_release_port(dev, port);
- spin_unlock(&dev->se_port_lock);
-
- spin_lock(&hba->device_lock);
- dev->export_count--;
- spin_unlock(&hba->device_lock);
-
- lun->lun_sep = NULL;
- lun->lun_se_dev = NULL;
-}
-
static void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
@@ -651,556 +544,19 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
return aligned_max_sectors;
}
-bool se_dev_check_wce(struct se_device *dev)
-{
- bool wce = false;
-
- if (dev->transport->get_write_cache)
- wce = dev->transport->get_write_cache(dev);
- else if (dev->dev_attrib.emulate_write_cache > 0)
- wce = true;
-
- return wce;
-}
-
-int se_dev_set_max_unmap_lba_count(
- struct se_device *dev,
- u32 max_unmap_lba_count)
-{
- dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
- pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
- dev, dev->dev_attrib.max_unmap_lba_count);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
-
-int se_dev_set_max_unmap_block_desc_count(
- struct se_device *dev,
- u32 max_unmap_block_desc_count)
-{
- dev->dev_attrib.max_unmap_block_desc_count =
- max_unmap_block_desc_count;
- pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
- dev, dev->dev_attrib.max_unmap_block_desc_count);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
-
-int se_dev_set_unmap_granularity(
- struct se_device *dev,
- u32 unmap_granularity)
-{
- dev->dev_attrib.unmap_granularity = unmap_granularity;
- pr_debug("dev[%p]: Set unmap_granularity: %u\n",
- dev, dev->dev_attrib.unmap_granularity);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_unmap_granularity);
-
-int se_dev_set_unmap_granularity_alignment(
- struct se_device *dev,
- u32 unmap_granularity_alignment)
-{
- dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
- pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
- dev, dev->dev_attrib.unmap_granularity_alignment);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
-
-int se_dev_set_max_write_same_len(
- struct se_device *dev,
- u32 max_write_same_len)
-{
- dev->dev_attrib.max_write_same_len = max_write_same_len;
- pr_debug("dev[%p]: Set max_write_same_len: %u\n",
- dev, dev->dev_attrib.max_write_same_len);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_max_write_same_len);
-
-static void dev_set_t10_wwn_model_alias(struct se_device *dev)
-{
- const char *configname;
-
- configname = config_item_name(&dev->dev_group.cg_item);
- if (strlen(configname) >= 16) {
- pr_warn("dev[%p]: Backstore name '%s' is too long for "
- "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
- configname);
- }
- snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
-}
-
-int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
-{
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change model alias"
- " while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
-
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (flag) {
- dev_set_t10_wwn_model_alias(dev);
- } else {
- strncpy(&dev->t10_wwn.model[0],
- dev->transport->inquiry_prod, 16);
- }
- dev->dev_attrib.emulate_model_alias = flag;
-
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
-
-int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
-{
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (flag) {
- pr_err("dpo_emulated not supported\n");
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_dpo);
-
-int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
-{
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- if (flag &&
- dev->transport->get_write_cache) {
- pr_warn("emulate_fua_write not supported for this device, ignoring\n");
- return 0;
- }
- if (dev->export_count) {
- pr_err("emulate_fua_write cannot be changed with active"
- " exports: %d\n", dev->export_count);
- return -EINVAL;
- }
- dev->dev_attrib.emulate_fua_write = flag;
- pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
- dev, dev->dev_attrib.emulate_fua_write);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
-
-int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
-{
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (flag) {
- pr_err("ua read emulated not supported\n");
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
-
-int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
-{
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- if (flag &&
- dev->transport->get_write_cache) {
- pr_err("emulate_write_cache not supported for this device\n");
- return -EINVAL;
- }
- if (dev->export_count) {
- pr_err("emulate_write_cache cannot be changed with active"
- " exports: %d\n", dev->export_count);
- return -EINVAL;
- }
- dev->dev_attrib.emulate_write_cache = flag;
- pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
- dev, dev->dev_attrib.emulate_write_cache);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
-
-int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1) && (flag != 2)) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device"
- " UA_INTRLCK_CTRL while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
- pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
- dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
-
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
-
-int se_dev_set_emulate_tas(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device TAS while"
- " export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- dev->dev_attrib.emulate_tas = flag;
- pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
- dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
-
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_tas);
-
-int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- /*
- * We expect this value to be non-zero when generic Block Layer
- * Discard support is detected in iblock_create_virtdevice().
- */
- if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
-
- dev->dev_attrib.emulate_tpu = flag;
- pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
- dev, flag);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_tpu);
-
-int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- /*
- * We expect this value to be non-zero when generic Block Layer
- * Discard support is detected in iblock_create_virtdevice().
- */
- if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
-
- dev->dev_attrib.emulate_tpws = flag;
- pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
- dev, flag);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_tpws);
-
-int se_dev_set_emulate_caw(struct se_device *dev, int flag)
-{
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- dev->dev_attrib.emulate_caw = flag;
- pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
- dev, flag);
-
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_caw);
-
-int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
-{
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- dev->dev_attrib.emulate_3pc = flag;
- pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
- dev, flag);
-
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_3pc);
-
-int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
-{
- int rc, old_prot = dev->dev_attrib.pi_prot_type;
-
- if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
- pr_err("Illegal value %d for pi_prot_type\n", flag);
- return -EINVAL;
- }
- if (flag == 2) {
- pr_err("DIF TYPE2 protection currently not supported\n");
- return -ENOSYS;
- }
- if (dev->dev_attrib.hw_pi_prot_type) {
- pr_warn("DIF protection enabled on underlying hardware,"
- " ignoring\n");
- return 0;
- }
- if (!dev->transport->init_prot || !dev->transport->free_prot) {
- /* 0 is only allowed value for non-supporting backends */
- if (flag == 0)
- return 0;
-
- pr_err("DIF protection not supported by backend: %s\n",
- dev->transport->name);
- return -ENOSYS;
- }
- if (!(dev->dev_flags & DF_CONFIGURED)) {
- pr_err("DIF protection requires device to be configured\n");
- return -ENODEV;
- }
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device PROT type while"
- " export_count is %d\n", dev, dev->export_count);
- return -EINVAL;
- }
-
- dev->dev_attrib.pi_prot_type = flag;
-
- if (flag && !old_prot) {
- rc = dev->transport->init_prot(dev);
- if (rc) {
- dev->dev_attrib.pi_prot_type = old_prot;
- return rc;
- }
-
- } else if (!flag && old_prot) {
- dev->transport->free_prot(dev);
- }
- pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
-
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_pi_prot_type);
-
-int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
-{
- int rc;
-
- if (!flag)
- return 0;
-
- if (flag != 1) {
- pr_err("Illegal value %d for pi_prot_format\n", flag);
- return -EINVAL;
- }
- if (!dev->transport->format_prot) {
- pr_err("DIF protection format not supported by backend %s\n",
- dev->transport->name);
- return -ENOSYS;
- }
- if (!(dev->dev_flags & DF_CONFIGURED)) {
- pr_err("DIF protection format requires device to be configured\n");
- return -ENODEV;
- }
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to format SE Device PROT type while"
- " export_count is %d\n", dev, dev->export_count);
- return -EINVAL;
- }
-
- rc = dev->transport->format_prot(dev);
- if (rc)
- return rc;
-
- pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
-
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_pi_prot_format);
-
-int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- dev->dev_attrib.enforce_pr_isids = flag;
- pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
- (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
-
-int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -EINVAL;
- }
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to set force_pr_aptpl while"
- " export_count is %d\n", dev, dev->export_count);
- return -EINVAL;
- }
-
- dev->dev_attrib.force_pr_aptpl = flag;
- pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
-
-int se_dev_set_is_nonrot(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -EINVAL;
- }
- dev->dev_attrib.is_nonrot = flag;
- pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
- dev, flag);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_is_nonrot);
-
-int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
-{
- if (flag != 0) {
- printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted"
- " reordering not implemented\n", dev);
- return -ENOSYS;
- }
- dev->dev_attrib.emulate_rest_reord = flag;
- pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
-
-/*
- * Note, this can only be called on unexported SE Device Object.
- */
-int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
-{
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device TCQ while"
- " export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- if (!queue_depth) {
- pr_err("dev[%p]: Illegal ZERO value for queue"
- "_depth\n", dev);
- return -EINVAL;
- }
-
- if (queue_depth > dev->dev_attrib.queue_depth) {
- if (queue_depth > dev->dev_attrib.hw_queue_depth) {
- pr_err("dev[%p]: Passed queue_depth:"
- " %u exceeds TCM/SE_Device MAX"
- " TCQ: %u\n", dev, queue_depth,
- dev->dev_attrib.hw_queue_depth);
- return -EINVAL;
- }
- }
- dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
- pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
- dev, queue_depth);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_queue_depth);
-
-int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
-{
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device"
- " optimal_sectors while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
- pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
- " greater than hw_max_sectors: %u\n", dev,
- optimal_sectors, dev->dev_attrib.hw_max_sectors);
- return -EINVAL;
- }
-
- dev->dev_attrib.optimal_sectors = optimal_sectors;
- pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
- dev, optimal_sectors);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_optimal_sectors);
-
-int se_dev_set_block_size(struct se_device *dev, u32 block_size)
-{
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device block_size"
- " while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
-
- if ((block_size != 512) &&
- (block_size != 1024) &&
- (block_size != 2048) &&
- (block_size != 4096)) {
- pr_err("dev[%p]: Illegal value for block_device: %u"
- " for SE device, must be 512, 1024, 2048 or 4096\n",
- dev, block_size);
- return -EINVAL;
- }
-
- dev->dev_attrib.block_size = block_size;
- pr_debug("dev[%p]: SE Device block_size changed to %u\n",
- dev, block_size);
-
- if (dev->dev_attrib.max_bytes_per_io)
- dev->dev_attrib.hw_max_sectors =
- dev->dev_attrib.max_bytes_per_io / block_size;
-
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_block_size);
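
The removed se_dev_set_*() helpers above all share one validate-then-assign
shape. A minimal sketch of that pattern, using hypothetical names (not part of
this patch):

	static int example_set_bool_attrib(struct se_device *dev, int flag,
					   int *attrib)
	{
		if (flag != 0 && flag != 1) {
			pr_err("Illegal value %d\n", flag);	/* booleans only */
			return -EINVAL;
		}
		if (dev->export_count) {
			/* refuse changes while LUNs are exported */
			pr_err("dev[%p]: busy, export_count is %d\n",
				dev, dev->export_count);
			return -EINVAL;
		}
		*attrib = flag;
		return 0;
	}
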
-
-struct se_lun *core_dev_add_lun(
+int core_dev_add_lun(
struct se_portal_group *tpg,
struct se_device *dev,
- u32 unpacked_lun)
+ struct se_lun *lun)
{
- struct se_lun *lun;
int rc;
- lun = core_tpg_alloc_lun(tpg, unpacked_lun);
- if (IS_ERR(lun))
- return lun;
-
rc = core_tpg_add_lun(tpg, lun,
TRANSPORT_LUNFLAGS_READ_WRITE, dev);
if (rc < 0)
- return ERR_PTR(rc);
+ return rc;
- pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+ pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
@@ -1210,20 +566,19 @@ struct se_lun *core_dev_add_lun(
*/
if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl;
- spin_lock_irq(&tpg->acl_node_lock);
+
+ mutex_lock(&tpg->acl_node_mutex);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (acl->dynamic_node_acl &&
(!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
- spin_unlock_irq(&tpg->acl_node_lock);
- core_tpg_add_node_to_devs(acl, tpg);
- spin_lock_irq(&tpg->acl_node_lock);
+ core_tpg_add_node_to_devs(acl, tpg, lun);
}
}
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
}
- return lun;
+ return 0;
}
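
With this change the caller allocates the se_lun up front and
core_dev_add_lun() only links it into the TPG. A hedged sketch of the new call
sequence, mirroring what target_fabric_port_link() does later in this patch:

	struct se_lun *lun;
	int ret;

	lun = core_tpg_alloc_lun(se_tpg, unpacked_lun);	/* caller owns the allocation */
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	ret = core_dev_add_lun(se_tpg, dev, lun);	/* now returns 0 or -errno */
	if (ret)
		pr_err("core_dev_add_lun() failed: %d\n", ret);
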
/* core_dev_del_lun():
@@ -1234,7 +589,7 @@ void core_dev_del_lun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
- pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
+ pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
tpg->se_tpg_tfo->get_fabric_name());
@@ -1242,72 +597,10 @@ void core_dev_del_lun(
core_tpg_remove_lun(tpg, lun);
}
-struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
-{
- struct se_lun *lun;
-
- spin_lock(&tpg->tpg_lun_lock);
- if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
- "_PER_TPG-1: %u for Target Portal Group: %hu\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- TRANSPORT_MAX_LUNS_PER_TPG-1,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return NULL;
- }
- lun = tpg->tpg_lun_list[unpacked_lun];
-
- if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
- pr_err("%s Logical Unit Number: %u is not free on"
- " Target Portal Group: %hu, ignoring request.\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return NULL;
- }
- spin_unlock(&tpg->tpg_lun_lock);
-
- return lun;
-}
-
-/* core_dev_get_lun():
- *
- *
- */
-static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
-{
- struct se_lun *lun;
-
- spin_lock(&tpg->tpg_lun_lock);
- if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
- "_TPG-1: %u for Target Portal Group: %hu\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- TRANSPORT_MAX_LUNS_PER_TPG-1,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return NULL;
- }
- lun = tpg->tpg_lun_list[unpacked_lun];
-
- if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
- pr_err("%s Logical Unit Number: %u is not active on"
- " Target Portal Group: %hu, ignoring request.\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return NULL;
- }
- spin_unlock(&tpg->tpg_lun_lock);
-
- return lun;
-}
-
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_node_acl *nacl,
- u32 mapped_lun,
+ u64 mapped_lun,
int *ret)
{
struct se_lun_acl *lacl;
@@ -1325,7 +618,6 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
return NULL;
}
- INIT_LIST_HEAD(&lacl->lacl_list);
lacl->mapped_lun = mapped_lun;
lacl->se_lun_nacl = nacl;
snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
@@ -1337,22 +629,16 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
int core_dev_add_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl,
- u32 unpacked_lun,
+ struct se_lun *lun,
u32 lun_access)
{
- struct se_lun *lun;
- struct se_node_acl *nacl;
-
- lun = core_dev_get_lun(tpg, unpacked_lun);
- if (!lun) {
- pr_err("%s Logical Unit Number: %u is not active on"
- " Target Portal Group: %hu, ignoring request.\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- return -EINVAL;
- }
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
- nacl = lacl->se_lun_nacl;
if (!nacl)
return -EINVAL;
@@ -1366,52 +652,40 @@ int core_dev_add_initiator_node_lun_acl(
lun_access, nacl, tpg) < 0)
return -EINVAL;
- spin_lock(&lun->lun_acl_lock);
- list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
- atomic_inc_mb(&lun->lun_acl_count);
- spin_unlock(&lun->lun_acl_lock);
-
- pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
+ pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
lacl->initiatorname);
/*
* Check to see if there are any existing persistent reservation APTPL
* pre-registrations that need to be enabled for this LUN ACL..
*/
- core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
+ core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
lacl->mapped_lun);
return 0;
}
-/* core_dev_del_initiator_node_lun_acl():
- *
- *
- */
int core_dev_del_initiator_node_lun_acl(
- struct se_portal_group *tpg,
struct se_lun *lun,
struct se_lun_acl *lacl)
{
+ struct se_portal_group *tpg = lun->lun_tpg;
struct se_node_acl *nacl;
+ struct se_dev_entry *deve;
nacl = lacl->se_lun_nacl;
if (!nacl)
return -EINVAL;
- spin_lock(&lun->lun_acl_lock);
- list_del(&lacl->lacl_list);
- atomic_dec_mb(&lun->lun_acl_count);
- spin_unlock(&lun->lun_acl_lock);
-
- core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
-
- lacl->se_lun = NULL;
+ mutex_lock(&nacl->lun_entry_mutex);
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (deve)
+ core_disable_device_list_for_node(lun, deve, nacl, tpg);
+ mutex_unlock(&nacl->lun_entry_mutex);
- pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
- " InitiatorNode: %s Mapped LUN: %u\n",
+ pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
+ " InitiatorNode: %s Mapped LUN: %llu\n",
tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
lacl->initiatorname, lacl->mapped_lun);
@@ -1424,7 +698,7 @@ void core_dev_free_initiator_node_lun_acl(
struct se_lun_acl *lacl)
{
pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
- " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun);
@@ -1473,14 +747,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
struct se_device *dev;
struct se_lun *xcopy_lun;
- dev = hba->transport->alloc_device(hba, name);
+ dev = hba->backend->ops->alloc_device(hba, name);
if (!dev)
return NULL;
dev->dev_link_magic = SE_DEV_LINK_MAGIC;
dev->se_hba = hba;
- dev->transport = hba->transport;
+ dev->transport = hba->backend->ops;
dev->prot_length = sizeof(struct se_dif_v1_tuple);
+ dev->hba_index = hba->hba_index;
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
@@ -1513,9 +788,9 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.da_dev = dev;
dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
- dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
- dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
- dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+ dev->dev_attrib.emulate_dpo = 1;
+ dev->dev_attrib.emulate_fua_write = 1;
+ dev->dev_attrib.emulate_fua_read = 1;
dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
@@ -1537,12 +812,12 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
xcopy_lun = &dev->xcopy_lun;
- xcopy_lun->lun_se_dev = dev;
- init_completion(&xcopy_lun->lun_shutdown_comp);
- INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
- spin_lock_init(&xcopy_lun->lun_acl_lock);
- spin_lock_init(&xcopy_lun->lun_sep_lock);
+ rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
init_completion(&xcopy_lun->lun_ref_comp);
+ INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
+ INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
+ mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
+ xcopy_lun->lun_tpg = &xcopy_pt_tpg;
return dev;
}
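
rcu_assign_pointer() publishes the device pointer with the write barrier that
RCU readers rely on. A sketch of the matching reader side, assuming a
hypothetical use() helper:

	struct se_device *se_dev;

	rcu_read_lock();
	se_dev = rcu_dereference(xcopy_lun->lun_se_dev);
	if (se_dev)
		use(se_dev);	/* pointer stays valid until rcu_read_unlock() */
	rcu_read_unlock();
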
@@ -1679,7 +954,7 @@ int core_dev_setup_virtual_lun0(void)
goto out_free_hba;
}
- hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
+ hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
ret = target_configure_device(dev);
if (ret)
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 1f7886bb16bf..48a36989c1a6 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -36,7 +36,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "target_core_internal.h"
@@ -46,27 +45,25 @@
#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
{ \
- struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
- struct config_item_type *cit = &tfc->tfc_##_name##_cit; \
+ struct config_item_type *cit = &tf->tf_##_name##_cit; \
\
cit->ct_item_ops = _item_ops; \
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
- cit->ct_owner = tf->tf_module; \
+ cit->ct_owner = tf->tf_ops->module; \
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
#define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
{ \
- struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
- struct config_item_type *cit = &tfc->tfc_##_name##_cit; \
- struct configfs_attribute **attrs = tf->tf_ops.tfc_##_name##_attrs; \
+ struct config_item_type *cit = &tf->tf_##_name##_cit; \
+ struct configfs_attribute **attrs = tf->tf_ops->tfc_##_name##_attrs; \
\
cit->ct_item_ops = _item_ops; \
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = attrs; \
- cit->ct_owner = tf->tf_module; \
+ cit->ct_owner = tf->tf_ops->module; \
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
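
For reference, an invocation such as TF_CIT_SETUP(tpg_np, &ops, NULL, attrs)
now expands to roughly the following, where ops and attrs stand for whatever
the call site passes:

	static void target_fabric_setup_tpg_np_cit(struct target_fabric_configfs *tf)
	{
		struct config_item_type *cit = &tf->tf_tpg_np_cit;

		cit->ct_item_ops = &ops;
		cit->ct_group_ops = NULL;
		cit->ct_attrs = attrs;
		cit->ct_owner = tf->tf_ops->module;	/* was tf->tf_module */
		pr_debug("Setup generic %s\n", "tpg_np");
	}
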
@@ -83,7 +80,7 @@ static int target_fabric_mappedlun_link(
struct se_lun_acl, se_lun_group);
struct se_portal_group *se_tpg;
struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
- int ret = 0, lun_access;
+ int lun_access;
if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
@@ -93,12 +90,11 @@ static int target_fabric_mappedlun_link(
/*
* Ensure that the source port exists
*/
- if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
- pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
- "_tpg does not exist\n");
+ if (!lun->lun_se_dev) {
+ pr_err("Source se_lun->lun_se_dev does not exist\n");
return -EINVAL;
}
- se_tpg = lun->lun_sep->sep_tpg;
+ se_tpg = lun->lun_tpg;
nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
tpg_ci = &nacl_ci->ci_group->cg_item;
@@ -125,49 +121,35 @@ static int target_fabric_mappedlun_link(
 * which will be write protected (READ-ONLY) when
* tpg_1/attrib/demo_mode_write_protect=1
*/
- spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
- deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun];
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
+ rcu_read_lock();
+ deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
+ if (deve)
lun_access = deve->lun_flags;
else
lun_access =
(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
TRANSPORT_LUNFLAGS_READ_WRITE;
- spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
+ rcu_read_unlock();
/*
* Determine the actual mapped LUN value user wants..
*
* This value is what the SCSI Initiator actually sees the
- * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
+ * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
*/
- ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
- lun->unpacked_lun, lun_access);
-
- return (ret < 0) ? -EINVAL : 0;
+ return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access);
}
static int target_fabric_mappedlun_unlink(
struct config_item *lun_acl_ci,
struct config_item *lun_ci)
{
- struct se_lun *lun;
struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
struct se_lun_acl, se_lun_group);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun];
- struct se_portal_group *se_tpg;
- /*
- * Determine if the underlying MappedLUN has already been released..
- */
- if (!deve->se_lun)
- return 0;
-
- lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
- se_tpg = lun->lun_sep->sep_tpg;
+ struct se_lun *lun = container_of(to_config_group(lun_ci),
+ struct se_lun, lun_group);
- core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
- return 0;
+ return core_dev_del_initiator_node_lun_acl(lun, lacl);
}
CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
@@ -183,14 +165,15 @@ static ssize_t target_fabric_mappedlun_show_write_protect(
{
struct se_node_acl *se_nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- ssize_t len;
+ ssize_t len = 0;
- spin_lock_irq(&se_nacl->device_list_lock);
- deve = se_nacl->device_list[lacl->mapped_lun];
- len = sprintf(page, "%d\n",
- (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
- 1 : 0);
- spin_unlock_irq(&se_nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
+ if (deve) {
+ len = sprintf(page, "%d\n",
+ (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
+ }
+ rcu_read_unlock();
return len;
}
@@ -218,7 +201,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
lacl->se_lun_nacl);
pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
- " Mapped LUN: %u Write Protect bit to %s\n",
+ " Mapped LUN: %llu Write Protect bit to %s\n",
se_tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
@@ -338,7 +321,7 @@ static struct config_group *target_fabric_make_mappedlun(
struct config_item *acl_ci;
struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
char *buf;
- unsigned long mapped_lun;
+ unsigned long long mapped_lun;
int ret = 0;
acl_ci = &group->cg_item;
@@ -366,21 +349,9 @@ static struct config_group *target_fabric_make_mappedlun(
* Determine the Mapped LUN value. This is what the SCSI Initiator
* Port will actually see.
*/
- ret = kstrtoul(buf + 4, 0, &mapped_lun);
+ ret = kstrtoull(buf + 4, 0, &mapped_lun);
if (ret)
goto out;
- if (mapped_lun > UINT_MAX) {
- ret = -EINVAL;
- goto out;
- }
- if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
- "-1: %u for Target Portal Group: %u\n", mapped_lun,
- TRANSPORT_MAX_LUNS_PER_TPG-1,
- se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
- ret = -EINVAL;
- goto out;
- }
lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
mapped_lun, &ret);
@@ -399,9 +370,9 @@ static struct config_group *target_fabric_make_mappedlun(
}
config_group_init_type_name(&lacl->se_lun_group, name,
- &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit);
+ &tf->tf_tpg_mappedlun_cit);
config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
- "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit);
+ "statistics", &tf->tf_tpg_mappedlun_stat_cit);
lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
lacl_cg->default_groups[1] = NULL;
@@ -458,10 +429,11 @@ static void target_fabric_nacl_base_release(struct config_item *item)
{
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
- struct se_portal_group *se_tpg = se_nacl->se_tpg;
- struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf;
- tf->tf_ops.fabric_drop_nodeacl(se_nacl);
+ if (tf->tf_ops->fabric_cleanup_nodeacl)
+ tf->tf_ops->fabric_cleanup_nodeacl(se_nacl);
+ core_tpg_del_initiator_node_acl(se_nacl);
}
static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
@@ -501,15 +473,18 @@ static struct config_group *target_fabric_make_nodeacl(
struct se_node_acl *se_nacl;
struct config_group *nacl_cg;
- if (!tf->tf_ops.fabric_make_nodeacl) {
- pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
- return ERR_PTR(-ENOSYS);
- }
-
- se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, name);
if (IS_ERR(se_nacl))
return ERR_CAST(se_nacl);
+ if (tf->tf_ops->fabric_init_nodeacl) {
+ int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
+ if (ret) {
+ core_tpg_del_initiator_node_acl(se_nacl);
+ return ERR_PTR(ret);
+ }
+ }
+
nacl_cg = &se_nacl->acl_group;
nacl_cg->default_groups = se_nacl->acl_default_groups;
nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
@@ -519,16 +494,15 @@ static struct config_group *target_fabric_make_nodeacl(
nacl_cg->default_groups[4] = NULL;
config_group_init_type_name(&se_nacl->acl_group, name,
- &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit);
+ &tf->tf_tpg_nacl_base_cit);
config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
- &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit);
+ &tf->tf_tpg_nacl_attrib_cit);
config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
- &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit);
+ &tf->tf_tpg_nacl_auth_cit);
config_group_init_type_name(&se_nacl->acl_param_group, "param",
- &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit);
+ &tf->tf_tpg_nacl_param_cit);
config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
- "fabric_statistics",
- &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit);
+ "fabric_statistics", &tf->tf_tpg_nacl_stat_cit);
return &se_nacl->acl_group;
}
@@ -575,7 +549,7 @@ static void target_fabric_np_base_release(struct config_item *item)
struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
- tf->tf_ops.fabric_drop_np(se_tpg_np);
+ tf->tf_ops->fabric_drop_np(se_tpg_np);
}
static struct configfs_item_operations target_fabric_np_base_item_ops = {
@@ -599,18 +573,18 @@ static struct config_group *target_fabric_make_np(
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_tpg_np *se_tpg_np;
- if (!tf->tf_ops.fabric_make_np) {
+ if (!tf->tf_ops->fabric_make_np) {
pr_err("tf->tf_ops.fabric_make_np is NULL\n");
return ERR_PTR(-ENOSYS);
}
- se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
+ se_tpg_np = tf->tf_ops->fabric_make_np(se_tpg, group, name);
if (!se_tpg_np || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);
se_tpg_np->tpg_np_parent = se_tpg;
config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
- &tf->tf_cit_tmpl.tfc_tpg_np_base_cit);
+ &tf->tf_tpg_np_base_cit);
return &se_tpg_np->tpg_np_group;
}
@@ -654,10 +628,10 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
struct se_lun *lun,
char *page)
{
- if (!lun || !lun->lun_sep)
+ if (!lun || !lun->lun_se_dev)
return -ENODEV;
- return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
+ return core_alua_show_tg_pt_gp_info(lun, page);
}
static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
@@ -665,10 +639,10 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
const char *page,
size_t count)
{
- if (!lun || !lun->lun_sep)
+ if (!lun || !lun->lun_se_dev)
return -ENODEV;
- return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
+ return core_alua_store_tg_pt_gp_info(lun, page, count);
}
TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
@@ -680,7 +654,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
struct se_lun *lun,
char *page)
{
- if (!lun || !lun->lun_sep)
+ if (!lun || !lun->lun_se_dev)
return -ENODEV;
return core_alua_show_offline_bit(lun, page);
@@ -691,7 +665,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
const char *page,
size_t count)
{
- if (!lun || !lun->lun_sep)
+ if (!lun || !lun->lun_se_dev)
return -ENODEV;
return core_alua_store_offline_bit(lun, page, count);
@@ -706,7 +680,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
struct se_lun *lun,
char *page)
{
- if (!lun || !lun->lun_sep)
+ if (!lun || !lun->lun_se_dev)
return -ENODEV;
return core_alua_show_secondary_status(lun, page);
@@ -717,7 +691,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
const char *page,
size_t count)
{
- if (!lun || !lun->lun_sep)
+ if (!lun || !lun->lun_se_dev)
return -ENODEV;
return core_alua_store_secondary_status(lun, page, count);
@@ -732,7 +706,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
struct se_lun *lun,
char *page)
{
- if (!lun || !lun->lun_sep)
+ if (!lun || !lun->lun_se_dev)
return -ENODEV;
return core_alua_show_secondary_write_metadata(lun, page);
@@ -743,7 +717,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
const char *page,
size_t count)
{
- if (!lun || !lun->lun_sep)
+ if (!lun || !lun->lun_se_dev)
return -ENODEV;
return core_alua_store_secondary_write_metadata(lun, page, count);
@@ -769,7 +743,6 @@ static int target_fabric_port_link(
struct config_item *tpg_ci;
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
- struct se_lun *lun_p;
struct se_portal_group *se_tpg;
struct se_device *dev =
container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
@@ -797,20 +770,19 @@ static int target_fabric_port_link(
return -EEXIST;
}
- lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
- if (IS_ERR(lun_p)) {
- pr_err("core_dev_add_lun() failed\n");
- ret = PTR_ERR(lun_p);
+ ret = core_dev_add_lun(se_tpg, dev, lun);
+ if (ret) {
+ pr_err("core_dev_add_lun() failed: %d\n", ret);
goto out;
}
- if (tf->tf_ops.fabric_post_link) {
+ if (tf->tf_ops->fabric_post_link) {
/*
* Call the optional fabric_post_link() to allow a
* fabric module to setup any additional state once
* core_dev_add_lun() has been called..
*/
- tf->tf_ops.fabric_post_link(se_tpg, lun);
+ tf->tf_ops->fabric_post_link(se_tpg, lun);
}
return 0;
@@ -824,25 +796,34 @@ static int target_fabric_port_unlink(
{
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
- struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
+ struct se_portal_group *se_tpg = lun->lun_tpg;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
- if (tf->tf_ops.fabric_pre_unlink) {
+ if (tf->tf_ops->fabric_pre_unlink) {
/*
* Call the optional fabric_pre_unlink() to allow a
	 * fabric module to release any additional state before
* core_dev_del_lun() is called.
*/
- tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
+ tf->tf_ops->fabric_pre_unlink(se_tpg, lun);
}
core_dev_del_lun(se_tpg, lun);
return 0;
}
+static void target_fabric_port_release(struct config_item *item)
+{
+ struct se_lun *lun = container_of(to_config_group(item),
+ struct se_lun, lun_group);
+
+ kfree_rcu(lun, rcu_head);
+}
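
kfree_rcu() defers the kfree() until after an RCU grace period, so lookups done
under rcu_read_lock() elsewhere can finish before the se_lun memory disappears.
The generic shape of the pattern, with a hypothetical struct:

	struct foo {
		long payload;
		struct rcu_head rcu_head;	/* must be embedded in the object */
	};

	static void foo_release(struct foo *f)
	{
		kfree_rcu(f, rcu_head);	/* freed only once current readers drain */
	}
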
+
static struct configfs_item_operations target_fabric_port_item_ops = {
.show_attribute = target_fabric_port_attr_show,
.store_attribute = target_fabric_port_attr_store,
+ .release = target_fabric_port_release,
.allow_link = target_fabric_port_link,
.drop_link = target_fabric_port_unlink,
};
@@ -887,7 +868,7 @@ static struct config_group *target_fabric_make_lun(
struct se_portal_group, tpg_lun_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
- unsigned long unpacked_lun;
+ unsigned long long unpacked_lun;
int errno;
if (strstr(name, "lun_") != name) {
@@ -895,28 +876,27 @@ static struct config_group *target_fabric_make_lun(
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
- errno = kstrtoul(name + 4, 0, &unpacked_lun);
+ errno = kstrtoull(name + 4, 0, &unpacked_lun);
if (errno)
return ERR_PTR(errno);
- if (unpacked_lun > UINT_MAX)
- return ERR_PTR(-EINVAL);
- lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
- if (!lun)
- return ERR_PTR(-EINVAL);
+ lun = core_tpg_alloc_lun(se_tpg, unpacked_lun);
+ if (IS_ERR(lun))
+ return ERR_CAST(lun);
lun_cg = &lun->lun_group;
lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lun_cg->default_groups) {
pr_err("Unable to allocate lun_cg->default_groups\n");
+ kfree(lun);
return ERR_PTR(-ENOMEM);
}
config_group_init_type_name(&lun->lun_group, name,
- &tf->tf_cit_tmpl.tfc_tpg_port_cit);
+ &tf->tf_tpg_port_cit);
config_group_init_type_name(&lun->port_stat_grps.stat_group,
- "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit);
+ "statistics", &tf->tf_tpg_port_stat_cit);
lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
lun_cg->default_groups[1] = NULL;
@@ -926,6 +906,7 @@ static struct config_group *target_fabric_make_lun(
if (!port_stat_grp->default_groups) {
pr_err("Unable to allocate port_stat_grp->default_groups\n");
kfree(lun_cg->default_groups);
+ kfree(lun);
return ERR_PTR(-ENOMEM);
}
target_stat_setup_port_default_groups(lun);
@@ -1023,7 +1004,7 @@ static void target_fabric_tpg_release(struct config_item *item)
struct se_wwn *wwn = se_tpg->se_tpg_wwn;
struct target_fabric_configfs *tf = wwn->wwn_tf;
- tf->tf_ops.fabric_drop_tpg(se_tpg);
+ tf->tf_ops->fabric_drop_tpg(se_tpg);
}
static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
@@ -1046,12 +1027,12 @@ static struct config_group *target_fabric_make_tpg(
struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg;
- if (!tf->tf_ops.fabric_make_tpg) {
- pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
+ if (!tf->tf_ops->fabric_make_tpg) {
+ pr_err("tf->tf_ops->fabric_make_tpg is NULL\n");
return ERR_PTR(-ENOSYS);
}
- se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
+ se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name);
if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
/*
@@ -1067,19 +1048,19 @@ static struct config_group *target_fabric_make_tpg(
se_tpg->tpg_group.default_groups[6] = NULL;
config_group_init_type_name(&se_tpg->tpg_group, name,
- &tf->tf_cit_tmpl.tfc_tpg_base_cit);
+ &tf->tf_tpg_base_cit);
config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
- &tf->tf_cit_tmpl.tfc_tpg_lun_cit);
+ &tf->tf_tpg_lun_cit);
config_group_init_type_name(&se_tpg->tpg_np_group, "np",
- &tf->tf_cit_tmpl.tfc_tpg_np_cit);
+ &tf->tf_tpg_np_cit);
config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
- &tf->tf_cit_tmpl.tfc_tpg_nacl_cit);
+ &tf->tf_tpg_nacl_cit);
config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
- &tf->tf_cit_tmpl.tfc_tpg_attrib_cit);
+ &tf->tf_tpg_attrib_cit);
config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
- &tf->tf_cit_tmpl.tfc_tpg_auth_cit);
+ &tf->tf_tpg_auth_cit);
config_group_init_type_name(&se_tpg->tpg_param_group, "param",
- &tf->tf_cit_tmpl.tfc_tpg_param_cit);
+ &tf->tf_tpg_param_cit);
return &se_tpg->tpg_group;
}
@@ -1112,7 +1093,7 @@ static void target_fabric_release_wwn(struct config_item *item)
struct se_wwn, wwn_group);
struct target_fabric_configfs *tf = wwn->wwn_tf;
- tf->tf_ops.fabric_drop_wwn(wwn);
+ tf->tf_ops->fabric_drop_wwn(wwn);
}
static struct configfs_item_operations target_fabric_tpg_item_ops = {
@@ -1148,12 +1129,12 @@ static struct config_group *target_fabric_make_wwn(
struct target_fabric_configfs, tf_group);
struct se_wwn *wwn;
- if (!tf->tf_ops.fabric_make_wwn) {
+ if (!tf->tf_ops->fabric_make_wwn) {
pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
return ERR_PTR(-ENOSYS);
}
- wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
+ wwn = tf->tf_ops->fabric_make_wwn(tf, group, name);
if (!wwn || IS_ERR(wwn))
return ERR_PTR(-EINVAL);
@@ -1165,10 +1146,9 @@ static struct config_group *target_fabric_make_wwn(
wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
wwn->wwn_group.default_groups[1] = NULL;
- config_group_init_type_name(&wwn->wwn_group, name,
- &tf->tf_cit_tmpl.tfc_tpg_cit);
+ config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
- &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit);
+ &tf->tf_wwn_fabric_stats_cit);
return &wwn->wwn_group;
}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 41f4f270f919..cb6497ce4b61 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -24,6 +24,11 @@
*
******************************************************************************/
+/*
+ * See SPC4, section 7.5 "Protocol specific parameters" for details
+ * on the formats implemented in this file.
+ */
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
@@ -34,124 +39,30 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_pr.h"
-/*
- * Handlers for Serial Attached SCSI (SAS)
- */
-u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- /*
- * Return a SAS Serial SCSI Protocol identifier for loopback operations
- * This is defined in section 7.5.1 Table 362 in spc4r17
- */
- return 0x6;
-}
-EXPORT_SYMBOL(sas_get_fabric_proto_ident);
-u32 sas_get_pr_transport_id(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
+static int sas_get_pr_transport_id(
+ struct se_node_acl *nacl,
int *format_code,
unsigned char *buf)
{
- unsigned char *ptr;
int ret;
- /*
- * Set PROTOCOL IDENTIFIER to 6h for SAS
- */
- buf[0] = 0x06;
- /*
- * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
- * over SAS Serial SCSI Protocol
- */
- ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
-
- ret = hex2bin(&buf[4], ptr, 8);
- if (ret < 0)
- pr_debug("sas transport_id: invalid hex string\n");
-
- /*
- * The SAS Transport ID is a hardcoded 24-byte length
- */
- return 24;
-}
-EXPORT_SYMBOL(sas_get_pr_transport_id);
-
-u32 sas_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- *format_code = 0;
- /*
- * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
- * over SAS Serial SCSI Protocol
- *
- * The SAS Transport ID is a hardcoded 24-byte length
- */
- return 24;
-}
-EXPORT_SYMBOL(sas_get_pr_transport_id_len);
-
-/*
- * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
- * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
- */
-char *sas_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- /*
- * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
- * for initiator ports using SCSI over SAS Serial SCSI Protocol
- *
- * The TransportID for a SAS Initiator Port is of fixed size of
- * 24 bytes, and SAS does not contain an I_T nexus identifier,
- * so we return the **port_nexus_ptr set to NULL.
- */
- *port_nexus_ptr = NULL;
- *out_tid_len = 24;
-
- return (char *)&buf[4];
-}
-EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
-
-/*
- * Handlers for Fibre Channel Protocol (FCP)
- */
-u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */
-}
-EXPORT_SYMBOL(fc_get_fabric_proto_ident);
+	/* Skip over 'naa.' prefix */
+ ret = hex2bin(&buf[4], &nacl->initiatorname[4], 8);
+ if (ret) {
+ pr_debug("%s: invalid hex string\n", __func__);
+ return ret;
+ }
-u32 fc_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- *format_code = 0;
- /*
- * The FC Transport ID is a hardcoded 24-byte length
- */
return 24;
}
-EXPORT_SYMBOL(fc_get_pr_transport_id_len);
-u32 fc_get_pr_transport_id(
- struct se_portal_group *se_tpg,
+static int fc_get_pr_transport_id(
struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
@@ -160,24 +71,20 @@ u32 fc_get_pr_transport_id(
u32 off = 8;
/*
- * PROTOCOL IDENTIFIER is 0h for FCP-2
- *
- * From spc4r17, 7.5.4.2 TransportID for initiator ports using
- * SCSI over Fibre Channel
- *
* We convert the ASCII formatted N Port name into a binary
* encoded TransportID.
*/
ptr = &se_nacl->initiatorname[0];
-
for (i = 0; i < 24; ) {
if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
}
ret = hex2bin(&buf[off++], &ptr[i], 1);
- if (ret < 0)
- pr_debug("fc transport_id: invalid hex string\n");
+ if (ret < 0) {
+ pr_debug("%s: invalid hex string\n", __func__);
+ return ret;
+ }
i += 2;
}
/*
@@ -185,42 +92,52 @@ u32 fc_get_pr_transport_id(
*/
return 24;
}
-EXPORT_SYMBOL(fc_get_pr_transport_id);
-char *fc_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
+static int sbp_get_pr_transport_id(
+ struct se_node_acl *nacl,
+ int *format_code,
+ unsigned char *buf)
{
- /*
- * The TransportID for a FC N Port is of fixed size of
- * 24 bytes, and FC does not contain an I_T nexus identifier,
- * so we return the **port_nexus_ptr set to NULL.
- */
- *port_nexus_ptr = NULL;
- *out_tid_len = 24;
+ int ret;
- return (char *)&buf[8];
-}
-EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
+ ret = hex2bin(&buf[8], nacl->initiatorname, 8);
+ if (ret) {
+ pr_debug("%s: invalid hex string\n", __func__);
+ return ret;
+ }
-/*
- * Handlers for Internet Small Computer Systems Interface (iSCSI)
- */
+ return 24;
+}
-u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+static int srp_get_pr_transport_id(
+ struct se_node_acl *nacl,
+ int *format_code,
+ unsigned char *buf)
{
- /*
- * This value is defined for "Internet SCSI (iSCSI)"
- * in spc4r17 section 7.5.1 Table 362
- */
- return 0x5;
+ const char *p;
+ unsigned len, count, leading_zero_bytes;
+ int rc;
+
+ p = nacl->initiatorname;
+ if (strncasecmp(p, "0x", 2) == 0)
+ p += 2;
+ len = strlen(p);
+ if (len % 2)
+ return -EINVAL;
+
+ count = min(len / 2, 16U);
+ leading_zero_bytes = 16 - count;
+ memset(buf + 8, 0, leading_zero_bytes);
+ rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
+ if (rc < 0) {
+ pr_debug("hex2bin failed for %s: %d\n", __func__, rc);
+ return rc;
+ }
+
+ return 24;
}
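
A worked example of the left-padding above: for an initiator name of
"0xfe800000000000000002c903000e8acd" (32 hex digits after the optional "0x"),
len = 32, count = 16 and leading_zero_bytes = 0, so hex2bin() fills buf[8..23]
with the full 16-byte identifier. A shorter even-length name is right-aligned
instead: the zero bytes are written first and the decoded digits land at the
end of the 8..23 range, preserving the fixed 24-byte TransportID layout.
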
-EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
-u32 iscsi_get_pr_transport_id(
- struct se_portal_group *se_tpg,
+static int iscsi_get_pr_transport_id(
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
@@ -231,10 +148,6 @@ u32 iscsi_get_pr_transport_id(
spin_lock_irq(&se_nacl->nacl_sess_lock);
/*
- * Set PROTOCOL IDENTIFIER to 5h for iSCSI
- */
- buf[0] = 0x05;
- /*
* From spc4r17 Section 7.5.4.6: TransportID for initiator
* ports using SCSI over iSCSI.
*
@@ -313,10 +226,8 @@ u32 iscsi_get_pr_transport_id(
return len;
}
-EXPORT_SYMBOL(iscsi_get_pr_transport_id);
-u32 iscsi_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
+static int iscsi_get_pr_transport_id_len(
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
@@ -359,9 +270,8 @@ u32 iscsi_get_pr_transport_id_len(
return len;
}
-EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
-char *iscsi_parse_pr_out_transport_id(
+static char *iscsi_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
@@ -448,4 +358,79 @@ char *iscsi_parse_pr_out_transport_id(
return (char *)&buf[4];
}
-EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
+
+int target_get_pr_transport_id_len(struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg, int *format_code)
+{
+ switch (nacl->se_tpg->proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ case SCSI_PROTOCOL_SBP:
+ case SCSI_PROTOCOL_SRP:
+ case SCSI_PROTOCOL_SAS:
+ break;
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id_len(nacl, pr_reg, format_code);
+ default:
+ pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Most transports use a fixed length 24 byte identifier.
+ */
+ *format_code = 0;
+ return 24;
+}
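
A hedged sketch of how a caller consumes the consolidated length helper:

	int format_code, len;

	len = target_get_pr_transport_id_len(nacl, pr_reg, &format_code);
	if (len < 0)
		return len;	/* unknown proto_id */
	/* SAS/FCP/SBP/SRP: len == 24, format_code == 0; iSCSI: variable */
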
+
+int target_get_pr_transport_id(struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg, int *format_code,
+ unsigned char *buf)
+{
+ switch (nacl->se_tpg->proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_pr_transport_id(nacl, format_code, buf);
+ case SCSI_PROTOCOL_SBP:
+ return sbp_get_pr_transport_id(nacl, format_code, buf);
+ case SCSI_PROTOCOL_SRP:
+ return srp_get_pr_transport_id(nacl, format_code, buf);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_pr_transport_id(nacl, format_code, buf);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id(nacl, pr_reg, format_code,
+ buf);
+ default:
+ pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
+ return -EINVAL;
+ }
+}
+
+const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+ const char *buf, u32 *out_tid_len, char **port_nexus_ptr)
+{
+ u32 offset;
+
+ switch (tpg->proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ /*
+ * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+ * for initiator ports using SCSI over SAS Serial SCSI Protocol.
+ */
+ offset = 4;
+ break;
+ case SCSI_PROTOCOL_SBP:
+ case SCSI_PROTOCOL_SRP:
+ case SCSI_PROTOCOL_FCP:
+ offset = 8;
+ break;
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ default:
+ pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id);
+ return NULL;
+ }
+
+ *port_nexus_ptr = NULL;
+ *out_tid_len = 24;
+ return buf + offset;
+}
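
And a matching sketch for the parse side; the returned pointer aliases the
caller's buffer at a protocol-specific offset:

	const char *id;
	u32 tid_len;
	char *isid = NULL;

	id = target_parse_pr_out_transport_id(tpg, buf, &tid_len, &isid);
	if (!id)
		return -EINVAL;	/* unrecognized proto_id */
	/* e.g. SAS: id == buf + 4, tid_len == 24, isid stays NULL */
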
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 664171353289..e3195700211a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -37,7 +37,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
#include "target_core_file.h"
@@ -46,10 +45,6 @@ static inline struct fd_dev *FD_DEV(struct se_device *dev)
return container_of(dev, struct fd_dev, dev);
}
-/* fd_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
struct fd_host *fd_host;
@@ -66,7 +61,7 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
- TARGET_CORE_MOD_VERSION);
+ TARGET_CORE_VERSION);
pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
hba->hba_id, fd_host->fd_host_id);
@@ -246,87 +241,34 @@ fail:
return ret;
}
-static void fd_free_device(struct se_device *dev)
+static void fd_dev_call_rcu(struct rcu_head *p)
{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
struct fd_dev *fd_dev = FD_DEV(dev);
- if (fd_dev->fd_file) {
- filp_close(fd_dev->fd_file, NULL);
- fd_dev->fd_file = NULL;
- }
-
kfree(fd_dev);
}
-static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
- int is_write)
+static void fd_free_device(struct se_device *dev)
{
- struct se_device *se_dev = cmd->se_dev;
- struct fd_dev *dev = FD_DEV(se_dev);
- struct file *prot_fd = dev->fd_prot_file;
- loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
- unsigned char *buf;
- u32 prot_size;
- int rc, ret = 1;
-
- prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
- se_dev->prot_length;
-
- if (!is_write) {
- fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
- if (!fd_prot->prot_buf) {
- pr_err("Unable to allocate fd_prot->prot_buf\n");
- return -ENOMEM;
- }
- buf = fd_prot->prot_buf;
-
- fd_prot->prot_sg_nents = 1;
- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!fd_prot->prot_sg) {
- pr_err("Unable to allocate fd_prot->prot_sg\n");
- kfree(fd_prot->prot_buf);
- return -ENOMEM;
- }
- sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
- sg_set_buf(fd_prot->prot_sg, buf, prot_size);
- }
-
- if (is_write) {
- rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
- if (rc < 0 || prot_size != rc) {
- pr_err("kernel_write() for fd_do_prot_rw failed:"
- " %d\n", rc);
- ret = -EINVAL;
- }
- } else {
- rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
- if (rc < 0) {
- pr_err("kernel_read() for fd_do_prot_rw failed:"
- " %d\n", rc);
- ret = -EINVAL;
- }
- }
+ struct fd_dev *fd_dev = FD_DEV(dev);
- if (is_write || ret < 0) {
- kfree(fd_prot->prot_sg);
- kfree(fd_prot->prot_buf);
+ if (fd_dev->fd_file) {
+ filp_close(fd_dev->fd_file, NULL);
+ fd_dev->fd_file = NULL;
}
-
- return ret;
+ call_rcu(&dev->rcu_head, fd_dev_call_rcu);
}
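
fd_free_device() now closes the backing file immediately but defers freeing the
structure through call_rcu(); the callback recovers the backend-private
container with container_of(). The same two-step teardown in generic,
hypothetical form:

	static void bar_dev_call_rcu(struct rcu_head *p)
	{
		struct bar_dev *bar = container_of(p, struct bar_dev, rcu_head);

		kfree(bar);	/* runs after a full RCU grace period */
	}

	static void bar_dev_free(struct bar_dev *bar)
	{
		/* release anything that must go away immediately first ... */
		call_rcu(&bar->rcu_head, bar_dev_call_rcu);
	}
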
-static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, int is_write)
+static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
+ u32 block_size, struct scatterlist *sgl,
+ u32 sgl_nents, u32 data_length, int is_write)
{
- struct se_device *se_dev = cmd->se_dev;
- struct fd_dev *dev = FD_DEV(se_dev);
- struct file *fd = dev->fd_file;
struct scatterlist *sg;
struct iov_iter iter;
struct bio_vec *bvec;
ssize_t len = 0;
- loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
+ loff_t pos = (cmd->t_task_lba * block_size);
int ret = 0, i;
bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
@@ -352,7 +294,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
kfree(bvec);
if (is_write) {
- if (ret < 0 || ret != cmd->data_length) {
+ if (ret < 0 || ret != data_length) {
pr_err("%s() write returned %d\n", __func__, ret);
return (ret < 0 ? ret : -EINVAL);
}
@@ -363,10 +305,10 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
* block_device.
*/
if (S_ISBLK(file_inode(fd)->i_mode)) {
- if (ret < 0 || ret != cmd->data_length) {
+ if (ret < 0 || ret != data_length) {
pr_err("%s() returned %d, expecting %u for "
"S_ISBLK\n", __func__, ret,
- cmd->data_length);
+ data_length);
return (ret < 0 ? ret : -EINVAL);
}
} else {
@@ -533,9 +475,9 @@ fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
}
static sense_reason_t
-fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
- struct file *file = priv;
+ struct file *file = FD_DEV(cmd->se_dev)->fd_file;
struct inode *inode = file->f_mapping->host;
int ret;
@@ -577,42 +519,13 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
}
static sense_reason_t
-fd_execute_write_same_unmap(struct se_cmd *cmd)
-{
- struct se_device *se_dev = cmd->se_dev;
- struct fd_dev *fd_dev = FD_DEV(se_dev);
- struct file *file = fd_dev->fd_file;
- sector_t lba = cmd->t_task_lba;
- sector_t nolb = sbc_get_write_same_sectors(cmd);
- sense_reason_t ret;
-
- if (!nolb) {
- target_complete_cmd(cmd, SAM_STAT_GOOD);
- return 0;
- }
-
- ret = fd_do_unmap(cmd, file, lba, nolb);
- if (ret)
- return ret;
-
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
-
-static sense_reason_t
-fd_execute_unmap(struct se_cmd *cmd)
-{
- struct file *file = FD_DEV(cmd->se_dev)->fd_file;
-
- return sbc_execute_unmap(cmd, fd_do_unmap, file);
-}
-
-static sense_reason_t
fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
- struct fd_prot fd_prot;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *file = fd_dev->fd_file;
+ struct file *pfile = fd_dev->fd_prot_file;
sense_reason_t rc;
int ret = 0;
/*
@@ -630,58 +543,45 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* physical memory addresses to struct iovec virtual memory.
*/
if (data_direction == DMA_FROM_DEVICE) {
- memset(&fd_prot, 0, sizeof(struct fd_prot));
-
if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
- ret = fd_do_prot_rw(cmd, &fd_prot, false);
+ ret = fd_do_rw(cmd, pfile, dev->prot_length,
+ cmd->t_prot_sg, cmd->t_prot_nents,
+ cmd->prot_length, 0);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
+ ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
+ sgl, sgl_nents, cmd->data_length, 0);
if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
- u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+ u32 sectors = cmd->data_length >>
+ ilog2(dev->dev_attrib.block_size);
- rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
- 0, fd_prot.prot_sg, 0);
- if (rc) {
- kfree(fd_prot.prot_sg);
- kfree(fd_prot.prot_buf);
+ rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
+ 0, cmd->t_prot_sg, 0);
+ if (rc)
return rc;
- }
- kfree(fd_prot.prot_sg);
- kfree(fd_prot.prot_buf);
}
} else {
- memset(&fd_prot, 0, sizeof(struct fd_prot));
-
if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
- u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+ u32 sectors = cmd->data_length >>
+ ilog2(dev->dev_attrib.block_size);
- ret = fd_do_prot_rw(cmd, &fd_prot, false);
- if (ret < 0)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
- rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
- 0, fd_prot.prot_sg, 0);
- if (rc) {
- kfree(fd_prot.prot_sg);
- kfree(fd_prot.prot_buf);
+ rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
+ 0, cmd->t_prot_sg, 0);
+ if (rc)
return rc;
- }
}
- ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
+ ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
+ sgl, sgl_nents, cmd->data_length, 1);
/*
	 * Perform implicit vfs_fsync_range() for fd_do_rw() ops
* for SCSI WRITEs with Forced Unit Access (FUA) set.
* Allow this to happen independent of WCE=0 setting.
*/
- if (ret > 0 &&
- dev->dev_attrib.emulate_fua_write > 0 &&
- (cmd->se_cmd_flags & SCF_FUA)) {
- struct fd_dev *fd_dev = FD_DEV(dev);
+ if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
loff_t start = cmd->t_task_lba *
dev->dev_attrib.block_size;
loff_t end;
@@ -695,17 +595,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
- ret = fd_do_prot_rw(cmd, &fd_prot, true);
+ ret = fd_do_rw(cmd, pfile, dev->prot_length,
+ cmd->t_prot_sg, cmd->t_prot_nents,
+ cmd->prot_length, 1);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
}
- if (ret < 0) {
- kfree(fd_prot.prot_sg);
- kfree(fd_prot.prot_buf);
+ if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
if (ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
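
The sector counts above are now computed with a right shift instead of a
division, which is valid because block_size is restricted to powers of two
(512, 1024, 2048 or 4096). As a worked example, for data_length = 8192 and
block_size = 512: ilog2(512) = 9 and 8192 >> 9 = 16, the same 16 sectors that
8192 / 512 yields.
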
@@ -908,7 +807,6 @@ static struct sbc_ops fd_sbc_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
.execute_write_same = fd_execute_write_same,
- .execute_write_same_unmap = fd_execute_write_same_unmap,
.execute_unmap = fd_execute_unmap,
};
@@ -918,42 +816,7 @@ fd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
-DEF_TB_DEFAULT_ATTRIBS(fileio);
-
-static struct configfs_attribute *fileio_backend_dev_attrs[] = {
- &fileio_dev_attrib_emulate_model_alias.attr,
- &fileio_dev_attrib_emulate_dpo.attr,
- &fileio_dev_attrib_emulate_fua_write.attr,
- &fileio_dev_attrib_emulate_fua_read.attr,
- &fileio_dev_attrib_emulate_write_cache.attr,
- &fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
- &fileio_dev_attrib_emulate_tas.attr,
- &fileio_dev_attrib_emulate_tpu.attr,
- &fileio_dev_attrib_emulate_tpws.attr,
- &fileio_dev_attrib_emulate_caw.attr,
- &fileio_dev_attrib_emulate_3pc.attr,
- &fileio_dev_attrib_pi_prot_type.attr,
- &fileio_dev_attrib_hw_pi_prot_type.attr,
- &fileio_dev_attrib_pi_prot_format.attr,
- &fileio_dev_attrib_enforce_pr_isids.attr,
- &fileio_dev_attrib_is_nonrot.attr,
- &fileio_dev_attrib_emulate_rest_reord.attr,
- &fileio_dev_attrib_force_pr_aptpl.attr,
- &fileio_dev_attrib_hw_block_size.attr,
- &fileio_dev_attrib_block_size.attr,
- &fileio_dev_attrib_hw_max_sectors.attr,
- &fileio_dev_attrib_optimal_sectors.attr,
- &fileio_dev_attrib_hw_queue_depth.attr,
- &fileio_dev_attrib_queue_depth.attr,
- &fileio_dev_attrib_max_unmap_lba_count.attr,
- &fileio_dev_attrib_max_unmap_block_desc_count.attr,
- &fileio_dev_attrib_unmap_granularity.attr,
- &fileio_dev_attrib_unmap_granularity_alignment.attr,
- &fileio_dev_attrib_max_write_same_len.attr,
- NULL,
-};
-
-static struct se_subsystem_api fileio_template = {
+static const struct target_backend_ops fileio_ops = {
.name = "fileio",
.inquiry_prod = "FILEIO",
.inquiry_rev = FD_VERSION,
@@ -971,21 +834,17 @@ static struct se_subsystem_api fileio_template = {
.init_prot = fd_init_prot,
.format_prot = fd_format_prot,
.free_prot = fd_free_prot,
+ .tb_dev_attrib_attrs = sbc_attrib_attrs,
};
static int __init fileio_module_init(void)
{
- struct target_backend_cits *tbc = &fileio_template.tb_cits;
-
- target_core_setup_sub_cits(&fileio_template);
- tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs;
-
- return transport_subsystem_register(&fileio_template);
+ return transport_backend_register(&fileio_ops);
}
static void __exit fileio_module_exit(void)
{
- transport_subsystem_release(&fileio_template);
+ target_backend_unregister(&fileio_ops);
}
MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
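
The new interface removes the per-backend configfs boilerplate: the core now owns the struct target_backend wrapper and its config_item_types. A hedged sketch of a backend module under the new API, following the fileio pattern above; the "example" names are hypothetical, and a real backend must also supply the mandatory callbacks (attach_hba/detach_hba, alloc_device/free_device, parse_cdb, ...):

	#include <linux/module.h>
	#include <target/target_core_backend.h>

	static const struct target_backend_ops example_ops = {
		.name			= "example",
		.owner			= THIS_MODULE,
		/* per-backend callbacks elided in this sketch */
		.tb_dev_attrib_attrs	= sbc_attrib_attrs,
	};

	static int __init example_module_init(void)
	{
		/* The core allocates the struct target_backend wrapper and
		 * sets up its config_item_types; duplicate names get -EEXIST. */
		return transport_backend_register(&example_ops);
	}
	module_init(example_module_init);

	static void __exit example_module_exit(void)
	{
		target_backend_unregister(&example_ops);
	}
	module_exit(example_module_exit);

	MODULE_LICENSE("GPL");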
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 182cbb295039..068966fce308 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -21,12 +21,6 @@
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
#define FDBD_FORMAT_UNIT_SIZE 2048
-struct fd_prot {
- unsigned char *prot_buf;
- struct scatterlist *prot_sg;
- u32 prot_sg_nents;
-};
-
struct fd_dev {
struct se_device dev;
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index ff95f95dcd13..62ea4e8e70a8 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -36,67 +36,78 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
-static LIST_HEAD(subsystem_list);
-static DEFINE_MUTEX(subsystem_mutex);
+static LIST_HEAD(backend_list);
+static DEFINE_MUTEX(backend_mutex);
static u32 hba_id_counter;
static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);
-int transport_subsystem_register(struct se_subsystem_api *sub_api)
-{
- struct se_subsystem_api *s;
-
- INIT_LIST_HEAD(&sub_api->sub_api_list);
- mutex_lock(&subsystem_mutex);
- list_for_each_entry(s, &subsystem_list, sub_api_list) {
- if (!strcmp(s->name, sub_api->name)) {
- pr_err("%p is already registered with"
- " duplicate name %s, unable to process"
- " request\n", s, s->name);
- mutex_unlock(&subsystem_mutex);
+int transport_backend_register(const struct target_backend_ops *ops)
+{
+ struct target_backend *tb, *old;
+
+ tb = kzalloc(sizeof(*tb), GFP_KERNEL);
+ if (!tb)
+ return -ENOMEM;
+ tb->ops = ops;
+
+ mutex_lock(&backend_mutex);
+ list_for_each_entry(old, &backend_list, list) {
+ if (!strcmp(old->ops->name, ops->name)) {
+ pr_err("backend %s already registered.\n", ops->name);
+ mutex_unlock(&backend_mutex);
+ kfree(tb);
return -EEXIST;
}
}
- list_add_tail(&sub_api->sub_api_list, &subsystem_list);
- mutex_unlock(&subsystem_mutex);
+ target_setup_backend_cits(tb);
+ list_add_tail(&tb->list, &backend_list);
+ mutex_unlock(&backend_mutex);
- pr_debug("TCM: Registered subsystem plugin: %s struct module:"
- " %p\n", sub_api->name, sub_api->owner);
+ pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
+ ops->name, ops->owner);
return 0;
}
-EXPORT_SYMBOL(transport_subsystem_register);
+EXPORT_SYMBOL(transport_backend_register);
-void transport_subsystem_release(struct se_subsystem_api *sub_api)
+void target_backend_unregister(const struct target_backend_ops *ops)
{
- mutex_lock(&subsystem_mutex);
- list_del(&sub_api->sub_api_list);
- mutex_unlock(&subsystem_mutex);
+ struct target_backend *tb;
+
+ mutex_lock(&backend_mutex);
+ list_for_each_entry(tb, &backend_list, list) {
+ if (tb->ops == ops) {
+ list_del(&tb->list);
+ kfree(tb);
+ break;
+ }
+ }
+ mutex_unlock(&backend_mutex);
}
-EXPORT_SYMBOL(transport_subsystem_release);
+EXPORT_SYMBOL(target_backend_unregister);
-static struct se_subsystem_api *core_get_backend(const char *sub_name)
+static struct target_backend *core_get_backend(const char *name)
{
- struct se_subsystem_api *s;
+ struct target_backend *tb;
- mutex_lock(&subsystem_mutex);
- list_for_each_entry(s, &subsystem_list, sub_api_list) {
- if (!strcmp(s->name, sub_name))
+ mutex_lock(&backend_mutex);
+ list_for_each_entry(tb, &backend_list, list) {
+ if (!strcmp(tb->ops->name, name))
goto found;
}
- mutex_unlock(&subsystem_mutex);
+ mutex_unlock(&backend_mutex);
return NULL;
found:
- if (s->owner && !try_module_get(s->owner))
- s = NULL;
- mutex_unlock(&subsystem_mutex);
- return s;
+ if (tb->ops->owner && !try_module_get(tb->ops->owner))
+ tb = NULL;
+ mutex_unlock(&backend_mutex);
+ return tb;
}
struct se_hba *
@@ -117,13 +128,13 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;
- hba->transport = core_get_backend(plugin_name);
- if (!hba->transport) {
+ hba->backend = core_get_backend(plugin_name);
+ if (!hba->backend) {
ret = -EINVAL;
goto out_free_hba;
}
- ret = hba->transport->attach_hba(hba, plugin_dep_id);
+ ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
if (ret < 0)
goto out_module_put;
@@ -138,8 +149,8 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
return hba;
out_module_put:
- module_put(hba->transport->owner);
- hba->transport = NULL;
+ module_put(hba->backend->ops->owner);
+ hba->backend = NULL;
out_free_hba:
kfree(hba);
return ERR_PTR(ret);
@@ -150,7 +161,7 @@ core_delete_hba(struct se_hba *hba)
{
WARN_ON(hba->dev_count);
- hba->transport->detach_hba(hba);
+ hba->backend->ops->detach_hba(hba);
spin_lock(&hba_lock);
list_del(&hba->hba_node);
@@ -159,9 +170,9 @@ core_delete_hba(struct se_hba *hba)
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
- module_put(hba->transport->owner);
+ module_put(hba->backend->ops->owner);
- hba->transport = NULL;
+ hba->backend = NULL;
kfree(hba);
return 0;
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 972ed1781ae2..6d88d24e6cce 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -40,7 +40,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
#include "target_core_iblock.h"
@@ -53,17 +52,11 @@ static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
}
-static struct se_subsystem_api iblock_template;
-
-/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
- IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
+ IBLOCK_VERSION, TARGET_CORE_VERSION);
return 0;
}
@@ -197,6 +190,14 @@ out:
return ret;
}
+static void iblock_dev_call_rcu(struct rcu_head *p)
+{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+ kfree(ib_dev);
+}
+
static void iblock_free_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -206,7 +207,7 @@ static void iblock_free_device(struct se_device *dev)
if (ib_dev->ibd_bio_set != NULL)
bioset_free(ib_dev->ibd_bio_set);
- kfree(ib_dev);
+ call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}
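
The free path now defers the final kfree() past an RCU grace period, so lockless readers that reached the se_device through RCU-protected lists cannot observe a freed object. The general shape of the pattern, sketched with a hypothetical "foo" backend:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <target/target_core_base.h>

	struct foo_dev {
		struct se_device dev;	/* embeds the common se_device */
		/* backend-private state ... */
	};

	static void foo_dev_call_rcu(struct rcu_head *p)
	{
		/* Runs after a grace period: no RCU reader can still see dev. */
		struct se_device *dev = container_of(p, struct se_device, rcu_head);

		kfree(container_of(dev, struct foo_dev, dev));
	}

	static void foo_free_device(struct se_device *dev)
	{
		/* Release resources readers never touch first, then defer
		 * the final kfree() past outstanding rcu_read_lock() sections. */
		call_rcu(&dev->rcu_head, foo_dev_call_rcu);
	}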
static unsigned long long iblock_emulate_read_cap_with_block_size(
@@ -414,10 +415,9 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
}
static sense_reason_t
-iblock_do_unmap(struct se_cmd *cmd, void *priv,
- sector_t lba, sector_t nolb)
+iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
- struct block_device *bdev = priv;
+ struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
int ret;
ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
@@ -430,30 +430,6 @@ iblock_do_unmap(struct se_cmd *cmd, void *priv,
}
static sense_reason_t
-iblock_execute_unmap(struct se_cmd *cmd)
-{
- struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
-
- return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
-}
-
-static sense_reason_t
-iblock_execute_write_same_unmap(struct se_cmd *cmd)
-{
- struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
- sector_t lba = cmd->t_task_lba;
- sector_t nolb = sbc_get_write_same_sectors(cmd);
- sense_reason_t ret;
-
- ret = iblock_do_unmap(cmd, bdev, lba, nolb);
- if (ret)
- return ret;
-
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
-
-static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
struct iblock_req *ibr;
@@ -844,7 +820,6 @@ static struct sbc_ops iblock_sbc_ops = {
.execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache,
.execute_write_same = iblock_execute_write_same,
- .execute_write_same_unmap = iblock_execute_write_same_unmap,
.execute_unmap = iblock_execute_unmap,
};
@@ -863,42 +838,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
return q->flush_flags & REQ_FLUSH;
}
-DEF_TB_DEFAULT_ATTRIBS(iblock);
-
-static struct configfs_attribute *iblock_backend_dev_attrs[] = {
- &iblock_dev_attrib_emulate_model_alias.attr,
- &iblock_dev_attrib_emulate_dpo.attr,
- &iblock_dev_attrib_emulate_fua_write.attr,
- &iblock_dev_attrib_emulate_fua_read.attr,
- &iblock_dev_attrib_emulate_write_cache.attr,
- &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
- &iblock_dev_attrib_emulate_tas.attr,
- &iblock_dev_attrib_emulate_tpu.attr,
- &iblock_dev_attrib_emulate_tpws.attr,
- &iblock_dev_attrib_emulate_caw.attr,
- &iblock_dev_attrib_emulate_3pc.attr,
- &iblock_dev_attrib_pi_prot_type.attr,
- &iblock_dev_attrib_hw_pi_prot_type.attr,
- &iblock_dev_attrib_pi_prot_format.attr,
- &iblock_dev_attrib_enforce_pr_isids.attr,
- &iblock_dev_attrib_is_nonrot.attr,
- &iblock_dev_attrib_emulate_rest_reord.attr,
- &iblock_dev_attrib_force_pr_aptpl.attr,
- &iblock_dev_attrib_hw_block_size.attr,
- &iblock_dev_attrib_block_size.attr,
- &iblock_dev_attrib_hw_max_sectors.attr,
- &iblock_dev_attrib_optimal_sectors.attr,
- &iblock_dev_attrib_hw_queue_depth.attr,
- &iblock_dev_attrib_queue_depth.attr,
- &iblock_dev_attrib_max_unmap_lba_count.attr,
- &iblock_dev_attrib_max_unmap_block_desc_count.attr,
- &iblock_dev_attrib_unmap_granularity.attr,
- &iblock_dev_attrib_unmap_granularity_alignment.attr,
- &iblock_dev_attrib_max_write_same_len.attr,
- NULL,
-};
-
-static struct se_subsystem_api iblock_template = {
+static const struct target_backend_ops iblock_ops = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
.inquiry_rev = IBLOCK_VERSION,
@@ -918,21 +858,17 @@ static struct se_subsystem_api iblock_template = {
.get_io_min = iblock_get_io_min,
.get_io_opt = iblock_get_io_opt,
.get_write_cache = iblock_get_write_cache,
+ .tb_dev_attrib_attrs = sbc_attrib_attrs,
};
static int __init iblock_module_init(void)
{
- struct target_backend_cits *tbc = &iblock_template.tb_cits;
-
- target_core_setup_sub_cits(&iblock_template);
- tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;
-
- return transport_subsystem_register(&iblock_template);
+ return transport_backend_register(&iblock_ops);
}
static void __exit iblock_module_exit(void)
{
- transport_subsystem_release(&iblock_template);
+ target_backend_unregister(&iblock_ops);
}
MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 68bd7f5d9f73..99c24acfe676 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -1,6 +1,53 @@
#ifndef TARGET_CORE_INTERNAL_H
#define TARGET_CORE_INTERNAL_H
+#define TARGET_CORE_NAME_MAX_LEN 64
+#define TARGET_FABRIC_NAME_SIZE 32
+
+struct target_backend {
+ struct list_head list;
+
+ const struct target_backend_ops *ops;
+
+ struct config_item_type tb_dev_cit;
+ struct config_item_type tb_dev_attrib_cit;
+ struct config_item_type tb_dev_pr_cit;
+ struct config_item_type tb_dev_wwn_cit;
+ struct config_item_type tb_dev_alua_tg_pt_gps_cit;
+ struct config_item_type tb_dev_stat_cit;
+};
+
+struct target_fabric_configfs {
+ atomic_t tf_access_cnt;
+ struct list_head tf_list;
+ struct config_group tf_group;
+ struct config_group tf_disc_group;
+ struct config_group *tf_default_groups[2];
+ const struct target_core_fabric_ops *tf_ops;
+
+ struct config_item_type tf_discovery_cit;
+ struct config_item_type tf_wwn_cit;
+ struct config_item_type tf_wwn_fabric_stats_cit;
+ struct config_item_type tf_tpg_cit;
+ struct config_item_type tf_tpg_base_cit;
+ struct config_item_type tf_tpg_lun_cit;
+ struct config_item_type tf_tpg_port_cit;
+ struct config_item_type tf_tpg_port_stat_cit;
+ struct config_item_type tf_tpg_np_cit;
+ struct config_item_type tf_tpg_np_base_cit;
+ struct config_item_type tf_tpg_attrib_cit;
+ struct config_item_type tf_tpg_auth_cit;
+ struct config_item_type tf_tpg_param_cit;
+ struct config_item_type tf_tpg_nacl_cit;
+ struct config_item_type tf_tpg_nacl_base_cit;
+ struct config_item_type tf_tpg_nacl_attrib_cit;
+ struct config_item_type tf_tpg_nacl_auth_cit;
+ struct config_item_type tf_tpg_nacl_param_cit;
+ struct config_item_type tf_tpg_nacl_stat_cit;
+ struct config_item_type tf_tpg_mappedlun_cit;
+ struct config_item_type tf_tpg_mappedlun_stat_cit;
+};
+
/* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp;
@@ -8,28 +55,27 @@ extern struct t10_alua_lu_gp *default_lu_gp;
extern struct mutex g_device_mutex;
extern struct list_head g_device_list;
+int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev);
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
-int core_free_device_list_for_node(struct se_node_acl *,
+void target_pr_kref_release(struct kref *);
+void core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
-void core_update_device_list_access(u32, u32, struct se_node_acl *);
+void core_update_device_list_access(u64, u32, struct se_node_acl *);
+struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
- u32, u32, struct se_node_acl *, struct se_portal_group *);
-int core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
- u32, u32, struct se_node_acl *, struct se_portal_group *);
+ u64, u32, struct se_node_acl *, struct se_portal_group *);
+void core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
+ struct se_node_acl *, struct se_portal_group *);
void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
-int core_dev_export(struct se_device *, struct se_portal_group *,
- struct se_lun *);
-void core_dev_unexport(struct se_device *, struct se_portal_group *,
- struct se_lun *);
-struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
+int core_dev_add_lun(struct se_portal_group *, struct se_device *,
+ struct se_lun *lun);
void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
-struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
- struct se_node_acl *, u32, int *);
+ struct se_node_acl *, u64, int *);
int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
- struct se_lun_acl *, u32, u32);
-int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
- struct se_lun *, struct se_lun_acl *);
+ struct se_lun_acl *, struct se_lun *lun, u32);
+int core_dev_del_initiator_node_lun_acl(struct se_lun *,
+ struct se_lun_acl *);
void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
int core_dev_setup_virtual_lun0(void);
@@ -38,6 +84,18 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
int target_configure_device(struct se_device *dev);
void target_free_device(struct se_device *);
+/* target_core_configfs.c */
+void target_setup_backend_cits(struct target_backend *);
+
+/* target_core_fabric_lib.c */
+int target_get_pr_transport_id_len(struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg, int *format_code);
+int target_get_pr_transport_id(struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg, int *format_code,
+ unsigned char *buf);
+const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+ const char *buf, u32 *out_tid_len, char **port_nexus_ptr);
+
/* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32);
int core_delete_hba(struct se_hba *);
@@ -53,12 +111,16 @@ extern struct se_device *g_lun0_dev;
struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
const char *);
-void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
+void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
+ struct se_lun *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
-struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
+struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
u32, struct se_device *);
void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
+struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
+ const char *initiatorname);
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl);
/* target_core_transport.c */
extern struct kmem_cache *se_tmr_req_cache;
@@ -77,14 +139,19 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
-int transport_clear_lun_ref(struct se_lun *);
+void transport_clear_lun_ref(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
+bool target_check_wce(struct se_device *dev);
+bool target_check_fua(struct se_device *dev);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_device *);
void target_stat_setup_port_default_groups(struct se_lun *);
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
+/* target_core_xcopy.c */
+extern struct se_portal_group xcopy_pt_tpg;
+
#endif /* TARGET_CORE_INTERNAL_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 8e5fa291f878..0fdbe43b7dad 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -35,7 +35,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_pr.h"
@@ -45,7 +44,6 @@
* Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
*/
struct pr_transport_id_holder {
- int dest_local_nexus;
struct t10_pr_registration *dest_pr_reg;
struct se_portal_group *dest_tpg;
struct se_node_acl *dest_node_acl;
@@ -231,9 +229,10 @@ target_scsi2_reservation_release(struct se_cmd *cmd)
dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
}
tpg = sess->se_tpg;
- pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
- " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
+ pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->"
+ " MAPPED LUN: %llu for %s\n",
+ tpg->se_tpg_tfo->get_fabric_name(),
+ cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
sess->se_node_acl->initiatorname);
out_unlock:
@@ -277,12 +276,12 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
(dev->dev_reserved_node_acl != sess->se_node_acl)) {
pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
tpg->se_tpg_tfo->get_fabric_name());
- pr_err("Original reserver LUN: %u %s\n",
+ pr_err("Original reserver LUN: %llu %s\n",
cmd->se_lun->unpacked_lun,
dev->dev_reserved_node_acl->initiatorname);
- pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
+ pr_err("Current attempt - LUN: %llu -> MAPPED LUN: %llu"
" from %s \n", cmd->se_lun->unpacked_lun,
- cmd->se_deve->mapped_lun,
+ cmd->orig_fe_lun,
sess->se_node_acl->initiatorname);
ret = TCM_RESERVATION_CONFLICT;
goto out_unlock;
@@ -294,9 +293,9 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
dev->dev_res_bin_isid = sess->sess_bin_isid;
dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
}
- pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+ pr_debug("SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
+ cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
sess->se_node_acl->initiatorname);
out_unlock:
@@ -314,28 +313,31 @@ out:
* This function is called by those initiator ports who are *NOT*
* the active PR reservation holder when a reservation is present.
*/
-static int core_scsi3_pr_seq_non_holder(
- struct se_cmd *cmd,
- u32 pr_reg_type)
+static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
+ bool isid_mismatch)
{
unsigned char *cdb = cmd->t_task_cdb;
- struct se_dev_entry *se_deve;
struct se_session *se_sess = cmd->se_sess;
- int other_cdb = 0, ignore_reg;
+ struct se_node_acl *nacl = se_sess->se_node_acl;
+ int other_cdb = 0;
int registered_nexus = 0, ret = 1; /* Conflict by default */
int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
int we = 0; /* Write Exclusive */
int legacy = 0; /* Act like a legacy device and return
* RESERVATION CONFLICT on some CDBs */
- se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
- /*
- * Determine if the registration should be ignored due to
- * non-matching ISIDs in target_scsi3_pr_reservation_check().
- */
- ignore_reg = (pr_reg_type & 0x80000000);
- if (ignore_reg)
- pr_reg_type &= ~0x80000000;
+ if (isid_mismatch) {
+ registered_nexus = 0;
+ } else {
+ struct se_dev_entry *se_deve;
+
+ rcu_read_lock();
+ se_deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+ if (se_deve)
+ registered_nexus = test_bit(DEF_PR_REG_ACTIVE,
+ &se_deve->deve_flags);
+ rcu_read_unlock();
+ }
switch (pr_reg_type) {
case PR_TYPE_WRITE_EXCLUSIVE:
@@ -345,8 +347,6 @@ static int core_scsi3_pr_seq_non_holder(
* Some commands are only allowed for the persistent reservation
* holder.
*/
- if ((se_deve->def_pr_registered) && !(ignore_reg))
- registered_nexus = 1;
break;
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
we = 1;
@@ -355,8 +355,6 @@ static int core_scsi3_pr_seq_non_holder(
* Some commands are only allowed for registered I_T Nexuses.
*/
reg_only = 1;
- if ((se_deve->def_pr_registered) && !(ignore_reg))
- registered_nexus = 1;
break;
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
we = 1;
@@ -365,8 +363,6 @@ static int core_scsi3_pr_seq_non_holder(
* Each registered I_T Nexus is a reservation holder.
*/
all_reg = 1;
- if ((se_deve->def_pr_registered) && !(ignore_reg))
- registered_nexus = 1;
break;
default:
return -EINVAL;
@@ -572,6 +568,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd)
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
u32 pr_reg_type;
+ bool isid_mismatch = false;
if (!dev->dev_pr_res_holder)
return 0;
@@ -584,7 +581,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd)
if (dev->dev_pr_res_holder->isid_present_at_reg) {
if (dev->dev_pr_res_holder->pr_reg_bin_isid !=
sess->sess_bin_isid) {
- pr_reg_type |= 0x80000000;
+ isid_mismatch = true;
goto check_nonholder;
}
}
@@ -592,7 +589,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd)
return 0;
check_nonholder:
- if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type))
+ if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type, isid_mismatch))
return TCM_RESERVATION_CONFLICT;
return 0;
}
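
With the ISID mismatch carried as an explicit flag, the registered-nexus check reduces to an RCU-protected bit test. A hypothetical helper restating the lookup above: the PR-active state is now the atomic DEF_PR_REG_ACTIVE bit in deve_flags, replacing the old def_pr_registered int and the 0x80000000 tag smuggled through pr_reg_type:

	static bool pr_nexus_is_registered(struct se_node_acl *nacl, u64 fe_lun)
	{
		struct se_dev_entry *deve;
		bool active = false;

		rcu_read_lock();
		deve = target_nacl_find_deve(nacl, fe_lun);
		if (deve)
			active = test_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
		rcu_read_unlock();

		return active;
	}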
@@ -620,7 +617,9 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
struct se_device *dev,
struct se_node_acl *nacl,
+ struct se_lun *lun,
struct se_dev_entry *deve,
+ u64 mapped_lun,
unsigned char *isid,
u64 sa_res_key,
int all_tg_pt,
@@ -642,12 +641,12 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
atomic_set(&pr_reg->pr_res_holders, 0);
pr_reg->pr_reg_nacl = nacl;
pr_reg->pr_reg_deve = deve;
- pr_reg->pr_res_mapped_lun = deve->mapped_lun;
- pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
+ pr_reg->pr_res_mapped_lun = mapped_lun;
+ pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
+ pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
pr_reg->pr_res_key = sa_res_key;
pr_reg->pr_reg_all_tg_pt = all_tg_pt;
pr_reg->pr_reg_aptpl = aptpl;
- pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
/*
* If an ISID value for this SCSI Initiator Port exists,
* save it to the registration now.
@@ -671,7 +670,9 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
static struct t10_pr_registration *__core_scsi3_alloc_registration(
struct se_device *dev,
struct se_node_acl *nacl,
+ struct se_lun *lun,
struct se_dev_entry *deve,
+ u64 mapped_lun,
unsigned char *isid,
u64 sa_res_key,
int all_tg_pt,
@@ -679,7 +680,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
{
struct se_dev_entry *deve_tmp;
struct se_node_acl *nacl_tmp;
- struct se_port *port, *port_tmp;
+ struct se_lun_acl *lacl_tmp;
+ struct se_lun *lun_tmp, *next, *dest_lun;
const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
int ret;
@@ -687,8 +689,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* Create a registration for the I_T Nexus upon which the
* PROUT REGISTER was received.
*/
- pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
- sa_res_key, all_tg_pt, aptpl);
+ pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, lun, deve, mapped_lun,
+ isid, sa_res_key, all_tg_pt,
+ aptpl);
if (!pr_reg)
return NULL;
/*
@@ -701,13 +704,13 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* for ALL_TG_PT=1
*/
spin_lock(&dev->se_port_lock);
- list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
- atomic_inc_mb(&port->sep_tg_pt_ref_cnt);
+ list_for_each_entry_safe(lun_tmp, next, &dev->dev_sep_list, lun_dev_link) {
+ if (!percpu_ref_tryget_live(&lun_tmp->lun_ref))
+ continue;
spin_unlock(&dev->se_port_lock);
- spin_lock_bh(&port->sep_alua_lock);
- list_for_each_entry(deve_tmp, &port->sep_alua_list,
- alua_port_list) {
+ spin_lock(&lun_tmp->lun_deve_lock);
+ list_for_each_entry(deve_tmp, &lun_tmp->lun_deve_list, lun_link) {
/*
* This pointer will be NULL for demo mode MappedLUNs
* that have not been made explicit via a ConfigFS
@@ -716,7 +719,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (!deve_tmp->se_lun_acl)
continue;
- nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
+ lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl,
+ lockdep_is_held(&lun_tmp->lun_deve_lock));
+ nacl_tmp = lacl_tmp->se_lun_nacl;
/*
* Skip the matching struct se_node_acl that is allocated
* above..
@@ -736,8 +741,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
continue;
- atomic_inc_mb(&deve_tmp->pr_ref_count);
- spin_unlock_bh(&port->sep_alua_lock);
+ kref_get(&deve_tmp->pr_kref);
+ spin_unlock(&lun_tmp->lun_deve_lock);
/*
* Grab a configfs group dependency that is released
* for the exception path at label out: below, or upon
@@ -748,8 +753,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (ret < 0) {
pr_err("core_scsi3_lunacl_depend"
"_item() failed\n");
- atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
- atomic_dec_mb(&deve_tmp->pr_ref_count);
+ percpu_ref_put(&lun_tmp->lun_ref);
+ kref_put(&deve_tmp->pr_kref, target_pr_kref_release);
goto out;
}
/*
@@ -759,24 +764,27 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* the original *pr_reg is processed in
* __core_scsi3_add_registration()
*/
+ dest_lun = rcu_dereference_check(deve_tmp->se_lun,
+ atomic_read(&deve_tmp->pr_kref.refcount) != 0);
+
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
- nacl_tmp, deve_tmp, NULL,
+ nacl_tmp, dest_lun, deve_tmp,
+ deve_tmp->mapped_lun, NULL,
sa_res_key, all_tg_pt, aptpl);
if (!pr_reg_atp) {
- atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
- atomic_dec_mb(&deve_tmp->pr_ref_count);
+ percpu_ref_put(&lun_tmp->lun_ref);
core_scsi3_lunacl_undepend_item(deve_tmp);
goto out;
}
list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
&pr_reg->pr_reg_atp_list);
- spin_lock_bh(&port->sep_alua_lock);
+ spin_lock(&lun_tmp->lun_deve_lock);
}
- spin_unlock_bh(&port->sep_alua_lock);
+ spin_unlock(&lun_tmp->lun_deve_lock);
spin_lock(&dev->se_port_lock);
- atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
+ percpu_ref_put(&lun_tmp->lun_ref);
}
spin_unlock(&dev->se_port_lock);
@@ -797,10 +805,10 @@ int core_scsi3_alloc_aptpl_registration(
u64 sa_res_key,
unsigned char *i_port,
unsigned char *isid,
- u32 mapped_lun,
+ u64 mapped_lun,
unsigned char *t_port,
u16 tpgt,
- u32 target_lun,
+ u64 target_lun,
int res_holder,
int all_tg_pt,
u8 type)
@@ -831,7 +839,6 @@ int core_scsi3_alloc_aptpl_registration(
pr_reg->pr_res_key = sa_res_key;
pr_reg->pr_reg_all_tg_pt = all_tg_pt;
pr_reg->pr_reg_aptpl = 1;
- pr_reg->pr_reg_tg_pt_lun = NULL;
pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
pr_reg->pr_res_type = type;
/*
@@ -895,9 +902,9 @@ static int __core_scsi3_check_aptpl_registration(
struct se_device *dev,
struct se_portal_group *tpg,
struct se_lun *lun,
- u32 target_lun,
+ u64 target_lun,
struct se_node_acl *nacl,
- struct se_dev_entry *deve)
+ u64 mapped_lun)
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
@@ -925,14 +932,13 @@ static int __core_scsi3_check_aptpl_registration(
pr_reg_aptpl_list) {
if (!strcmp(pr_reg->pr_iport, i_port) &&
- (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
+ (pr_reg->pr_res_mapped_lun == mapped_lun) &&
!(strcmp(pr_reg->pr_tport, t_port)) &&
(pr_reg->pr_reg_tpgt == tpgt) &&
(pr_reg->pr_aptpl_target_lun == target_lun)) {
pr_reg->pr_reg_nacl = nacl;
- pr_reg->pr_reg_deve = deve;
- pr_reg->pr_reg_tg_pt_lun = lun;
+ pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
list_del(&pr_reg->pr_reg_aptpl_list);
spin_unlock(&pr_tmpl->aptpl_reg_lock);
@@ -967,15 +973,14 @@ int core_scsi3_check_aptpl_registration(
struct se_portal_group *tpg,
struct se_lun *lun,
struct se_node_acl *nacl,
- u32 mapped_lun)
+ u64 mapped_lun)
{
- struct se_dev_entry *deve = nacl->device_list[mapped_lun];
-
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return 0;
return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
- lun->unpacked_lun, nacl, deve);
+ lun->unpacked_lun, nacl,
+ mapped_lun);
}
static void __core_scsi3_dump_registration(
@@ -1009,10 +1014,6 @@ static void __core_scsi3_dump_registration(
pr_reg->pr_reg_aptpl);
}
-/*
- * this function can be called with struct se_device->dev_reservation_lock
- * when register_move = 1
- */
static void __core_scsi3_add_registration(
struct se_device *dev,
struct se_node_acl *nacl,
@@ -1023,6 +1024,7 @@ static void __core_scsi3_add_registration(
const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ struct se_dev_entry *deve;
/*
* Increment PRgeneration counter for struct se_device upon a successful
@@ -1039,10 +1041,16 @@ static void __core_scsi3_add_registration(
spin_lock(&pr_tmpl->registration_lock);
list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
- pr_reg->pr_reg_deve->def_pr_registered = 1;
__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
spin_unlock(&pr_tmpl->registration_lock);
+
+ rcu_read_lock();
+ deve = pr_reg->pr_reg_deve;
+ if (deve)
+ set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+ rcu_read_unlock();
+
/*
* Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
*/
@@ -1054,6 +1062,8 @@ static void __core_scsi3_add_registration(
*/
list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+ struct se_node_acl *nacl_tmp = pr_reg_tmp->pr_reg_nacl;
+
list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
@@ -1061,12 +1071,17 @@ static void __core_scsi3_add_registration(
spin_lock(&pr_tmpl->registration_lock);
list_add_tail(&pr_reg_tmp->pr_reg_list,
&pr_tmpl->registration_list);
- pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
- __core_scsi3_dump_registration(tfo, dev,
- pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
- register_type);
+ __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp,
+ register_type);
spin_unlock(&pr_tmpl->registration_lock);
+
+ rcu_read_lock();
+ deve = pr_reg_tmp->pr_reg_deve;
+ if (deve)
+ set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+ rcu_read_unlock();
+
/*
* Drop configfs group dependency reference from
* __core_scsi3_alloc_registration()
@@ -1078,7 +1093,9 @@ static void __core_scsi3_add_registration(
static int core_scsi3_alloc_registration(
struct se_device *dev,
struct se_node_acl *nacl,
+ struct se_lun *lun,
struct se_dev_entry *deve,
+ u64 mapped_lun,
unsigned char *isid,
u64 sa_res_key,
int all_tg_pt,
@@ -1088,8 +1105,9 @@ static int core_scsi3_alloc_registration(
{
struct t10_pr_registration *pr_reg;
- pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
- sa_res_key, all_tg_pt, aptpl);
+ pr_reg = __core_scsi3_alloc_registration(dev, nacl, lun, deve, mapped_lun,
+ isid, sa_res_key, all_tg_pt,
+ aptpl);
if (!pr_reg)
return -EPERM;
@@ -1242,13 +1260,13 @@ static void __core_scsi3_free_registration(
const struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+ struct se_dev_entry *deve;
char i_buf[PR_REG_ISID_ID_LEN];
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
- pr_reg->pr_reg_deve->def_pr_registered = 0;
- pr_reg->pr_reg_deve->pr_res_key = 0;
if (!list_empty(&pr_reg->pr_reg_list))
list_del(&pr_reg->pr_reg_list);
/*
@@ -1257,6 +1275,8 @@ static void __core_scsi3_free_registration(
*/
if (dec_holders)
core_scsi3_put_pr_reg(pr_reg);
+
+ spin_unlock(&pr_tmpl->registration_lock);
/*
* Wait until all references from any other I_T nexuses for this
* *pr_reg have been released. Because list_del() is called above,
@@ -1264,13 +1284,18 @@ static void __core_scsi3_free_registration(
* count back to zero, and we release *pr_reg.
*/
while (atomic_read(&pr_reg->pr_res_holders) != 0) {
- spin_unlock(&pr_tmpl->registration_lock);
pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
tfo->get_fabric_name());
cpu_relax();
- spin_lock(&pr_tmpl->registration_lock);
}
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, pr_reg->pr_res_mapped_lun);
+ if (deve)
+ clear_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+ rcu_read_unlock();
+
+ spin_lock(&pr_tmpl->registration_lock);
pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
" Node: %s%s\n", tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname,
@@ -1392,12 +1417,14 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+ struct se_lun_acl *lun_acl;
struct se_node_acl *nacl;
struct se_portal_group *tpg;
/*
* For nacl->dynamic_node_acl=1
*/
+ lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
+ atomic_read(&se_deve->pr_kref.refcount) != 0);
if (!lun_acl)
return 0;
@@ -1409,21 +1436,23 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+ struct se_lun_acl *lun_acl;
struct se_node_acl *nacl;
struct se_portal_group *tpg;
/*
* For nacl->dynamic_node_acl=1
*/
+ lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
+ atomic_read(&se_deve->pr_kref.refcount) != 0);
if (!lun_acl) {
- atomic_dec_mb(&se_deve->pr_ref_count);
+ kref_put(&se_deve->pr_kref, target_pr_kref_release);
return;
}
nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg;
target_undepend_item(&lun_acl->se_lun_group.cg_item);
- atomic_dec_mb(&se_deve->pr_ref_count);
+ kref_put(&se_deve->pr_kref, target_pr_kref_release);
}
static sense_reason_t
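
The rcu_dereference_check() calls added throughout this patch share one justification: the caller holds a pr_kref reference that pins the se_dev_entry, so its RCU-managed pointers may be read outside rcu_read_lock(). A hypothetical helper naming the idiom:

	static struct se_lun_acl *pr_deve_lun_acl(struct se_dev_entry *deve)
	{
		/* The refcount test documents why no rcu_read_lock() is needed. */
		return rcu_dereference_check(deve->se_lun_acl,
				atomic_read(&deve->pr_kref.refcount) != 0);
	}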
@@ -1436,30 +1465,25 @@ core_scsi3_decode_spec_i_port(
int aptpl)
{
struct se_device *dev = cmd->se_dev;
- struct se_port *tmp_port;
struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
struct se_session *se_sess = cmd->se_sess;
struct se_node_acl *dest_node_acl = NULL;
- struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
+ struct se_dev_entry *dest_se_deve = NULL;
struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
LIST_HEAD(tid_dest_list);
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
- const struct target_core_fabric_ops *tmp_tf_ops;
- unsigned char *buf;
- unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
+ unsigned char *buf, *ptr, proto_ident;
+ const unsigned char *i_str;
char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
sense_reason_t ret;
u32 tpdl, tid_len = 0;
- int dest_local_nexus;
u32 dest_rtpi = 0;
- local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Allocate a struct pr_transport_id_holder and setup the
- * local_node_acl and local_se_deve pointers and add to
- * struct list_head tid_dest_list for add registration
- * processing in the loop of tid_dest_list below.
+ * local_node_acl pointer and add to struct list_head tid_dest_list
+ * for add registration processing in the loop of tid_dest_list below.
*/
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
@@ -1469,10 +1493,10 @@ core_scsi3_decode_spec_i_port(
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
tidh_new->dest_node_acl = se_sess->se_node_acl;
- tidh_new->dest_se_deve = local_se_deve;
local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
- se_sess->se_node_acl, local_se_deve, l_isid,
+ se_sess->se_node_acl, cmd->se_lun,
+ NULL, cmd->orig_fe_lun, l_isid,
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
@@ -1481,10 +1505,10 @@ core_scsi3_decode_spec_i_port(
tidh_new->dest_pr_reg = local_pr_reg;
/*
* The local I_T nexus does not hold any configfs dependencies,
- * so we set tid_h->dest_local_nexus=1 to prevent the
+ * so we set tidh_new->dest_se_deve to NULL to prevent the
* configfs_undepend_item() calls in the tid_dest_list loops below.
*/
- tidh_new->dest_local_nexus = 1;
+ tidh_new->dest_se_deve = NULL;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
if (cmd->data_length < 28) {
@@ -1525,32 +1549,25 @@ core_scsi3_decode_spec_i_port(
ptr = &buf[28];
while (tpdl > 0) {
+ struct se_lun *dest_lun, *tmp_lun;
+
proto_ident = (ptr[0] & 0x0f);
dest_tpg = NULL;
spin_lock(&dev->se_port_lock);
- list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
- tmp_tpg = tmp_port->sep_tpg;
- if (!tmp_tpg)
- continue;
- tmp_tf_ops = tmp_tpg->se_tpg_tfo;
- if (!tmp_tf_ops)
- continue;
- if (!tmp_tf_ops->get_fabric_proto_ident ||
- !tmp_tf_ops->tpg_parse_pr_out_transport_id)
- continue;
+ list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+ tmp_tpg = tmp_lun->lun_tpg;
+
/*
* Look for the matching proto_ident provided by
* the received TransportID
*/
- tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
- if (tmp_proto_ident != proto_ident)
+ if (tmp_tpg->proto_id != proto_ident)
continue;
- dest_rtpi = tmp_port->sep_rtpi;
+ dest_rtpi = tmp_lun->lun_rtpi;
- i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
- tmp_tpg, (const char *)ptr, &tid_len,
- &iport_ptr);
+ i_str = target_parse_pr_out_transport_id(tmp_tpg,
+ (const char *)ptr, &tid_len, &iport_ptr);
if (!i_str)
continue;
@@ -1569,12 +1586,12 @@ core_scsi3_decode_spec_i_port(
* from the decoded fabric module specific TransportID
* at *i_str.
*/
- spin_lock_irq(&tmp_tpg->acl_node_lock);
+ mutex_lock(&tmp_tpg->acl_node_mutex);
dest_node_acl = __core_tpg_get_initiator_node_acl(
tmp_tpg, i_str);
if (dest_node_acl)
atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
- spin_unlock_irq(&tmp_tpg->acl_node_lock);
+ mutex_unlock(&tmp_tpg->acl_node_mutex);
if (!dest_node_acl) {
core_scsi3_tpg_undepend_item(tmp_tpg);
@@ -1644,7 +1661,7 @@ core_scsi3_decode_spec_i_port(
if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item()"
" failed\n");
- atomic_dec_mb(&dest_se_deve->pr_ref_count);
+ kref_put(&dest_se_deve->pr_kref, target_pr_kref_release);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1652,7 +1669,7 @@ core_scsi3_decode_spec_i_port(
}
pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
- " dest_se_deve mapped_lun: %u\n",
+ " dest_se_deve mapped_lun: %llu\n",
dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
@@ -1708,9 +1725,13 @@ core_scsi3_decode_spec_i_port(
* and then call __core_scsi3_add_registration() in the
* 2nd loop which will never fail.
*/
+ dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
+ atomic_read(&dest_se_deve->pr_kref.refcount) != 0);
+
dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
- dest_node_acl, dest_se_deve, iport_ptr,
- sa_res_key, all_tg_pt, aptpl);
+ dest_node_acl, dest_lun, dest_se_deve,
+ dest_se_deve->mapped_lun, iport_ptr,
+ sa_res_key, all_tg_pt, aptpl);
if (!dest_pr_reg) {
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
@@ -1748,7 +1769,6 @@ core_scsi3_decode_spec_i_port(
dest_node_acl = tidh->dest_node_acl;
dest_se_deve = tidh->dest_se_deve;
dest_pr_reg = tidh->dest_pr_reg;
- dest_local_nexus = tidh->dest_local_nexus;
list_del(&tidh->dest_list);
kfree(tidh);
@@ -1761,10 +1781,11 @@ core_scsi3_decode_spec_i_port(
pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
" registered Transport ID for Node: %s%s Mapped LUN:"
- " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
- dest_node_acl->initiatorname, i_buf, dest_se_deve->mapped_lun);
+ " %llu\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
+ dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
+ dest_se_deve->mapped_lun : 0);
- if (dest_local_nexus)
+ if (!dest_se_deve)
continue;
core_scsi3_lunacl_undepend_item(dest_se_deve);
@@ -1785,7 +1806,6 @@ out:
dest_node_acl = tidh->dest_node_acl;
dest_se_deve = tidh->dest_se_deve;
dest_pr_reg = tidh->dest_pr_reg;
- dest_local_nexus = tidh->dest_local_nexus;
list_del(&tidh->dest_list);
kfree(tidh);
@@ -1803,7 +1823,7 @@ out:
kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
- if (dest_local_nexus)
+ if (!dest_se_deve)
continue;
core_scsi3_lunacl_undepend_item(dest_se_deve);
@@ -1818,7 +1838,6 @@ static int core_scsi3_update_aptpl_buf(
unsigned char *buf,
u32 pr_aptpl_buf_len)
{
- struct se_lun *lun;
struct se_portal_group *tpg;
struct t10_pr_registration *pr_reg;
unsigned char tmp[512], isid_buf[32];
@@ -1837,7 +1856,6 @@ static int core_scsi3_update_aptpl_buf(
tmp[0] = '\0';
isid_buf[0] = '\0';
tpg = pr_reg->pr_reg_nacl->se_tpg;
- lun = pr_reg->pr_reg_tg_pt_lun;
/*
* Write out any ISID value to APTPL metadata that was included
* in the original registration.
@@ -1856,7 +1874,7 @@ static int core_scsi3_update_aptpl_buf(
"sa_res_key=%llu\n"
"res_holder=1\nres_type=%02x\n"
"res_scope=%02x\nres_all_tg_pt=%d\n"
- "mapped_lun=%u\n", reg_count,
+ "mapped_lun=%llu\n", reg_count,
tpg->se_tpg_tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_res_type,
@@ -1866,7 +1884,7 @@ static int core_scsi3_update_aptpl_buf(
snprintf(tmp, 512, "PR_REG_START: %d\n"
"initiator_fabric=%s\ninitiator_node=%s\n%s"
"sa_res_key=%llu\nres_holder=0\n"
- "res_all_tg_pt=%d\nmapped_lun=%u\n",
+ "res_all_tg_pt=%d\nmapped_lun=%llu\n",
reg_count, tpg->se_tpg_tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
@@ -1885,11 +1903,12 @@ static int core_scsi3_update_aptpl_buf(
* Include information about the associated SCSI target port.
*/
snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
- "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
+ "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:"
" %d\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
- lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+ pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun,
+ reg_count);
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming APTPL metadata,"
@@ -2000,7 +2019,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
{
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
- struct se_dev_entry *se_deve;
struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp;
@@ -2014,7 +2032,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
se_tpg = se_sess->se_tpg;
- se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
@@ -2045,7 +2062,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* Logical Unit of the SCSI device server.
*/
if (core_scsi3_alloc_registration(cmd->se_dev,
- se_sess->se_node_acl, se_deve, isid_ptr,
+ se_sess->se_node_acl, cmd->se_lun,
+ NULL, cmd->orig_fe_lun, isid_ptr,
sa_res_key, all_tg_pt, aptpl,
register_type, 0)) {
pr_err("Unable to allocate"
@@ -2066,7 +2084,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
if (ret != 0)
return ret;
}
-
return core_scsi3_update_and_write_aptpl(dev, aptpl);
}
@@ -2180,7 +2197,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
&pr_tmpl->registration_list,
pr_reg_list) {
- core_scsi3_ua_allocate(
+ target_ua_allocate_lun(
pr_reg_p->pr_reg_nacl,
pr_reg_p->pr_res_mapped_lun,
0x2A,
@@ -2607,7 +2624,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
if (pr_reg_p == pr_reg)
continue;
- core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+ target_ua_allocate_lun(pr_reg_p->pr_reg_nacl,
pr_reg_p->pr_res_mapped_lun,
0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
}
@@ -2630,7 +2647,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
struct se_session *se_sess = cmd->se_sess;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
- u32 pr_res_mapped_lun = 0;
+ u64 pr_res_mapped_lun = 0;
int calling_it_nexus = 0;
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2692,7 +2709,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
* additional sense code set to RESERVATIONS PREEMPTED.
*/
if (!calling_it_nexus)
- core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+ target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun,
0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
}
spin_unlock(&pr_tmpl->registration_lock);
@@ -2786,7 +2803,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
LIST_HEAD(preempt_and_abort_list);
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- u32 pr_res_mapped_lun = 0;
+ u64 pr_res_mapped_lun = 0;
int all_reg = 0, calling_it_nexus = 0;
bool sa_res_key_unmatched = sa_res_key != 0;
int prh_type = 0, prh_scope = 0;
@@ -2901,7 +2918,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
NULL, 0);
}
if (!calling_it_nexus)
- core_scsi3_ua_allocate(pr_reg_nacl,
+ target_ua_allocate_lun(pr_reg_nacl,
pr_res_mapped_lun, 0x2A,
ASCQ_2AH_REGISTRATIONS_PREEMPTED);
}
@@ -3007,7 +3024,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
* persistent reservation and/or registration, with the
* additional sense code set to REGISTRATIONS PREEMPTED;
*/
- core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+ target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
ASCQ_2AH_REGISTRATIONS_PREEMPTED);
}
spin_unlock(&pr_tmpl->registration_lock);
@@ -3040,7 +3057,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
if (calling_it_nexus)
continue;
- core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+ target_ua_allocate_lun(pr_reg->pr_reg_nacl,
pr_reg->pr_res_mapped_lun, 0x2A,
ASCQ_2AH_RESERVATIONS_RELEASED);
}
@@ -3099,15 +3116,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *dest_se_deve = NULL;
- struct se_lun *se_lun = cmd->se_lun;
+ struct se_lun *se_lun = cmd->se_lun, *tmp_lun;
struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
- struct se_port *se_port;
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
- unsigned char *initiator_str;
+ const unsigned char *initiator_str;
char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
int new_reg = 0, type, scope, matching_iname;
@@ -3186,12 +3202,10 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
}
spin_lock(&dev->se_port_lock);
- list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
- if (se_port->sep_rtpi != rtpi)
- continue;
- dest_se_tpg = se_port->sep_tpg;
- if (!dest_se_tpg)
+ list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+ if (tmp_lun->lun_rtpi != rtpi)
continue;
+ dest_se_tpg = tmp_lun->lun_tpg;
dest_tf_ops = dest_se_tpg->se_tpg_tfo;
if (!dest_tf_ops)
continue;
@@ -3230,23 +3244,16 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
" 0x%02x\n", proto_ident);
- if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
+ if (proto_ident != dest_se_tpg->proto_id) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
" proto_ident: 0x%02x does not match ident: 0x%02x"
" from fabric: %s\n", proto_ident,
- dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
+ dest_se_tpg->proto_id,
dest_tf_ops->get_fabric_name());
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
- if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
- pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
- " containg a valid tpg_parse_pr_out_transport_id"
- " function pointer\n");
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto out;
- }
- initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
+ initiator_str = target_parse_pr_out_transport_id(dest_se_tpg,
(const char *)&buf[24], &tmp_tid_len, &iport_ptr);
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
@@ -3295,12 +3302,12 @@ after_iport_check:
/*
* Locate the destination struct se_node_acl from the received Transport ID
*/
- spin_lock_irq(&dest_se_tpg->acl_node_lock);
+ mutex_lock(&dest_se_tpg->acl_node_mutex);
dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
initiator_str);
if (dest_node_acl)
atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
- spin_unlock_irq(&dest_se_tpg->acl_node_lock);
+ mutex_unlock(&dest_se_tpg->acl_node_mutex);
if (!dest_node_acl) {
pr_err("Unable to locate %s dest_node_acl for"
@@ -3337,14 +3344,14 @@ after_iport_check:
if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item() failed\n");
- atomic_dec_mb(&dest_se_deve->pr_ref_count);
+ kref_put(&dest_se_deve->pr_kref, target_pr_kref_release);
dest_se_deve = NULL;
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
- " ACL for dest_se_deve->mapped_lun: %u\n",
+ " ACL for dest_se_deve->mapped_lun: %llu\n",
dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
dest_se_deve->mapped_lun);
@@ -3421,13 +3428,17 @@ after_iport_check:
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
if (!dest_pr_reg) {
- if (core_scsi3_alloc_registration(cmd->se_dev,
- dest_node_acl, dest_se_deve, iport_ptr,
- sa_res_key, 0, aptpl, 2, 1)) {
- spin_unlock(&dev->dev_reservation_lock);
+ struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
+ atomic_read(&dest_se_deve->pr_kref.refcount) != 0);
+
+ spin_unlock(&dev->dev_reservation_lock);
+ if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
+ dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
+ iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
+ spin_lock(&dev->dev_reservation_lock);
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
new_reg = 1;
@@ -3883,9 +3894,10 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
- u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+ u32 add_desc_len = 0, add_len = 0;
u32 off = 8; /* off into first Full Status descriptor */
int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
+ int exp_desc_len, desc_len;
bool all_reg = false;
if (cmd->data_length < 8) {
@@ -3930,10 +3942,10 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* Determine expected length of $FABRIC_MOD specific
* TransportID full status descriptor..
*/
- exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len(
- se_tpg, se_nacl, pr_reg, &format_code);
-
- if ((exp_desc_len + add_len) > cmd->data_length) {
+ exp_desc_len = target_get_pr_transport_id_len(se_nacl, pr_reg,
+ &format_code);
+ if (exp_desc_len < 0 ||
+ exp_desc_len + add_len > cmd->data_length) {
pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
" out of buffer: %d\n", cmd->data_length);
spin_lock(&pr_tmpl->registration_lock);
@@ -3990,21 +4002,26 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* IDENTIFIER field are not defined by this standard.
*/
if (!pr_reg->pr_reg_all_tg_pt) {
- struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
+ u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi;
- buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
- buf[off++] = (port->sep_rtpi & 0xff);
+ buf[off++] = ((sep_rtpi >> 8) & 0xff);
+ buf[off++] = (sep_rtpi & 0xff);
} else
off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
+ buf[off+4] = se_tpg->proto_id;
+
/*
- * Now, have the $FABRIC_MOD fill in the protocol identifier
+ * Now, have the $FABRIC_MOD fill in the transport ID.
*/
- desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg,
- se_nacl, pr_reg, &format_code, &buf[off+4]);
+ desc_len = target_get_pr_transport_id(se_nacl, pr_reg,
+ &format_code, &buf[off+4]);
spin_lock(&pr_tmpl->registration_lock);
atomic_dec_mb(&pr_reg->pr_res_holders);
+
+ if (desc_len < 0)
+ break;
/*
* Set the ADDITIONAL DESCRIPTOR LENGTH
*/
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 749fd7bb7510..e3d26e9126a0 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -56,11 +56,11 @@ extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
- unsigned char *, unsigned char *, u32,
- unsigned char *, u16, u32, int, int, u8);
+ unsigned char *, unsigned char *, u64,
+ unsigned char *, u16, u64, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
struct se_portal_group *, struct se_lun *,
- struct se_node_acl *, u32);
+ struct se_node_acl *, u64);
extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
struct se_node_acl *);
extern void core_scsi3_free_all_registrations(struct se_device *);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 26581e215141..08e9084ee615 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -42,9 +42,9 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
#include "target_core_alua.h"
+#include "target_core_internal.h"
#include "target_core_pscsi.h"
#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
@@ -54,8 +54,6 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
return container_of(dev, struct pscsi_dev_virt, dev);
}
-static struct se_subsystem_api pscsi_template;
-
static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
static void pscsi_req_done(struct request *, int);
@@ -80,7 +78,7 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
- PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
+ PSCSI_VERSION, TARGET_CORE_VERSION);
pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
hba->hba_id);
@@ -579,6 +577,14 @@ static int pscsi_configure_device(struct se_device *dev)
return -ENODEV;
}
+static void pscsi_dev_call_rcu(struct rcu_head *p)
+{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+
+ kfree(pdv);
+}
+
static void pscsi_free_device(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
@@ -610,8 +616,7 @@ static void pscsi_free_device(struct se_device *dev)
pdv->pdv_sd = NULL;
}
-
- kfree(pdv);
+ call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
}
static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
@@ -635,12 +640,14 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
* Hack to make sure that Write-Protect modepage is set if R/O mode is
* forced.
*/
- if (!cmd->se_deve || !cmd->data_length)
+ if (!cmd->data_length)
goto after_mode_sense;
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
- if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
+ bool read_only = target_lun_is_rdonly(cmd);
+
+ if (read_only) {
unsigned char *buf;
buf = transport_kmap_data_sg(cmd);
@@ -1116,27 +1123,7 @@ static void pscsi_req_done(struct request *req, int uptodate)
kfree(pt);
}
-DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type);
-TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type);
-
-DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size);
-TB_DEV_ATTR_RO(pscsi, hw_block_size);
-
-DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors);
-TB_DEV_ATTR_RO(pscsi, hw_max_sectors);
-
-DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth);
-TB_DEV_ATTR_RO(pscsi, hw_queue_depth);
-
-static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
- &pscsi_dev_attrib_hw_pi_prot_type.attr,
- &pscsi_dev_attrib_hw_block_size.attr,
- &pscsi_dev_attrib_hw_max_sectors.attr,
- &pscsi_dev_attrib_hw_queue_depth.attr,
- NULL,
-};
-
-static struct se_subsystem_api pscsi_template = {
+static const struct target_backend_ops pscsi_ops = {
.name = "pscsi",
.owner = THIS_MODULE,
.transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
@@ -1152,21 +1139,17 @@ static struct se_subsystem_api pscsi_template = {
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
.get_device_type = pscsi_get_device_type,
.get_blocks = pscsi_get_blocks,
+ .tb_dev_attrib_attrs = passthrough_attrib_attrs,
};
static int __init pscsi_module_init(void)
{
- struct target_backend_cits *tbc = &pscsi_template.tb_cits;
-
- target_core_setup_sub_cits(&pscsi_template);
- tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs;
-
- return transport_subsystem_register(&pscsi_template);
+ return transport_backend_register(&pscsi_ops);
}
static void __exit pscsi_module_exit(void)
{
- transport_subsystem_release(&pscsi_template);
+ target_backend_unregister(&pscsi_ops);
}
MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
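
pscsi_free_device() above (and rd_free_device() below) now defer the final kfree() through call_rcu(), so a reader that looked the device up under rcu_read_lock() can never touch freed memory. A self-contained sketch of the same container_of-plus-callback shape, assuming nothing beyond standard C; the toy call_rcu() here runs the callback immediately, whereas the real one waits for a grace period:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };

struct se_device { int dev_index; struct rcu_head rcu_head; };
struct pscsi_dev_virt { struct se_device dev; };

/* Mirrors pscsi_dev_call_rcu(): recover the outer object, free it. */
static void dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct pscsi_dev_virt *pdv = container_of(dev, struct pscsi_dev_virt, dev);

	free(pdv);
}

/* Toy call_rcu(): the real one defers func until no RCU reader can
 * still hold a reference; here it is invoked right away. */
static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
{
	head->func = func;
	head->func(head);
}

int main(void)
{
	struct pscsi_dev_virt *pdv = calloc(1, sizeof(*pdv));

	call_rcu(&pdv->dev.rcu_head, dev_call_rcu);
	puts("device freed after (simulated) grace period");
	return 0;
}
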
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index b2d8f6f91633..4703f403f31c 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -33,7 +33,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
#include "target_core_rd.h"
@@ -42,10 +41,6 @@ static inline struct rd_dev *RD_DEV(struct se_device *dev)
return container_of(dev, struct rd_dev, dev);
}
-/* rd_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
struct rd_host *rd_host;
@@ -62,7 +57,7 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
- RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+ RD_HBA_VERSION, TARGET_CORE_VERSION);
return 0;
}
@@ -354,12 +349,20 @@ fail:
return ret;
}
+static void rd_dev_call_rcu(struct rcu_head *p)
+{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
+ kfree(rd_dev);
+}
+
static void rd_free_device(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_device_space(rd_dev);
- kfree(rd_dev);
+ call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
@@ -402,10 +405,7 @@ static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page
return NULL;
}
-typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int,
- unsigned int, struct scatterlist *, int);
-
-static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
+static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = RD_DEV(se_dev);
@@ -465,7 +465,16 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
- rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
+ if (is_read)
+ rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
+ prot_sg, prot_offset);
+ else
+ rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
+ cmd->t_prot_sg, 0);
+
+ if (!rc)
+ sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);
+
if (need_to_release)
kfree(prot_sg);
@@ -511,7 +520,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
data_direction == DMA_TO_DEVICE) {
- rc = rd_do_prot_rw(cmd, sbc_dif_verify_write);
+ rc = rd_do_prot_rw(cmd, false);
if (rc)
return rc;
}
@@ -579,7 +588,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
data_direction == DMA_FROM_DEVICE) {
- rc = rd_do_prot_rw(cmd, sbc_dif_verify_read);
+ rc = rd_do_prot_rw(cmd, true);
if (rc)
return rc;
}
@@ -693,42 +702,7 @@ rd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
-DEF_TB_DEFAULT_ATTRIBS(rd_mcp);
-
-static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
- &rd_mcp_dev_attrib_emulate_model_alias.attr,
- &rd_mcp_dev_attrib_emulate_dpo.attr,
- &rd_mcp_dev_attrib_emulate_fua_write.attr,
- &rd_mcp_dev_attrib_emulate_fua_read.attr,
- &rd_mcp_dev_attrib_emulate_write_cache.attr,
- &rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
- &rd_mcp_dev_attrib_emulate_tas.attr,
- &rd_mcp_dev_attrib_emulate_tpu.attr,
- &rd_mcp_dev_attrib_emulate_tpws.attr,
- &rd_mcp_dev_attrib_emulate_caw.attr,
- &rd_mcp_dev_attrib_emulate_3pc.attr,
- &rd_mcp_dev_attrib_pi_prot_type.attr,
- &rd_mcp_dev_attrib_hw_pi_prot_type.attr,
- &rd_mcp_dev_attrib_pi_prot_format.attr,
- &rd_mcp_dev_attrib_enforce_pr_isids.attr,
- &rd_mcp_dev_attrib_is_nonrot.attr,
- &rd_mcp_dev_attrib_emulate_rest_reord.attr,
- &rd_mcp_dev_attrib_force_pr_aptpl.attr,
- &rd_mcp_dev_attrib_hw_block_size.attr,
- &rd_mcp_dev_attrib_block_size.attr,
- &rd_mcp_dev_attrib_hw_max_sectors.attr,
- &rd_mcp_dev_attrib_optimal_sectors.attr,
- &rd_mcp_dev_attrib_hw_queue_depth.attr,
- &rd_mcp_dev_attrib_queue_depth.attr,
- &rd_mcp_dev_attrib_max_unmap_lba_count.attr,
- &rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
- &rd_mcp_dev_attrib_unmap_granularity.attr,
- &rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
- &rd_mcp_dev_attrib_max_write_same_len.attr,
- NULL,
-};
-
-static struct se_subsystem_api rd_mcp_template = {
+static const struct target_backend_ops rd_mcp_ops = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
.inquiry_rev = RD_MCP_VERSION,
@@ -744,25 +718,15 @@ static struct se_subsystem_api rd_mcp_template = {
.get_blocks = rd_get_blocks,
.init_prot = rd_init_prot,
.free_prot = rd_free_prot,
+ .tb_dev_attrib_attrs = sbc_attrib_attrs,
};
int __init rd_module_init(void)
{
- struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
- int ret;
-
- target_core_setup_sub_cits(&rd_mcp_template);
- tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;
-
- ret = transport_subsystem_register(&rd_mcp_template);
- if (ret < 0) {
- return ret;
- }
-
- return 0;
+ return transport_backend_register(&rd_mcp_ops);
}
void rd_module_exit(void)
{
- transport_subsystem_release(&rd_mcp_template);
+ target_backend_unregister(&rd_mcp_ops);
}
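
rd_do_prot_rw() used to take a dif_verify function pointer; once sbc_dif_verify_read()/sbc_dif_verify_write() collapse into a single sbc_dif_verify() (see the sbc.c hunks below), a bool is enough to select the protection scatterlist and offset. A small sketch of that refactor shape, with verify() as a stand-in for sbc_dif_verify():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for sbc_dif_verify(): one routine now serves both paths. */
static int verify(const char *sg, int off)
{
	printf("verify %s at offset %d\n", sg, off);
	return 0;
}

/* Old shape: do_prot_rw(cmd, verify_read) vs do_prot_rw(cmd, verify_write).
 * New shape: a direction flag picks the protection source instead. */
static int do_prot_rw(bool is_read, int prot_offset)
{
	int rc;

	if (is_read)
		rc = verify("local prot_sg", prot_offset);
	else
		rc = verify("cmd->t_prot_sg", 0);

	/* The copy into/out of the command's buffers happens only on
	 * success, which the old per-direction wrappers duplicated. */
	if (!rc)
		printf("copy prot data, read=%d\n", is_read);
	return rc;
}

int main(void)
{
	do_prot_rw(true, 16);
	do_prot_rw(false, 0);
	return 0;
}
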
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 43719b393ca9..e318ddbe15da 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -38,6 +38,7 @@
static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -177,6 +178,23 @@ sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
EXPORT_SYMBOL(sbc_get_write_same_sectors);
static sense_reason_t
+sbc_execute_write_same_unmap(struct se_cmd *cmd)
+{
+ struct sbc_ops *ops = cmd->protocol_data;
+ sector_t nolb = sbc_get_write_same_sectors(cmd);
+ sense_reason_t ret;
+
+ if (nolb) {
+ ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
+ if (ret)
+ return ret;
+ }
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
target_complete_cmd(cmd, GOOD);
@@ -299,7 +317,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
* translated into block discard requests within backend code.
*/
if (flags[0] & 0x08) {
- if (!ops->execute_write_same_unmap)
+ if (!ops->execute_unmap)
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (!dev->dev_attrib.emulate_tpws) {
@@ -307,7 +325,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
" has emulate_tpws disabled\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
- cmd->execute_cmd = ops->execute_write_same_unmap;
+ cmd->execute_cmd = sbc_execute_write_same_unmap;
return 0;
}
if (!ops->execute_write_same)
@@ -381,7 +399,9 @@ out:
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
- return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
+ struct sbc_ops *ops = cmd->protocol_data;
+
+ return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
cmd->data_direction);
}
@@ -560,6 +580,7 @@ out:
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
+ struct sbc_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
sense_reason_t ret;
int rc;
@@ -579,7 +600,7 @@ sbc_compare_and_write(struct se_cmd *cmd)
*/
cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
- ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
+ ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
DMA_FROM_DEVICE);
if (ret) {
cmd->transport_complete_callback = NULL;
@@ -738,14 +759,15 @@ static int
sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
if (cdb[1] & 0x10) {
- if (!dev->dev_attrib.emulate_dpo) {
+ /* see explanation in spc_emulate_modesense */
+ if (!target_check_fua(dev)) {
pr_err("Got CDB: 0x%02x with DPO bit set, but device"
" does not advertise support for DPO\n", cdb[0]);
return -EINVAL;
}
}
if (cdb[1] & 0x8) {
- if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) {
+ if (!target_check_fua(dev)) {
pr_err("Got CDB: 0x%02x with FUA bit set, but device"
" does not advertise support for FUA write\n",
cdb[0]);
@@ -765,12 +787,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
u32 sectors = 0;
sense_reason_t ret;
+ cmd->protocol_data = ops;
+
switch (cdb[0]) {
case READ_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
break;
case READ_10:
@@ -785,7 +808,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
break;
case READ_12:
@@ -800,7 +822,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
break;
case READ_16:
@@ -815,14 +836,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_10:
@@ -838,7 +857,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_12:
@@ -853,7 +871,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_16:
@@ -868,7 +885,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
break;
case XDWRITEREAD_10:
@@ -886,7 +902,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
/*
* Setup BIDI XOR callback to be run after I/O completion.
*/
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
cmd->transport_complete_callback = &xdreadwrite_callback;
break;
@@ -910,7 +925,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
* Setup BIDI XOR callback to be run after I/O
* completion.
*/
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
cmd->transport_complete_callback = &xdreadwrite_callback;
break;
@@ -954,7 +968,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
cmd->t_task_nolb = sectors;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
- cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_compare_and_write;
cmd->transport_complete_callback = compare_and_write_callback;
break;
@@ -1004,7 +1017,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
size = get_unaligned_be16(&cdb[7]);
- cmd->execute_cmd = ops->execute_unmap;
+ cmd->execute_cmd = sbc_execute_unmap;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb);
@@ -1092,12 +1105,10 @@ u32 sbc_get_device_type(struct se_device *dev)
}
EXPORT_SYMBOL(sbc_get_device_type);
-sense_reason_t
-sbc_execute_unmap(struct se_cmd *cmd,
- sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
- sector_t, sector_t),
- void *priv)
+static sense_reason_t
+sbc_execute_unmap(struct se_cmd *cmd)
{
+ struct sbc_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
unsigned char *buf, *ptr = NULL;
sector_t lba;
@@ -1161,7 +1172,7 @@ sbc_execute_unmap(struct se_cmd *cmd,
goto err;
}
- ret = do_unmap_fn(cmd, priv, lba, range);
+ ret = ops->execute_unmap(cmd, lba, range);
if (ret)
goto err;
@@ -1175,34 +1186,56 @@ err:
target_complete_cmd(cmd, GOOD);
return ret;
}
-EXPORT_SYMBOL(sbc_execute_unmap);
void
sbc_dif_generate(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_dif_v1_tuple *sdt;
- struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+ struct scatterlist *dsg = cmd->t_data_sg, *psg;
sector_t sector = cmd->t_task_lba;
void *daddr, *paddr;
int i, j, offset = 0;
+ unsigned int block_size = dev->dev_attrib.block_size;
- for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
- daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
- for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+ for (j = 0; j < psg->length;
+ j += sizeof(struct se_dif_v1_tuple)) {
+ __u16 crc;
+ unsigned int avail;
+
+ if (offset >= dsg->length) {
+ offset -= dsg->length;
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ }
- if (offset >= psg->length) {
- kunmap_atomic(paddr);
- psg = sg_next(psg);
- paddr = kmap_atomic(sg_page(psg)) + psg->offset;
- offset = 0;
+ sdt = paddr + j;
+ avail = min(block_size, dsg->length - offset);
+ crc = crc_t10dif(daddr + offset, avail);
+ if (avail < block_size) {
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ offset = block_size - avail;
+ crc = crc_t10dif_update(crc, daddr, offset);
+ } else {
+ offset += block_size;
}
- sdt = paddr + offset;
- sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
- dev->dev_attrib.block_size));
+ sdt->guard_tag = cpu_to_be16(crc);
if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
sdt->app_tag = 0;
@@ -1215,26 +1248,23 @@ sbc_dif_generate(struct se_cmd *cmd)
be32_to_cpu(sdt->ref_tag));
sector++;
- offset += sizeof(struct se_dif_v1_tuple);
}
- kunmap_atomic(paddr);
- kunmap_atomic(daddr);
+ kunmap_atomic(daddr - dsg->offset);
+ kunmap_atomic(paddr - psg->offset);
}
}
static sense_reason_t
sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt,
- const void *p, sector_t sector, unsigned int ei_lba)
+ __u16 crc, sector_t sector, unsigned int ei_lba)
{
- struct se_device *dev = cmd->se_dev;
- int block_size = dev->dev_attrib.block_size;
__be16 csum;
if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
goto check_ref;
- csum = cpu_to_be16(crc_t10dif(p, block_size));
+ csum = cpu_to_be16(crc);
if (sdt->guard_tag != csum) {
pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
@@ -1266,9 +1296,8 @@ check_ref:
return 0;
}
-static void
-sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
- struct scatterlist *sg, int sg_off)
+void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
+ struct scatterlist *sg, int sg_off)
{
struct se_device *dev = cmd->se_dev;
struct scatterlist *psg;
@@ -1300,100 +1329,54 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
copied += len;
psg_len -= len;
+ kunmap_atomic(addr - sg->offset - offset);
+
if (offset >= sg->length) {
sg = sg_next(sg);
offset = 0;
}
- kunmap_atomic(addr);
}
- kunmap_atomic(paddr);
+ kunmap_atomic(paddr - psg->offset);
}
}
+EXPORT_SYMBOL(sbc_dif_copy_prot);
sense_reason_t
-sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
- unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *psg, int psg_off)
{
struct se_device *dev = cmd->se_dev;
struct se_dif_v1_tuple *sdt;
- struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+ struct scatterlist *dsg = cmd->t_data_sg;
sector_t sector = start;
void *daddr, *paddr;
- int i, j, offset = 0;
+ int i;
sense_reason_t rc;
+ int dsg_off = 0;
+ unsigned int block_size = dev->dev_attrib.block_size;
- for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
- daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ for (; psg && sector < start + sectors; psg = sg_next(psg)) {
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
-
- for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
-
- if (offset >= psg->length) {
- kunmap_atomic(paddr);
- psg = sg_next(psg);
- paddr = kmap_atomic(sg_page(psg)) + psg->offset;
- offset = 0;
- }
-
- sdt = paddr + offset;
-
- pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
- " app_tag: 0x%04x ref_tag: %u\n",
- (unsigned long long)sector, sdt->guard_tag,
- sdt->app_tag, be32_to_cpu(sdt->ref_tag));
-
- rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
- ei_lba);
- if (rc) {
- kunmap_atomic(paddr);
- kunmap_atomic(daddr);
- cmd->bad_sector = sector;
- return rc;
- }
-
- sector++;
- ei_lba++;
- offset += sizeof(struct se_dif_v1_tuple);
- }
-
- kunmap_atomic(paddr);
- kunmap_atomic(daddr);
- }
- if (!sg)
- return 0;
-
- sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
-
- return 0;
-}
-EXPORT_SYMBOL(sbc_dif_verify_write);
-
-static sense_reason_t
-__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
- unsigned int ei_lba, struct scatterlist *sg, int sg_off)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_dif_v1_tuple *sdt;
- struct scatterlist *dsg, *psg = sg;
- sector_t sector = start;
- void *daddr, *paddr;
- int i, j, offset = sg_off;
- sense_reason_t rc;
-
- for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
- paddr = kmap_atomic(sg_page(psg)) + sg->offset;
-
- for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
- if (offset >= psg->length) {
- kunmap_atomic(paddr);
- psg = sg_next(psg);
- paddr = kmap_atomic(sg_page(psg)) + psg->offset;
- offset = 0;
+ for (i = psg_off; i < psg->length &&
+ sector < start + sectors;
+ i += sizeof(struct se_dif_v1_tuple)) {
+ __u16 crc;
+ unsigned int avail;
+
+ if (dsg_off >= dsg->length) {
+ dsg_off -= dsg->length;
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return 0;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
}
- sdt = paddr + offset;
+ sdt = paddr + i;
pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
" app_tag: 0x%04x ref_tag: %u\n",
@@ -1401,53 +1384,43 @@ __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
sdt->app_tag, be32_to_cpu(sdt->ref_tag));
if (sdt->app_tag == cpu_to_be16(0xffff)) {
- sector++;
- offset += sizeof(struct se_dif_v1_tuple);
- continue;
+ dsg_off += block_size;
+ goto next;
+ }
+
+ avail = min(block_size, dsg->length - dsg_off);
+ crc = crc_t10dif(daddr + dsg_off, avail);
+ if (avail < block_size) {
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return 0;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ dsg_off = block_size - avail;
+ crc = crc_t10dif_update(crc, daddr, dsg_off);
+ } else {
+ dsg_off += block_size;
}
- rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
- ei_lba);
+ rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
if (rc) {
- kunmap_atomic(paddr);
- kunmap_atomic(daddr);
+ kunmap_atomic(daddr - dsg->offset);
+ kunmap_atomic(paddr - psg->offset);
cmd->bad_sector = sector;
return rc;
}
-
+next:
sector++;
ei_lba++;
- offset += sizeof(struct se_dif_v1_tuple);
}
- kunmap_atomic(paddr);
- kunmap_atomic(daddr);
+ psg_off = 0;
+ kunmap_atomic(daddr - dsg->offset);
+ kunmap_atomic(paddr - psg->offset);
}
return 0;
}
-
-sense_reason_t
-sbc_dif_read_strip(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- u32 sectors = cmd->prot_length / dev->prot_length;
-
- return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
- cmd->t_prot_sg, 0);
-}
-
-sense_reason_t
-sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
- unsigned int ei_lba, struct scatterlist *sg, int sg_off)
-{
- sense_reason_t rc;
-
- rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
- if (rc)
- return rc;
-
- sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
- return 0;
-}
-EXPORT_SYMBOL(sbc_dif_verify_read);
+EXPORT_SYMBOL(sbc_dif_verify);
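
The reworked sbc_dif_generate()/sbc_dif_verify() above no longer assume a logical block sits entirely inside one data scatterlist entry: they checksum the bytes available in the current entry, hop to the next one, and finish the CRC with crc_t10dif_update(). A userspace sketch of that split-block pattern; sum16_update() is a trivial additive stand-in for the real CRC-T10DIF, chosen only because any checksum with an incremental form illustrates the split:

#include <stdio.h>
#include <string.h>

/* Stand-in for crc_t10dif()/crc_t10dif_update(). */
static unsigned short sum16_update(unsigned short sum, const unsigned char *p,
				   unsigned int len)
{
	while (len--)
		sum += *p++;
	return sum;
}

int main(void)
{
	unsigned char block[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
	unsigned char seg0[5], seg1[3];	/* one block split across two "sg" entries */
	unsigned int block_size = sizeof(block), avail = sizeof(seg0);
	unsigned short crc;

	memcpy(seg0, block, sizeof(seg0));
	memcpy(seg1, block + sizeof(seg0), sizeof(seg1));

	/* Checksum what the current entry holds ... */
	crc = sum16_update(0, seg0, avail);
	/* ... then finish in the next entry, exactly block_size - avail
	 * bytes deep, mirroring the crc_t10dif_update() call above. */
	if (avail < block_size)
		crc = sum16_update(crc, seg1, block_size - avail);

	printf("split crc 0x%04x == whole 0x%04x\n",
	       crc, sum16_update(0, block, block_size));
	return 0;
}
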
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 52ea640274f4..b0744433315a 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -38,10 +38,9 @@
#include "target_core_ua.h"
#include "target_core_xcopy.h"
-static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
+static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
/*
* Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
@@ -54,17 +53,11 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
*
* See spc4r17 section 6.4.2 Table 135
*/
- if (!port)
- return;
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- return;
-
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ tg_pt_gp = lun->lun_tg_pt_gp;
if (tg_pt_gp)
buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
}
sense_reason_t
@@ -95,7 +88,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
- spc_fill_alua_data(lun->lun_sep, buf);
+ spc_fill_alua_data(lun, buf);
/*
* Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
@@ -182,11 +175,9 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
struct se_lun *lun = cmd->se_lun;
- struct se_port *port = NULL;
struct se_portal_group *tpg = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char *prod = &dev->t10_wwn.model[0];
u32 prod_len;
u32 unit_serial_len, off = 0;
@@ -268,18 +259,15 @@ check_t10_vend_desc:
/* Header size for Designation descriptor */
len += (id_len + 4);
off += (id_len + 4);
- /*
- * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
- */
- port = lun->lun_sep;
- if (port) {
+
+ if (1) {
struct t10_alua_lu_gp *lu_gp;
u32 padding, scsi_name_len, scsi_target_len;
u16 lu_gp_id = 0;
u16 tg_pt_gp_id = 0;
u16 tpgt;
- tpg = port->sep_tpg;
+ tpg = lun->lun_tpg;
/*
* Relative target port identifier, see spc4r17
* section 7.7.3.7
@@ -287,8 +275,7 @@ check_t10_vend_desc:
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
- buf[off] =
- (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
@@ -300,8 +287,8 @@ check_t10_vend_desc:
/* Skip over Obsolete field in RTPI payload
* in Table 472 */
off += 2;
- buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
- buf[off++] = (port->sep_rtpi & 0xff);
+ buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
+ buf[off++] = (lun->lun_rtpi & 0xff);
len += 8; /* Header size + Designation descriptor */
/*
* Target port group identifier, see spc4r17
@@ -310,21 +297,16 @@ check_t10_vend_desc:
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- goto check_lu_gp;
-
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ tg_pt_gp = lun->lun_tg_pt_gp;
if (!tg_pt_gp) {
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
goto check_lu_gp;
}
tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
- buf[off] =
- (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
@@ -372,8 +354,7 @@ check_lu_gp:
* section 7.5.1 Table 362
*/
check_scsi_name:
- buf[off] =
- (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
@@ -413,8 +394,7 @@ check_scsi_name:
/*
* Target device designator
*/
- buf[off] =
- (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target device: 10b */
@@ -482,7 +462,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
- if (se_dev_check_wce(dev))
+ if (target_check_wce(dev))
buf[6] = 0x01;
/* If an LBA map is present set R_SUP */
spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
@@ -699,7 +679,7 @@ static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
+ struct se_portal_group *tpg = cmd->se_lun->lun_tpg;
unsigned char *rbuf;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf;
@@ -713,7 +693,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- if (dev == tpg->tpg_virt_lun0.lun_se_dev)
+ if (dev == rcu_access_pointer(tpg->tpg_virt_lun0->lun_se_dev))
buf[0] = 0x3f; /* Not connected */
else
buf[0] = dev->transport->get_device_type(dev);
@@ -889,7 +869,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
if (pc == 1)
goto out;
- if (se_dev_check_wce(dev))
+ if (target_check_wce(dev))
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
@@ -986,6 +966,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
int length = 0;
int ret;
int i;
+ bool read_only = target_lun_is_rdonly(cmd);
memset(buf, 0, SE_MODE_PAGE_BUF);
@@ -996,13 +977,15 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
length = ten ? 3 : 2;
/* DEVICE-SPECIFIC PARAMETER */
- if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
- (cmd->se_deve &&
- (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
spc_modesense_write_protect(&buf[length], type);
- if ((se_dev_check_wce(dev)) &&
- (dev->dev_attrib.emulate_fua_write > 0))
+ /*
+ * SBC only allows us to enable FUA and DPO together. Fortunately
+ * DPO is explicitly specified as a hint, so a noop is a perfectly
+ * valid implementation.
+ */
+ if (target_check_fua(dev))
spc_modesense_dpofua(&buf[length], type);
++length;
@@ -1212,8 +1195,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
+ struct se_node_acl *nacl;
unsigned char *buf;
- u32 lun_count = 0, offset = 8, i;
+ u32 lun_count = 0, offset = 8;
if (cmd->data_length < 16) {
pr_warn("REPORT LUNS allocation length %u too small\n",
@@ -1235,12 +1219,10 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
lun_count = 1;
goto done;
}
+ nacl = sess->se_node_acl;
- spin_lock_irq(&sess->se_node_acl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = sess->se_node_acl->device_list[i];
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
/*
* We determine the correct LUN LIST LENGTH even once we
* have reached the initial allocation length.
@@ -1253,7 +1235,7 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
offset += 8;
}
- spin_unlock_irq(&sess->se_node_acl->device_list_lock);
+ rcu_read_unlock();
/*
* See SPC3 r07, page 159.
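*/

spc_emulate_report_luns() now walks the nacl's RCU-protected LUN entry list, but keeps the rule stated in the comment above: every entry bumps lun_count so the returned LUN LIST LENGTH is exact, while payload entries are only written while the allocation length has room. A minimal sketch of that count-everything, emit-what-fits shape, with a toy one-byte encoding standing in for int_to_scsilun():

#include <stdio.h>

int main(void)
{
	unsigned long long luns[] = { 0, 1, 5, 9 };	/* mapped LUNs on the list */
	unsigned char buf[8 + 16] = { 0 };		/* 8-byte header + room for 2 entries */
	unsigned int data_length = sizeof(buf);
	unsigned int lun_count = 0, offset = 8;
	unsigned int i;

	for (i = 0; i < sizeof(luns) / sizeof(luns[0]); i++) {
		/* Always counted, so LUN LIST LENGTH stays correct ... */
		lun_count++;
		/* ... but only emitted while the buffer has room. */
		if (offset + 8 > data_length)
			continue;
		buf[offset] = (unsigned char)luns[i];	/* toy int_to_scsilun() */
		offset += 8;
	}

	printf("reported %u of %u LUNs, list length %u bytes\n",
	       (data_length - 8) / 8, lun_count, lun_count * 8);
	return 0;
}
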
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 40f6c1378041..20ed5d2e151a 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -37,7 +37,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "target_core_internal.h"
@@ -104,7 +103,7 @@ static ssize_t target_stat_scsi_dev_show_attr_ports(
struct se_device *dev =
container_of(sgrps, struct se_device, dev_stat_grps);
- return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
+ return snprintf(page, PAGE_SIZE, "%u\n", dev->export_count);
}
DEV_STAT_SCSI_DEV_ATTR_RO(ports);
@@ -540,20 +539,14 @@ static ssize_t target_stat_scsi_port_show_attr_inst(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_device *dev = lun->lun_se_dev;
- struct se_hba *hba;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- hba = dev->se_hba;
- ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_PORT_ATTR_RO(inst);
@@ -562,18 +555,14 @@ static ssize_t target_stat_scsi_port_show_attr_dev(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_device *dev = lun->lun_se_dev;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_PORT_ATTR_RO(dev);
@@ -582,17 +571,14 @@ static ssize_t target_stat_scsi_port_show_attr_indx(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_PORT_ATTR_RO(indx);
@@ -601,21 +587,14 @@ static ssize_t target_stat_scsi_port_show_attr_role(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_device *dev = lun->lun_se_dev;
- struct se_port *sep;
- ssize_t ret;
-
- if (!dev)
- return -ENODEV;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_PORT_ATTR_RO(role);
@@ -624,18 +603,16 @@ static ssize_t target_stat_scsi_port_show_attr_busy_count(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev) {
+ /* FIXME: scsiPortBusyStatuses */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
}
- /* FIXME: scsiPortBusyStatuses */
- ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_PORT_ATTR_RO(busy_count);
@@ -683,20 +660,14 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_inst(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_device *dev = lun->lun_se_dev;
- struct se_port *sep;
- struct se_hba *hba;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- hba = dev->se_hba;
- ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst);
@@ -705,18 +676,14 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_dev(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_device *dev = lun->lun_se_dev;
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev);
@@ -725,17 +692,14 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_indx(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx);
@@ -744,21 +708,17 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_portal_group *tpg;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- tpg = sep->sep_tpg;
-
- ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
- tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index);
- spin_unlock(&lun->lun_sep_lock);
+ struct se_portal_group *tpg = lun->lun_tpg;
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
+ tpg->se_tpg_tfo->get_fabric_name(),
+ lun->lun_rtpi);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name);
@@ -767,22 +727,17 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_portal_group *tpg;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- tpg = sep->sep_tpg;
-
- ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
- tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&lun->lun_sep_lock);
+ struct se_portal_group *tpg = lun->lun_tpg;
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index);
@@ -791,18 +746,15 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
-
- ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus);
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&lun->lun_stats.cmd_pdus));
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds);
@@ -811,19 +763,15 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
-
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(sep->sep_stats.rx_data_octets >> 20));
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ (u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20));
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes);
@@ -832,19 +780,15 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
-
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(sep->sep_stats.tx_data_octets >> 20));
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ (u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20));
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes);
@@ -853,19 +797,16 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev) {
+ /* FIXME: scsiTgtPortHsInCommands */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
}
-
- /* FIXME: scsiTgtPortHsInCommands */
- ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds);
@@ -919,21 +860,14 @@ static ssize_t target_stat_scsi_transport_show_attr_inst(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_device *dev = lun->lun_se_dev;
- struct se_port *sep;
- struct se_hba *hba;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
-
- hba = dev->se_hba;
- ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst);
@@ -942,21 +876,18 @@ static ssize_t target_stat_scsi_transport_show_attr_device(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_portal_group *tpg;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
+ struct se_device *dev;
+ struct se_portal_group *tpg = lun->lun_tpg;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev) {
+ /* scsiTransportType */
+ ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
+ tpg->se_tpg_tfo->get_fabric_name());
}
- tpg = sep->sep_tpg;
- /* scsiTransportType */
- ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
- tpg->se_tpg_tfo->get_fabric_name());
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device);
@@ -965,20 +896,16 @@ static ssize_t target_stat_scsi_transport_show_attr_indx(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_portal_group *tpg;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- tpg = sep->sep_tpg;
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
- spin_unlock(&lun->lun_sep_lock);
+ struct se_device *dev;
+ struct se_portal_group *tpg = lun->lun_tpg;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx);
@@ -987,26 +914,22 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
struct se_port_stat_grps *pgrps, char *page)
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_device *dev = lun->lun_se_dev;
- struct se_port *sep;
- struct se_portal_group *tpg;
+ struct se_device *dev;
+ struct se_portal_group *tpg = lun->lun_tpg;
struct t10_wwn *wwn;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev) {
+ wwn = &dev->t10_wwn;
+ /* scsiTransportDevName */
+ ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ (strlen(wwn->unit_serial)) ? wwn->unit_serial :
+ wwn->vendor);
}
- tpg = sep->sep_tpg;
- wwn = &dev->t10_wwn;
- /* scsiTransportDevName */
- ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
- tpg->se_tpg_tfo->tpg_get_wwn(tpg),
- (strlen(wwn->unit_serial)) ? wwn->unit_serial :
- wwn->vendor);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name);
@@ -1082,17 +1005,17 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
struct se_portal_group *tpg;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst);
@@ -1107,16 +1030,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev(
struct se_lun *lun;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
- lun = deve->se_lun;
+ lun = rcu_dereference(deve->se_lun);
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
- spin_unlock_irq(&nacl->device_list_lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev);
@@ -1131,16 +1054,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port(
struct se_portal_group *tpg;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiAuthIntrTgtPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port);
@@ -1154,15 +1077,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_indx(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx);
@@ -1176,15 +1099,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrDevOrPort */
ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port);
@@ -1198,15 +1121,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrName */
ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name);
@@ -1220,15 +1143,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* FIXME: scsiAuthIntrLunMapIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx);
@@ -1242,15 +1165,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrAttachedTimes */
ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count);
@@ -1264,15 +1187,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrOutCommands */
- ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds);
- spin_unlock_irq(&nacl->device_list_lock);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&deve->total_cmds));
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds);
@@ -1286,15 +1210,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrReadMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20));
- spin_unlock_irq(&nacl->device_list_lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ (u32)(atomic_long_read(&deve->read_bytes) >> 20));
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes);
@@ -1308,15 +1233,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrWrittenMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20));
- spin_unlock_irq(&nacl->device_list_lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ (u32)(atomic_long_read(&deve->write_bytes) >> 20));
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes);
@@ -1330,15 +1256,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* FIXME: scsiAuthIntrHSOutCommands */
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds);
@@ -1352,16 +1278,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrLastCreation */
ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
INITIAL_JIFFIES) * 100 / HZ));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time);
@@ -1375,15 +1301,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* FIXME: scsiAuthIntrRowStatus */
ret = snprintf(page, PAGE_SIZE, "Ready\n");
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status);
@@ -1448,17 +1374,17 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
struct se_portal_group *tpg;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst);
@@ -1473,16 +1399,16 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(
struct se_lun *lun;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
- lun = deve->se_lun;
+ lun = rcu_dereference(deve->se_lun);
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
- spin_unlock_irq(&nacl->device_list_lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev);
@@ -1497,16 +1423,16 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
struct se_portal_group *tpg;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port);
@@ -1546,15 +1472,15 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAttIntrPortAuthIntrIdx */
ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx);
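The stat handlers above all convert to the same reader pattern: the per-node device_list array and its spinlock give way to an RCU-protected lookup, so the se_dev_entry is only valid inside the read-side critical section. A minimal sketch of that pattern, assuming target_nacl_find_deve() performs an RCU hlist walk over nacl->lun_entry_hlist (illustrative, not the verbatim kernel code):

static ssize_t show_deve_stat(struct se_node_acl *nacl, u64 mapped_lun,
			      char *page)
{
	struct se_dev_entry *deve;
	ssize_t ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (!deve) {
		rcu_read_unlock();
		return -ENODEV;
	}
	/* deve may be freed after rcu_read_unlock(); copy values out first */
	ret = snprintf(page, PAGE_SIZE, "%lu\n",
		       atomic_long_read(&deve->total_cmds));
	rcu_read_unlock();
	return ret;
}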
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index a5bb0c46e57e..5b2820312310 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -31,7 +31,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
@@ -115,7 +114,7 @@ void core_tmr_abort_task(
{
struct se_cmd *se_cmd;
unsigned long flags;
- int ref_tag;
+ u64 ref_tag;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
@@ -127,16 +126,17 @@ void core_tmr_abort_task(
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
continue;
- ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
+ ref_tag = se_cmd->tag;
if (tmr->ref_task_tag != ref_tag)
continue;
- printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
+ printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
se_cmd->se_tfo->get_fabric_name(), ref_tag);
spin_lock(&se_cmd->t_state_lock);
if (se_cmd->transport_state & CMD_T_COMPLETE) {
- printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
+ printk("ABORT_TASK: ref_tag: %llu already complete,"
+ " skipping\n", ref_tag);
spin_unlock(&se_cmd->t_state_lock);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
goto out;
@@ -151,18 +151,18 @@ void core_tmr_abort_task(
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- target_put_sess_cmd(se_sess, se_cmd);
+ target_put_sess_cmd(se_cmd);
transport_cmd_finish_abort(se_cmd, true);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
- " ref_tag: %d\n", ref_tag);
+ " ref_tag: %llu\n", ref_tag);
tmr->response = TMR_FUNCTION_COMPLETE;
return;
}
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
out:
- printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %d\n",
+ printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n",
tmr->ref_task_tag);
tmr->response = TMR_TASK_DOES_NOT_EXIST;
}
@@ -287,16 +287,16 @@ static void core_tmr_drain_state_list(
list_del(&cmd->state_list);
pr_debug("LUN_RESET: %s cmd: %p"
- " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
+ " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
"cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd,
- cmd->se_tfo->get_task_tag(cmd), 0,
+ cmd->tag, 0,
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
cmd->t_task_cdb[0]);
- pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+ pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx"
" -- CMD_T_ACTIVE: %d"
" CMD_T_STOP: %d CMD_T_SENT: %d\n",
- cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
+ cmd->tag, cmd->pr_res_key,
(cmd->transport_state & CMD_T_ACTIVE) != 0,
(cmd->transport_state & CMD_T_STOP) != 0,
(cmd->transport_state & CMD_T_SENT) != 0);
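The tag handling above drops the ->get_task_tag() fabric callback in favor of a u64 se_cmd->tag that the fabric fills in before submission. A hedged sketch of a fabric submit path under that convention (struct my_cmd and its fields are hypothetical; the target_submit_cmd() signature matches the u64-LUN version later in this patch):

struct my_cmd {
	struct se_cmd se_cmd;
	u64 itt;			/* wire-level initiator task tag */
	unsigned char cdb[16];
	unsigned char sense[96];
};

static int my_fabric_queue(struct my_cmd *cmd, struct se_session *sess,
			   u64 unpacked_lun, u32 length, int dir)
{
	cmd->se_cmd.tag = cmd->itt;	/* replaces ->get_task_tag() */
	return target_submit_cmd(&cmd->se_cmd, sess, cmd->cdb, cmd->sense,
				 unpacked_lun, length, TCM_SIMPLE_TAG,
				 dir, 0);
}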
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 84de757bd458..babde4ad841f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -39,6 +39,7 @@
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
+#include "target_core_alua.h"
#include "target_core_pr.h"
extern struct se_device *g_lun0_dev;
@@ -46,45 +47,9 @@ extern struct se_device *g_lun0_dev;
static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
-/* core_clear_initiator_node_from_tpg():
- *
- *
- */
-static void core_clear_initiator_node_from_tpg(
- struct se_node_acl *nacl,
- struct se_portal_group *tpg)
-{
- int i;
- struct se_dev_entry *deve;
- struct se_lun *lun;
-
- spin_lock_irq(&nacl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = nacl->device_list[i];
-
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
-
- if (!deve->se_lun) {
- pr_err("%s device entries device pointer is"
- " NULL, but Initiator has access.\n",
- tpg->se_tpg_tfo->get_fabric_name());
- continue;
- }
-
- lun = deve->se_lun;
- spin_unlock_irq(&nacl->device_list_lock);
- core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
-
- spin_lock_irq(&nacl->device_list_lock);
- }
- spin_unlock_irq(&nacl->device_list_lock);
-}
-
/* __core_tpg_get_initiator_node_acl():
*
- * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
+ * mutex_lock(&tpg->acl_node_mutex); must be held when calling
*/
struct se_node_acl *__core_tpg_get_initiator_node_acl(
struct se_portal_group *tpg,
@@ -110,9 +75,9 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
{
struct se_node_acl *acl;
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_lock(&tpg->acl_node_mutex);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
return acl;
}
@@ -124,22 +89,20 @@ EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
*/
void core_tpg_add_node_to_devs(
struct se_node_acl *acl,
- struct se_portal_group *tpg)
+ struct se_portal_group *tpg,
+ struct se_lun *lun_orig)
{
- int i = 0;
u32 lun_access = 0;
struct se_lun *lun;
struct se_device *dev;
- spin_lock(&tpg->tpg_lun_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = tpg->tpg_lun_list[i];
- if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+ mutex_lock(&tpg->tpg_lun_mutex);
+ hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
+ if (lun_orig && lun != lun_orig)
continue;
- spin_unlock(&tpg->tpg_lun_lock);
-
- dev = lun->lun_se_dev;
+ dev = rcu_dereference_check(lun->lun_se_dev,
+ lockdep_is_held(&tpg->tpg_lun_mutex));
/*
* By default in LIO-Target $FABRIC_MOD,
* demo_mode_write_protect is ON, or READ_ONLY;
@@ -157,7 +120,7 @@ void core_tpg_add_node_to_devs(
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
}
- pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+ pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
" access for LUN in Demo Mode\n",
tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
@@ -165,7 +128,7 @@ void core_tpg_add_node_to_devs(
"READ-WRITE" : "READ-ONLY");
core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
- lun_access, acl, tpg);
+ lun_access, acl, tpg);
/*
* Check to see if there are any existing persistent reservation
* APTPL pre-registrations that need to be enabled for this dynamic
@@ -173,9 +136,8 @@ void core_tpg_add_node_to_devs(
*/
core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
lun->unpacked_lun);
- spin_lock(&tpg->tpg_lun_lock);
}
- spin_unlock(&tpg->tpg_lun_lock);
+ mutex_unlock(&tpg->tpg_lun_mutex);
}
/* core_set_queue_depth_for_node():
@@ -196,67 +158,63 @@ static int core_set_queue_depth_for_node(
return 0;
}
-void array_free(void *array, int n)
+static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
+ const unsigned char *initiatorname)
{
- void **a = array;
- int i;
+ struct se_node_acl *acl;
- for (i = 0; i < n; i++)
- kfree(a[i]);
- kfree(a);
-}
+ acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
+ GFP_KERNEL);
+ if (!acl)
+ return NULL;
-static void *array_zalloc(int n, size_t size, gfp_t flags)
-{
- void **a;
- int i;
+ INIT_LIST_HEAD(&acl->acl_list);
+ INIT_LIST_HEAD(&acl->acl_sess_list);
+ INIT_HLIST_HEAD(&acl->lun_entry_hlist);
+ kref_init(&acl->acl_kref);
+ init_completion(&acl->acl_free_comp);
+ spin_lock_init(&acl->nacl_sess_lock);
+ mutex_init(&acl->lun_entry_mutex);
+ atomic_set(&acl->acl_pr_ref_count, 0);
+ if (tpg->se_tpg_tfo->tpg_get_default_depth)
+ acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
+ else
+ acl->queue_depth = 1;
+ snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+ acl->se_tpg = tpg;
+ acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- a = kzalloc(n * sizeof(void*), flags);
- if (!a)
- return NULL;
- for (i = 0; i < n; i++) {
- a[i] = kzalloc(size, flags);
- if (!a[i]) {
- array_free(a, n);
- return NULL;
- }
- }
- return a;
+ tpg->se_tpg_tfo->set_default_node_attributes(acl);
+
+ if (core_set_queue_depth_for_node(tpg, acl) < 0)
+ goto out_free_acl;
+
+ return acl;
+
+out_free_acl:
+ kfree(acl);
+ return NULL;
}
-/* core_create_device_list_for_node():
- *
- *
- */
-static int core_create_device_list_for_node(struct se_node_acl *nacl)
+static void target_add_node_acl(struct se_node_acl *acl)
{
- struct se_dev_entry *deve;
- int i;
-
- nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
- sizeof(struct se_dev_entry), GFP_KERNEL);
- if (!nacl->device_list) {
- pr_err("Unable to allocate memory for"
- " struct se_node_acl->device_list\n");
- return -ENOMEM;
- }
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = nacl->device_list[i];
-
- atomic_set(&deve->ua_count, 0);
- atomic_set(&deve->pr_ref_count, 0);
- spin_lock_init(&deve->ua_lock);
- INIT_LIST_HEAD(&deve->alua_port_list);
- INIT_LIST_HEAD(&deve->ua_list);
- }
+ struct se_portal_group *tpg = acl->se_tpg;
- return 0;
+ mutex_lock(&tpg->acl_node_mutex);
+ list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+ tpg->num_node_acls++;
+ mutex_unlock(&tpg->acl_node_mutex);
+
+ pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n",
+ tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
+ acl->dynamic_node_acl ? "DYNAMIC" : "",
+ acl->queue_depth,
+ tpg->se_tpg_tfo->get_fabric_name(),
+ acl->initiatorname);
}
-/* core_tpg_check_initiator_node_acl()
- *
- *
- */
struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_portal_group *tpg,
unsigned char *initiatorname)
@@ -270,35 +228,11 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
return NULL;
- acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
+ acl = target_alloc_node_acl(tpg, initiatorname);
if (!acl)
return NULL;
-
- INIT_LIST_HEAD(&acl->acl_list);
- INIT_LIST_HEAD(&acl->acl_sess_list);
- kref_init(&acl->acl_kref);
- init_completion(&acl->acl_free_comp);
- spin_lock_init(&acl->device_list_lock);
- spin_lock_init(&acl->nacl_sess_lock);
- atomic_set(&acl->acl_pr_ref_count, 0);
- acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
- snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
- acl->se_tpg = tpg;
- acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
acl->dynamic_node_acl = 1;
- tpg->se_tpg_tfo->set_default_node_attributes(acl);
-
- if (core_create_device_list_for_node(acl) < 0) {
- tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
- return NULL;
- }
-
- if (core_set_queue_depth_for_node(tpg, acl) < 0) {
- core_free_device_list_for_node(acl, tpg);
- tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
- return NULL;
- }
/*
* Here we only create demo-mode MappedLUNs from the active
* TPG LUNs if the fabric is not explicitly asking for
@@ -306,18 +240,9 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
*/
if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
(tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
- core_tpg_add_node_to_devs(acl, tpg);
-
- spin_lock_irq(&tpg->acl_node_lock);
- list_add_tail(&acl->acl_list, &tpg->acl_node_list);
- tpg->num_node_acls++;
- spin_unlock_irq(&tpg->acl_node_lock);
-
- pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
- tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
+ core_tpg_add_node_to_devs(acl, tpg, NULL);
+ target_add_node_acl(acl);
return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
@@ -328,40 +253,13 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
cpu_relax();
}
-void core_tpg_clear_object_luns(struct se_portal_group *tpg)
-{
- int i;
- struct se_lun *lun;
-
- spin_lock(&tpg->tpg_lun_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = tpg->tpg_lun_list[i];
-
- if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
- (lun->lun_se_dev == NULL))
- continue;
-
- spin_unlock(&tpg->tpg_lun_lock);
- core_dev_del_lun(tpg, lun);
- spin_lock(&tpg->tpg_lun_lock);
- }
- spin_unlock(&tpg->tpg_lun_lock);
-}
-EXPORT_SYMBOL(core_tpg_clear_object_luns);
-
-/* core_tpg_add_initiator_node_acl():
- *
- *
- */
struct se_node_acl *core_tpg_add_initiator_node_acl(
struct se_portal_group *tpg,
- struct se_node_acl *se_nacl,
- const char *initiatorname,
- u32 queue_depth)
+ const char *initiatorname)
{
- struct se_node_acl *acl = NULL;
+ struct se_node_acl *acl;
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_lock(&tpg->acl_node_mutex);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (acl) {
if (acl->dynamic_node_acl) {
@@ -369,99 +267,42 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
- spin_unlock_irq(&tpg->acl_node_lock);
- /*
- * Release the locally allocated struct se_node_acl
- * because * core_tpg_add_initiator_node_acl() returned
- * a pointer to an existing demo mode node ACL.
- */
- if (se_nacl)
- tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
- se_nacl);
- goto done;
+ mutex_unlock(&tpg->acl_node_mutex);
+ return acl;
}
pr_err("ACL entry for %s Initiator"
" Node %s already exists for TPG %u, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
return ERR_PTR(-EEXIST);
}
- spin_unlock_irq(&tpg->acl_node_lock);
-
- if (!se_nacl) {
- pr_err("struct se_node_acl pointer is NULL\n");
- return ERR_PTR(-EINVAL);
- }
- /*
- * For v4.x logic the se_node_acl_s is hanging off a fabric
- * dependent structure allocated via
- * struct target_core_fabric_ops->fabric_make_nodeacl()
- */
- acl = se_nacl;
+ mutex_unlock(&tpg->acl_node_mutex);
- INIT_LIST_HEAD(&acl->acl_list);
- INIT_LIST_HEAD(&acl->acl_sess_list);
- kref_init(&acl->acl_kref);
- init_completion(&acl->acl_free_comp);
- spin_lock_init(&acl->device_list_lock);
- spin_lock_init(&acl->nacl_sess_lock);
- atomic_set(&acl->acl_pr_ref_count, 0);
- acl->queue_depth = queue_depth;
- snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
- acl->se_tpg = tpg;
- acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-
- tpg->se_tpg_tfo->set_default_node_attributes(acl);
-
- if (core_create_device_list_for_node(acl) < 0) {
- tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
+ acl = target_alloc_node_acl(tpg, initiatorname);
+ if (!acl)
return ERR_PTR(-ENOMEM);
- }
-
- if (core_set_queue_depth_for_node(tpg, acl) < 0) {
- core_free_device_list_for_node(acl, tpg);
- tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
- return ERR_PTR(-EINVAL);
- }
-
- spin_lock_irq(&tpg->acl_node_lock);
- list_add_tail(&acl->acl_list, &tpg->acl_node_list);
- tpg->num_node_acls++;
- spin_unlock_irq(&tpg->acl_node_lock);
-
-done:
- pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
- tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
+ target_add_node_acl(acl);
return acl;
}
-EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
-/* core_tpg_del_initiator_node_acl():
- *
- *
- */
-int core_tpg_del_initiator_node_acl(
- struct se_portal_group *tpg,
- struct se_node_acl *acl,
- int force)
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
+ struct se_portal_group *tpg = acl->se_tpg;
LIST_HEAD(sess_list);
struct se_session *sess, *sess_tmp;
unsigned long flags;
int rc;
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_lock(&tpg->acl_node_mutex);
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
}
list_del(&acl->acl_list);
tpg->num_node_acls--;
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
acl->acl_stop = 1;
@@ -493,7 +334,6 @@ int core_tpg_del_initiator_node_acl(
wait_for_completion(&acl->acl_free_comp);
core_tpg_wait_for_nacl_pr_ref(acl);
- core_clear_initiator_node_from_tpg(acl, tpg);
core_free_device_list_for_node(acl, tpg);
pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
@@ -501,9 +341,8 @@ int core_tpg_del_initiator_node_acl(
tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
- return 0;
+ kfree(acl);
}
-EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
/* core_tpg_set_initiator_node_queue_depth():
*
@@ -520,21 +359,21 @@ int core_tpg_set_initiator_node_queue_depth(
unsigned long flags;
int dynamic_acl = 0;
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_lock(&tpg->acl_node_mutex);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (!acl) {
pr_err("Access Control List entry for %s Initiator"
" Node %s does not exists for TPG %hu, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
return -ENODEV;
}
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
dynamic_acl = 1;
}
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
spin_lock_irqsave(&tpg->session_lock, flags);
list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -550,10 +389,10 @@ int core_tpg_set_initiator_node_queue_depth(
tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
spin_unlock_irqrestore(&tpg->session_lock, flags);
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_lock(&tpg->acl_node_mutex);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
return -EEXIST;
}
/*
@@ -588,10 +427,10 @@ int core_tpg_set_initiator_node_queue_depth(
if (init_sess)
tpg->se_tpg_tfo->close_session(init_sess);
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_lock(&tpg->acl_node_mutex);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
return -EINVAL;
}
spin_unlock_irqrestore(&tpg->session_lock, flags);
@@ -607,10 +446,10 @@ int core_tpg_set_initiator_node_queue_depth(
initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_lock(&tpg->acl_node_mutex);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
return 0;
}
@@ -646,78 +485,54 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref)
complete(&lun->lun_ref_comp);
}
-static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
-{
- /* Set in core_dev_setup_virtual_lun0() */
- struct se_device *dev = g_lun0_dev;
- struct se_lun *lun = &se_tpg->tpg_virt_lun0;
- u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
- int ret;
-
- lun->unpacked_lun = 0;
- lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
- atomic_set(&lun->lun_acl_count, 0);
- init_completion(&lun->lun_shutdown_comp);
- INIT_LIST_HEAD(&lun->lun_acl_list);
- spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_sep_lock);
- init_completion(&lun->lun_ref_comp);
-
- ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
int core_tpg_register(
- const struct target_core_fabric_ops *tfo,
struct se_wwn *se_wwn,
struct se_portal_group *se_tpg,
- void *tpg_fabric_ptr,
- int se_tpg_type)
+ int proto_id)
{
- struct se_lun *lun;
- u32 i;
-
- se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
- sizeof(struct se_lun), GFP_KERNEL);
- if (!se_tpg->tpg_lun_list) {
- pr_err("Unable to allocate struct se_portal_group->"
- "tpg_lun_list\n");
- return -ENOMEM;
- }
+ int ret;
+
+ if (!se_tpg)
+ return -EINVAL;
+ /*
+ * For the typical case where core_tpg_register() is called by a
+ * fabric driver from target_core_fabric_ops->fabric_make_tpg()
+ * configfs context, use the original tf_ops pointer already saved
+ * by target-core in target_fabric_make_wwn().
+ *
+ * Otherwise, for special cases like iscsi-target discovery TPGs
+ * the caller is responsible for setting ->se_tpg_tfo ahead of
+ * calling core_tpg_register().
+ */
+ if (se_wwn)
+ se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = se_tpg->tpg_lun_list[i];
- lun->unpacked_lun = i;
- lun->lun_link_magic = SE_LUN_LINK_MAGIC;
- lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
- atomic_set(&lun->lun_acl_count, 0);
- init_completion(&lun->lun_shutdown_comp);
- INIT_LIST_HEAD(&lun->lun_acl_list);
- spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_sep_lock);
- init_completion(&lun->lun_ref_comp);
+ if (!se_tpg->se_tpg_tfo) {
+ pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
+ return -EINVAL;
}
- se_tpg->se_tpg_type = se_tpg_type;
- se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
- se_tpg->se_tpg_tfo = tfo;
+ INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
+ se_tpg->proto_id = proto_id;
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
INIT_LIST_HEAD(&se_tpg->se_tpg_node);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
- spin_lock_init(&se_tpg->acl_node_lock);
spin_lock_init(&se_tpg->session_lock);
- spin_lock_init(&se_tpg->tpg_lun_lock);
-
- if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
- if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
- array_free(se_tpg->tpg_lun_list,
- TRANSPORT_MAX_LUNS_PER_TPG);
- return -ENOMEM;
+ mutex_init(&se_tpg->tpg_lun_mutex);
+ mutex_init(&se_tpg->acl_node_mutex);
+
+ if (se_tpg->proto_id >= 0) {
+ se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
+ if (IS_ERR(se_tpg->tpg_virt_lun0))
+ return PTR_ERR(se_tpg->tpg_virt_lun0);
+
+ ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
+ TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
+ if (ret < 0) {
+ kfree(se_tpg->tpg_virt_lun0);
+ return ret;
}
}
@@ -725,11 +540,11 @@ int core_tpg_register(
list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
spin_unlock_bh(&tpg_lock);
- pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
- " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
- (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
- "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
- "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
+ pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
+ "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
+ se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
return 0;
}
@@ -737,14 +552,14 @@ EXPORT_SYMBOL(core_tpg_register);
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
+ const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
struct se_node_acl *nacl, *nacl_tmp;
+ LIST_HEAD(node_list);
- pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
- " for endpoint: %s Portal Tag %u\n",
- (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
- "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
- se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
- se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+ pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
+ "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
+ tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
+ se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
spin_lock_bh(&tpg_lock);
list_del(&se_tpg->se_tpg_node);
@@ -752,61 +567,56 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
+
+ mutex_lock(&se_tpg->acl_node_mutex);
+ list_splice_init(&se_tpg->acl_node_list, &node_list);
+ mutex_unlock(&se_tpg->acl_node_mutex);
/*
* Release any remaining demo-mode generated se_node_acl that have
* not been released because of TFO->tpg_check_demo_mode_cache() == 1
* in transport_deregister_session().
*/
- spin_lock_irq(&se_tpg->acl_node_lock);
- list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
- acl_list) {
+ list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
list_del(&nacl->acl_list);
se_tpg->num_node_acls--;
- spin_unlock_irq(&se_tpg->acl_node_lock);
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
- se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
-
- spin_lock_irq(&se_tpg->acl_node_lock);
+ kfree(nacl);
}
- spin_unlock_irq(&se_tpg->acl_node_lock);
- if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
- core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);
+ if (se_tpg->proto_id >= 0) {
+ core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
+ kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
+ }
- se_tpg->se_tpg_fabric_ptr = NULL;
- array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_alloc_lun(
struct se_portal_group *tpg,
- u32 unpacked_lun)
+ u64 unpacked_lun)
{
struct se_lun *lun;
- if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
- "-1: %u for Target Portal Group: %u\n",
- tpg->se_tpg_tfo->get_fabric_name(),
- unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- return ERR_PTR(-EOVERFLOW);
- }
-
- spin_lock(&tpg->tpg_lun_lock);
- lun = tpg->tpg_lun_list[unpacked_lun];
- if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
- pr_err("TPG Logical Unit Number: %u is already active"
- " on %s Target Portal Group: %u, ignoring request.\n",
- unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return ERR_PTR(-EINVAL);
+ lun = kzalloc(sizeof(*lun), GFP_KERNEL);
+ if (!lun) {
+ pr_err("Unable to allocate se_lun memory\n");
+ return ERR_PTR(-ENOMEM);
}
- spin_unlock(&tpg->tpg_lun_lock);
+ lun->unpacked_lun = unpacked_lun;
+ lun->lun_link_magic = SE_LUN_LINK_MAGIC;
+ atomic_set(&lun->lun_acl_count, 0);
+ init_completion(&lun->lun_ref_comp);
+ INIT_LIST_HEAD(&lun->lun_deve_list);
+ INIT_LIST_HEAD(&lun->lun_dev_link);
+ atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
+ spin_lock_init(&lun->lun_deve_lock);
+ mutex_init(&lun->lun_tg_pt_md_mutex);
+ INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
+ spin_lock_init(&lun->lun_tg_pt_gp_lock);
+ lun->lun_tpg = tpg;
return lun;
}
@@ -822,34 +632,70 @@ int core_tpg_add_lun(
ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
GFP_KERNEL);
if (ret < 0)
- return ret;
+ goto out;
- ret = core_dev_export(dev, tpg, lun);
- if (ret < 0) {
- percpu_ref_exit(&lun->lun_ref);
- return ret;
- }
+ ret = core_alloc_rtpi(lun, dev);
+ if (ret)
+ goto out_kill_ref;
+
+ if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+ !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+ target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
+
+ mutex_lock(&tpg->tpg_lun_mutex);
+
+ spin_lock(&dev->se_port_lock);
+ lun->lun_index = dev->dev_index;
+ rcu_assign_pointer(lun->lun_se_dev, dev);
+ dev->export_count++;
+ list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
+ spin_unlock(&dev->se_port_lock);
- spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
- lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
- spin_unlock(&tpg->tpg_lun_lock);
+ if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+ hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
+ mutex_unlock(&tpg->tpg_lun_mutex);
return 0;
+
+out_kill_ref:
+ percpu_ref_exit(&lun->lun_ref);
+out:
+ return ret;
}
void core_tpg_remove_lun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+
core_clear_lun_from_tpg(lun, tpg);
+ /*
+ * Wait for any active I/O references to percpu se_lun->lun_ref to
+ * be released. Also, se_lun->lun_ref is now used by PR and ALUA
+ * logic when referencing a remote target port during ALL_TGT_PT=1
+ * and generating UNIT_ATTENTIONs for ALUA access state transition.
+ */
transport_clear_lun_ref(lun);
- core_dev_unexport(lun->lun_se_dev, tpg, lun);
+ mutex_lock(&tpg->tpg_lun_mutex);
+ if (lun->lun_se_dev) {
+ target_detach_tg_pt_gp(lun);
- spin_lock(&tpg->tpg_lun_lock);
- lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
- spin_unlock(&tpg->tpg_lun_lock);
+ spin_lock(&dev->se_port_lock);
+ list_del(&lun->lun_dev_link);
+ dev->export_count--;
+ rcu_assign_pointer(lun->lun_se_dev, NULL);
+ spin_unlock(&dev->se_port_lock);
+ }
+ if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+ hlist_del_rcu(&lun->link);
+ mutex_unlock(&tpg->tpg_lun_mutex);
percpu_ref_exit(&lun->lun_ref);
}
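With core_tpg_register() reduced to (se_wwn, se_tpg, proto_id) and the fabric ops pointer recovered from se_wwn->wwn_tf->tf_ops, a fabric's ->fabric_make_tpg() shrinks accordingly. A sketch under the assumption that the fabric embeds se_portal_group in its own hypothetical my_tpg and passes an SPC protocol identifier such as SCSI_PROTOCOL_ISCSI:

struct my_tpg {
	struct se_portal_group se_tpg;
};

static struct se_portal_group *my_make_tpg(struct se_wwn *wwn,
					   struct config_group *group,
					   const char *name)
{
	struct my_tpg *tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	int ret;

	if (!tpg)
		return ERR_PTR(-ENOMEM);

	/* tfo is taken from wwn->wwn_tf->tf_ops inside core_tpg_register() */
	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
	if (ret < 0) {
		kfree(tpg);
		return ERR_PTR(ret);
	}
	return &tpg->se_tpg;
}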
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index cd3bfc16d25f..ce8574b7220c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -43,7 +43,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
@@ -60,7 +59,6 @@ struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
-struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;
@@ -119,16 +117,6 @@ int init_se_kmem_caches(void)
"cache failed\n");
goto out_free_lu_gp_mem_cache;
}
- t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
- "t10_alua_tg_pt_gp_mem_cache",
- sizeof(struct t10_alua_tg_pt_gp_member),
- __alignof__(struct t10_alua_tg_pt_gp_member),
- 0, NULL);
- if (!t10_alua_tg_pt_gp_mem_cache) {
- pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
- "mem_t failed\n");
- goto out_free_tg_pt_gp_cache;
- }
t10_alua_lba_map_cache = kmem_cache_create(
"t10_alua_lba_map_cache",
sizeof(struct t10_alua_lba_map),
@@ -136,7 +124,7 @@ int init_se_kmem_caches(void)
if (!t10_alua_lba_map_cache) {
pr_err("kmem_cache_create() for t10_alua_lba_map_"
"cache failed\n");
- goto out_free_tg_pt_gp_mem_cache;
+ goto out_free_tg_pt_gp_cache;
}
t10_alua_lba_map_mem_cache = kmem_cache_create(
"t10_alua_lba_map_mem_cache",
@@ -159,8 +147,6 @@ out_free_lba_map_mem_cache:
kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
kmem_cache_destroy(t10_alua_lba_map_cache);
-out_free_tg_pt_gp_mem_cache:
- kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
@@ -186,7 +172,6 @@ void release_se_kmem_caches(void)
kmem_cache_destroy(t10_alua_lu_gp_cache);
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
- kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
kmem_cache_destroy(t10_alua_lba_map_cache);
kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}
@@ -406,12 +391,6 @@ EXPORT_SYMBOL(target_get_session);
void target_put_session(struct se_session *se_sess)
{
- struct se_portal_group *tpg = se_sess->se_tpg;
-
- if (tpg->se_tpg_tfo->put_session != NULL) {
- tpg->se_tpg_tfo->put_session(se_sess);
- return;
- }
kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
@@ -498,7 +477,7 @@ void transport_deregister_session(struct se_session *se_sess)
const struct target_core_fabric_ops *se_tfo;
struct se_node_acl *se_nacl;
unsigned long flags;
- bool comp_nacl = true;
+ bool comp_nacl = true, drop_nacl = false;
if (!se_tpg) {
transport_free_session(se_sess);
@@ -518,22 +497,22 @@ void transport_deregister_session(struct se_session *se_sess)
*/
se_nacl = se_sess->se_node_acl;
- spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
+ mutex_lock(&se_tpg->acl_node_mutex);
if (se_nacl && se_nacl->dynamic_node_acl) {
if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
list_del(&se_nacl->acl_list);
se_tpg->num_node_acls--;
- spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
- core_tpg_wait_for_nacl_pr_ref(se_nacl);
- core_free_device_list_for_node(se_nacl, se_tpg);
- se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);
-
- comp_nacl = false;
- spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
+ drop_nacl = true;
}
}
- spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
+ mutex_unlock(&se_tpg->acl_node_mutex);
+ if (drop_nacl) {
+ core_tpg_wait_for_nacl_pr_ref(se_nacl);
+ core_free_device_list_for_node(se_nacl, se_tpg);
+ kfree(se_nacl);
+ comp_nacl = false;
+ }
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->get_fabric_name());
/*
@@ -593,9 +572,8 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
* this command for frontend exceptions.
*/
if (cmd->transport_state & CMD_T_STOP) {
- pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
- __func__, __LINE__,
- cmd->se_tfo->get_task_tag(cmd));
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -1148,6 +1126,8 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
/*
* Used by fabric modules containing a local struct se_cmd within their
* fabric dependent per I/O descriptor.
+ *
+ * Preserves the value of @cmd->tag.
*/
void transport_init_se_cmd(
struct se_cmd *cmd,
@@ -1274,11 +1254,7 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
return ret;
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
-
- spin_lock(&cmd->se_lun->lun_sep_lock);
- if (cmd->se_lun->lun_sep)
- cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
- spin_unlock(&cmd->se_lun->lun_sep_lock);
+ atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);
@@ -1346,11 +1322,9 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
+ cmd->t_bidi_data_sg = sgl_bidi;
+ cmd->t_bidi_data_nents = sgl_bidi_count;
- if (sgl_bidi && sgl_bidi_count) {
- cmd->t_bidi_data_sg = sgl_bidi;
- cmd->t_bidi_data_nents = sgl_bidi_count;
- }
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
return 0;
}
@@ -1375,6 +1349,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
* @sgl_prot: struct scatterlist memory protection information
* @sgl_prot_count: scatterlist count for protection information
*
+ * Task tags are supported if the caller has set @se_cmd->tag.
+ *
* Returns non zero to signal active I/O shutdown failure. All other
* setup exceptions will be returned as a SCSI CHECK_CONDITION response,
* but still return zero here.
@@ -1383,7 +1359,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
* assumes internal allocation of fabric payload buffer by target-core.
*/
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
- unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+ unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags,
struct scatterlist *sgl, u32 sgl_count,
struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
@@ -1412,7 +1388,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
* for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
- ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
if (ret)
return ret;
/*
@@ -1426,7 +1402,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
if (rc) {
transport_send_check_condition_and_sense(se_cmd, rc, 0);
- target_put_sess_cmd(se_sess, se_cmd);
+ target_put_sess_cmd(se_cmd);
return 0;
}
@@ -1443,6 +1419,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
if (sgl_prot_count) {
se_cmd->t_prot_sg = sgl_prot;
se_cmd->t_prot_nents = sgl_prot_count;
+ se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
}
/*
@@ -1506,6 +1483,8 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
*
+ * Task tags are supported if the caller has set @se_cmd->tag.
+ *
* Returns non zero to signal active I/O shutdown failure. All other
* setup exceptions will be returned as a SCSI CHECK_CONDITION response,
* but still return zero here.
@@ -1516,7 +1495,7 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
* It also assumes internal target core SGL memory allocation.
*/
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
- unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+ unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags)
{
return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
@@ -1553,7 +1532,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
**/
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
- unsigned char *sense, u32 unpacked_lun,
+ unsigned char *sense, u64 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t gfp, unsigned int tag, int flags)
{
@@ -1577,7 +1556,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
se_cmd->se_tmr_req->ref_task_tag = tag;
/* See target_submit_cmd for commentary */
- ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
if (ret) {
core_tmr_release_req(se_cmd->se_tmr_req);
return ret;
@@ -1633,9 +1612,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
{
int ret = 0;
- pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
- " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
- cmd->t_task_cdb[0]);
+ pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
+ " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
cmd->t_state, sense_reason);
@@ -1692,13 +1670,13 @@ void transport_generic_request_failure(struct se_cmd *cmd,
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
if (cmd->se_sess &&
- cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
- cmd->orig_fe_lun, 0x2C,
- ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-
+ cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
+ target_ua_allocate_lun(cmd->se_sess->se_node_acl,
+ cmd->orig_fe_lun, 0x2C,
+ ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+ }
trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo-> queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
@@ -1759,8 +1737,8 @@ static int target_write_prot_action(struct se_cmd *cmd)
break;
sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
- cmd->pi_err = sbc_dif_verify_write(cmd, cmd->t_task_lba,
- sectors, 0, NULL, 0);
+ cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
+ sectors, 0, cmd->t_prot_sg, 0);
if (unlikely(cmd->pi_err)) {
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
@@ -1843,9 +1821,8 @@ void target_execute_cmd(struct se_cmd *cmd)
*/
spin_lock_irq(&cmd->t_state_lock);
if (cmd->transport_state & CMD_T_STOP) {
- pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
- __func__, __LINE__,
- cmd->se_tfo->get_task_tag(cmd));
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
spin_unlock_irq(&cmd->t_state_lock);
complete_all(&cmd->t_transport_stop_comp);
@@ -1984,16 +1961,17 @@ static void transport_handle_queue_full(
static bool target_read_prot_action(struct se_cmd *cmd)
{
- sense_reason_t rc;
-
switch (cmd->prot_op) {
case TARGET_PROT_DIN_STRIP:
if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
- rc = sbc_dif_read_strip(cmd);
- if (rc) {
- cmd->pi_err = rc;
+ u32 sectors = cmd->data_length >>
+ ilog2(cmd->se_dev->dev_attrib.block_size);
+
+ cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
+ sectors, 0, cmd->t_prot_sg,
+ 0);
+ if (cmd->pi_err)
return true;
- }
}
break;
case TARGET_PROT_DIN_INSERT:
@@ -2072,12 +2050,8 @@ static void target_complete_ok_work(struct work_struct *work)
queue_rsp:
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
- spin_lock(&cmd->se_lun->lun_sep_lock);
- if (cmd->se_lun->lun_sep) {
- cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
- cmd->data_length;
- }
- spin_unlock(&cmd->se_lun->lun_sep_lock);
+ atomic_long_add(cmd->data_length,
+ &cmd->se_lun->lun_stats.tx_data_octets);
/*
* Perform READ_STRIP of PI using software emulation when
* backend had PI enabled, if the transport will not be
@@ -2100,22 +2074,14 @@ queue_rsp:
goto queue_full;
break;
case DMA_TO_DEVICE:
- spin_lock(&cmd->se_lun->lun_sep_lock);
- if (cmd->se_lun->lun_sep) {
- cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
- cmd->data_length;
- }
- spin_unlock(&cmd->se_lun->lun_sep_lock);
+ atomic_long_add(cmd->data_length,
+ &cmd->se_lun->lun_stats.rx_data_octets);
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
if (cmd->se_cmd_flags & SCF_BIDI) {
- spin_lock(&cmd->se_lun->lun_sep_lock);
- if (cmd->se_lun->lun_sep) {
- cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
- cmd->data_length;
- }
- spin_unlock(&cmd->se_lun->lun_sep_lock);
+ atomic_long_add(cmd->data_length,
+ &cmd->se_lun->lun_stats.tx_data_octets);
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
@@ -2172,6 +2138,12 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
static inline void transport_free_pages(struct se_cmd *cmd)
{
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
+ transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+ cmd->t_prot_sg = NULL;
+ cmd->t_prot_nents = 0;
+ }
+
if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
/*
* Release special case READ buffer payload required for
@@ -2195,10 +2167,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
-
- transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
- cmd->t_prot_sg = NULL;
- cmd->t_prot_nents = 0;
}
/**
@@ -2220,7 +2188,7 @@ static int transport_release_cmd(struct se_cmd *cmd)
* If this cmd has been setup with target_get_sess_cmd(), drop
* the kref and call ->release_cmd() in kref callback.
*/
- return target_put_sess_cmd(cmd->se_sess, cmd);
+ return target_put_sess_cmd(cmd);
}
/**
@@ -2337,6 +2305,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
int ret = 0;
bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+ if (cmd->prot_op != TARGET_PROT_NORMAL &&
+ !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
+ ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
+ cmd->prot_length, true);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
/*
* Determine if the TCM fabric module has already allocated physical
* memory, and is directly calling transport_generic_map_mem_to_cmd()
@@ -2362,14 +2338,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- if (cmd->prot_op != TARGET_PROT_NORMAL) {
- ret = target_alloc_sgl(&cmd->t_prot_sg,
- &cmd->t_prot_nents,
- cmd->prot_length, true);
- if (ret < 0)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
-
ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
cmd->data_length, zero_flag);
if (ret < 0)
@@ -2464,13 +2432,12 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
EXPORT_SYMBOL(transport_generic_free_cmd);
/* target_get_sess_cmd - Add command to active ->sess_cmd_list
- * @se_sess: session to reference
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
-int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
- bool ack_kref)
+int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
+ struct se_session *se_sess = se_cmd->se_sess;
unsigned long flags;
int ret = 0;
@@ -2492,7 +2459,7 @@ out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
if (ret && ack_kref)
- target_put_sess_cmd(se_sess, se_cmd);
+ target_put_sess_cmd(se_cmd);
return ret;
}
@@ -2521,11 +2488,12 @@ static void target_release_cmd_kref(struct kref *kref)
}
/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
- * @se_sess: session to reference
* @se_cmd: command descriptor to drop
*/
-int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+int target_put_sess_cmd(struct se_cmd *se_cmd)
{
+ struct se_session *se_sess = se_cmd->se_sess;
+
if (!se_sess) {
se_cmd->se_tfo->release_cmd(se_cmd);
return 1;
@@ -2591,31 +2559,10 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
-static int transport_clear_lun_ref_thread(void *p)
+void transport_clear_lun_ref(struct se_lun *lun)
{
- struct se_lun *lun = p;
-
percpu_ref_kill(&lun->lun_ref);
-
wait_for_completion(&lun->lun_ref_comp);
- complete(&lun->lun_shutdown_comp);
-
- return 0;
-}
-
-int transport_clear_lun_ref(struct se_lun *lun)
-{
- struct task_struct *kt;
-
- kt = kthread_run(transport_clear_lun_ref_thread, lun,
- "tcm_cl_%u", lun->unpacked_lun);
- if (IS_ERR(kt)) {
- pr_err("Unable to start clear_lun thread\n");
- return PTR_ERR(kt);
- }
- wait_for_completion(&lun->lun_shutdown_comp);
-
- return 0;
}
/**
@@ -2649,10 +2596,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
cmd->transport_state |= CMD_T_STOP;
- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
- " i_state: %d, t_state: %d, CMD_T_STOP\n",
- cmd, cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
+ cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2661,9 +2606,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
- pr_debug("wait_for_tasks: Stopped wait_for_completion("
- "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
- cmd->se_tfo->get_task_tag(cmd));
+ pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
+ cmd->tag);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2965,8 +2909,8 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
return 1;
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
- cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
+ cmd->t_task_cdb[0], cmd->tag);
cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -3005,9 +2949,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
transport_lun_remove_cmd(cmd);
- pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
- " ITT: 0x%08x\n", cmd->t_task_cdb[0],
- cmd->se_tfo->get_task_tag(cmd));
+ pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
+ cmd->t_task_cdb[0], cmd->tag);
trace_target_cmd_complete(cmd);
cmd->se_tfo->queue_status(cmd);
@@ -3033,6 +2976,11 @@ static void target_tmr_work(struct work_struct *work)
ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
TMR_FUNCTION_REJECTED;
+ if (tmr->response == TMR_FUNCTION_COMPLETE) {
+ target_ua_allocate_lun(cmd->se_sess->se_node_acl,
+ cmd->orig_fe_lun, 0x29,
+ ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
+ }
break;
case TMR_TARGET_WARM_RESET:
tmr->response = TMR_FUNCTION_REJECTED;
@@ -3067,3 +3015,22 @@ int transport_generic_handle_tmr(
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
+
+bool
+target_check_wce(struct se_device *dev)
+{
+ bool wce = false;
+
+ if (dev->transport->get_write_cache)
+ wce = dev->transport->get_write_cache(dev);
+ else if (dev->dev_attrib.emulate_write_cache > 0)
+ wce = true;
+
+ return wce;
+}
+
+bool
+target_check_fua(struct se_device *dev)
+{
+ return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
+}
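target_check_wce() and target_check_fua() centralize the write-cache/FUA policy that backends previously open-coded: FUA is honored only when a volatile write cache is active and emulate_fua_write is set. A usage sketch at a hypothetical backend call site:

static bool my_write_is_stable(struct se_device *dev, struct se_cmd *cmd)
{
	if (!target_check_wce(dev))
		return true;	/* write-through: data already on media */
	if (target_check_fua(dev) && (cmd->se_cmd_flags & SCF_FUA))
		return true;	/* FUA write was forced to the medium */
	return false;		/* volatile cache: a flush is still needed */
}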
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index e44cc94b12cb..be25eb807a5f 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -29,7 +29,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
@@ -50,9 +49,17 @@ target_scsi3_ua_check(struct se_cmd *cmd)
if (!nacl)
return 0;
- deve = nacl->device_list[cmd->orig_fe_lun];
- if (!atomic_read(&deve->ua_count))
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+ if (!deve) {
+ rcu_read_unlock();
return 0;
+ }
+ if (!atomic_read(&deve->ua_count)) {
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
/*
* From sam4r14, section 5.14 Unit attention condition:
*
@@ -79,18 +86,11 @@ target_scsi3_ua_check(struct se_cmd *cmd)
}
int core_scsi3_ua_allocate(
- struct se_node_acl *nacl,
- u32 unpacked_lun,
+ struct se_dev_entry *deve,
u8 asc,
u8 ascq)
{
- struct se_dev_entry *deve;
struct se_ua *ua, *ua_p, *ua_tmp;
- /*
- * PASSTHROUGH OPS
- */
- if (!nacl)
- return -EINVAL;
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
if (!ua) {
@@ -99,13 +99,9 @@ int core_scsi3_ua_allocate(
}
INIT_LIST_HEAD(&ua->ua_nacl_list);
- ua->ua_nacl = nacl;
ua->ua_asc = asc;
ua->ua_ascq = ascq;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[unpacked_lun];
-
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
/*
@@ -113,7 +109,6 @@ int core_scsi3_ua_allocate(
*/
if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
spin_unlock(&deve->ua_lock);
- spin_unlock_irq(&nacl->device_list_lock);
kmem_cache_free(se_ua_cache, ua);
return 0;
}
@@ -158,24 +153,40 @@ int core_scsi3_ua_allocate(
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
spin_unlock(&deve->ua_lock);
- spin_unlock_irq(&nacl->device_list_lock);
atomic_inc_mb(&deve->ua_count);
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
spin_unlock(&deve->ua_lock);
- spin_unlock_irq(&nacl->device_list_lock);
- pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
- " 0x%02x, ASCQ: 0x%02x\n",
- nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
+ " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
asc, ascq);
atomic_inc_mb(&deve->ua_count);
return 0;
}
+void target_ua_allocate_lun(struct se_node_acl *nacl,
+ u32 unpacked_lun, u8 asc, u8 ascq)
+{
+ struct se_dev_entry *deve;
+
+ if (!nacl)
+ return;
+
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, unpacked_lun);
+ if (!deve) {
+ rcu_read_unlock();
+ return;
+ }
+
+ core_scsi3_ua_allocate(deve, asc, ascq);
+ rcu_read_unlock();
+}
+
void core_scsi3_ua_release_all(
struct se_dev_entry *deve)
{
@@ -210,10 +221,14 @@ void core_scsi3_ua_for_check_condition(
if (!nacl)
return;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[cmd->orig_fe_lun];
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+ if (!deve) {
+ rcu_read_unlock();
+ return;
+ }
if (!atomic_read(&deve->ua_count)) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return;
}
/*
@@ -249,10 +264,10 @@ void core_scsi3_ua_for_check_condition(
atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
pr_debug("[%s]: %s UNIT ATTENTION condition with"
- " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
+ " INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
(dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
@@ -278,10 +293,14 @@ int core_scsi3_ua_clear_for_request_sense(
if (!nacl)
return -EINVAL;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[cmd->orig_fe_lun];
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+ if (!deve) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
if (!atomic_read(&deve->ua_count)) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return -EPERM;
}
/*
@@ -307,10 +326,10 @@ int core_scsi3_ua_clear_for_request_sense(
atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
- " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
+ " LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x,"
" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
cmd->orig_fe_lun, *asc, *ascq);
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index a6b56b364e7a..bd6e78ba153d 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -25,10 +25,14 @@
#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
+#define ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED 0x03
+#define ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED 0x0E
+
extern struct kmem_cache *se_ua_cache;
extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
-extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
+extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8);
+extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 549af9847c28..c448ef421ce7 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2013 Shaohua Li <shli@kernel.org>
* Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2015 Arrikto, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -30,7 +31,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
#include <linux/target_core_user.h>
@@ -168,6 +168,11 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->tcmu_dev = udev;
tcmu_cmd->data_length = se_cmd->data_length;
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+ tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
+ }
+
tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
idr_preload(GFP_KERNEL);
@@ -226,9 +231,106 @@ static inline size_t head_to_end(size_t head, size_t size)
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
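
For reference, head_to_end() and UPDATE_HEAD() implement the circular data area: head_to_end() is the number of contiguous bytes left before the ring wraps, and UPDATE_HEAD() advances the index modulo the ring size with release semantics, so userspace observes the copied data before it observes the new index. A worked example with an assumed 64 KiB data area:

    /* data_size = 65536, data_head = 61440 (4 KiB short of the end) */
    head_to_end(61440, 65536);            /* -> 4096 contiguous bytes  */
    /* a 6144-byte sg entry is therefore copied as 4096 + 2048:       */
    UPDATE_HEAD(data_head, 4096, 65536);  /* head wraps around to 0    */
    UPDATE_HEAD(data_head, 2048, 65536);  /* head -> 2048              */

This is exactly the split the new helpers below perform when sg->length != copy_bytes.
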
+static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
+ struct scatterlist *data_sg, unsigned int data_nents,
+ struct iovec **iov, int *iov_cnt, bool copy_data)
+{
+ int i;
+ void *from, *to;
+ size_t copy_bytes;
+ struct scatterlist *sg;
+
+ for_each_sg(data_sg, sg, data_nents, i) {
+ copy_bytes = min_t(size_t, sg->length,
+ head_to_end(udev->data_head, udev->data_size));
+ from = kmap_atomic(sg_page(sg)) + sg->offset;
+ to = (void *) udev->mb_addr + udev->data_off + udev->data_head;
+
+ if (copy_data) {
+ memcpy(to, from, copy_bytes);
+ tcmu_flush_dcache_range(to, copy_bytes);
+ }
+
+ /* Even iov_base is relative to mb_addr */
+ (*iov)->iov_len = copy_bytes;
+ (*iov)->iov_base = (void __user *) udev->data_off +
+ udev->data_head;
+ (*iov_cnt)++;
+ (*iov)++;
+
+ UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
+
+ /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
+ if (sg->length != copy_bytes) {
+ void *from_skip = from + copy_bytes;
+
+ copy_bytes = sg->length - copy_bytes;
+
+ (*iov)->iov_len = copy_bytes;
+ (*iov)->iov_base = (void __user *) udev->data_off +
+ udev->data_head;
+
+ if (copy_data) {
+ to = (void *) udev->mb_addr +
+ udev->data_off + udev->data_head;
+ memcpy(to, from_skip, copy_bytes);
+ tcmu_flush_dcache_range(to, copy_bytes);
+ }
+
+ (*iov_cnt)++;
+ (*iov)++;
+
+ UPDATE_HEAD(udev->data_head,
+ copy_bytes, udev->data_size);
+ }
+
+ kunmap_atomic(from - sg->offset);
+ }
+}
+
+static void gather_and_free_data_area(struct tcmu_dev *udev,
+ struct scatterlist *data_sg, unsigned int data_nents)
+{
+ int i;
+ void *from, *to;
+ size_t copy_bytes;
+ struct scatterlist *sg;
+
+ /* It'd be easier to look at entry's iovec again, but UAM */
+ for_each_sg(data_sg, sg, data_nents, i) {
+ copy_bytes = min_t(size_t, sg->length,
+ head_to_end(udev->data_tail, udev->data_size));
+
+ to = kmap_atomic(sg_page(sg)) + sg->offset;
+ WARN_ON(sg->length + sg->offset > PAGE_SIZE);
+ from = (void *) udev->mb_addr +
+ udev->data_off + udev->data_tail;
+ tcmu_flush_dcache_range(from, copy_bytes);
+ memcpy(to, from, copy_bytes);
+
+ UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
+
+ /* Uh oh, wrapped the data buffer for this sg's data */
+ if (sg->length != copy_bytes) {
+ void *to_skip = to + copy_bytes;
+
+ from = (void *) udev->mb_addr +
+ udev->data_off + udev->data_tail;
+ WARN_ON(udev->data_tail);
+ copy_bytes = sg->length - copy_bytes;
+ tcmu_flush_dcache_range(from, copy_bytes);
+ memcpy(to_skip, from, copy_bytes);
+
+ UPDATE_HEAD(udev->data_tail,
+ copy_bytes, udev->data_size);
+ }
+ kunmap_atomic(to - sg->offset);
+ }
+}
+
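
One subtlety shared by both helpers: the pointer returned by kmap_atomic() is advanced by sg->offset before use, so the matching kunmap_atomic() must subtract sg->offset again to pass back the address the mapping was actually created at:

    from = kmap_atomic(sg_page(sg)) + sg->offset;  /* map page, skip offset   */
    /* ... copy, possibly split into two chunks across the wrap ... */
    kunmap_atomic(from - sg->offset);              /* unmap original address  */
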
/*
- * We can't queue a command until we have space available on the cmd ring *and* space
- * space avail on the data ring.
+ * We can't queue a command until we have space available on the cmd ring *and*
+ * space available on the data ring.
*
* Called with ring lock held.
*/
@@ -276,12 +378,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
size_t base_command_size, command_size;
struct tcmu_mailbox *mb;
struct tcmu_cmd_entry *entry;
- int i;
- struct scatterlist *sg;
struct iovec *iov;
- int iov_cnt = 0;
+ int iov_cnt;
uint32_t cmd_head;
uint64_t cdb_off;
+ bool copy_to_data_area;
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
return -EINVAL;
@@ -294,7 +395,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
* b/c size == offsetof one-past-element.
*/
base_command_size = max(offsetof(struct tcmu_cmd_entry,
- req.iov[se_cmd->t_data_nents + 2]),
+ req.iov[se_cmd->t_bidi_data_nents +
+ se_cmd->t_data_nents + 2]),
sizeof(struct tcmu_cmd_entry));
command_size = base_command_size
+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -362,53 +464,20 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
* Fix up iovecs, and handle if allocation in data ring wrapped.
*/
iov = &entry->req.iov[0];
- for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
- size_t copy_bytes = min((size_t)sg->length,
- head_to_end(udev->data_head, udev->data_size));
- void *from = kmap_atomic(sg_page(sg)) + sg->offset;
- void *to = (void *) mb + udev->data_off + udev->data_head;
-
- if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) {
- memcpy(to, from, copy_bytes);
- tcmu_flush_dcache_range(to, copy_bytes);
- }
-
- /* Even iov_base is relative to mb_addr */
- iov->iov_len = copy_bytes;
- iov->iov_base = (void __user *) udev->data_off +
- udev->data_head;
- iov_cnt++;
- iov++;
-
- UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
-
- /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
- if (sg->length != copy_bytes) {
- from += copy_bytes;
- copy_bytes = sg->length - copy_bytes;
-
- iov->iov_len = copy_bytes;
- iov->iov_base = (void __user *) udev->data_off +
- udev->data_head;
-
- if (se_cmd->data_direction == DMA_TO_DEVICE) {
- to = (void *) mb + udev->data_off + udev->data_head;
- memcpy(to, from, copy_bytes);
- tcmu_flush_dcache_range(to, copy_bytes);
- }
-
- iov_cnt++;
- iov++;
-
- UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
- }
-
- kunmap_atomic(from);
- }
+ iov_cnt = 0;
+ copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
+ || se_cmd->se_cmd_flags & SCF_BIDI);
+ alloc_and_scatter_data_area(udev, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area);
entry->req.iov_cnt = iov_cnt;
- entry->req.iov_bidi_cnt = 0;
entry->req.iov_dif_cnt = 0;
+ /* Handle BIDI commands */
+ iov_cnt = 0;
+ alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+ se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
+ entry->req.iov_bidi_cnt = iov_cnt;
+
/* All offsets relative to mb_addr, not start of entry! */
cdb_off = CMDR_OFF + cmd_head + base_command_size;
memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
@@ -481,47 +550,22 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
se_cmd->scsi_sense_length);
UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
- }
- else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
- struct scatterlist *sg;
- int i;
-
- /* It'd be easier to look at entry's iovec again, but UAM */
- for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
- size_t copy_bytes;
- void *to;
- void *from;
-
- copy_bytes = min((size_t)sg->length,
- head_to_end(udev->data_tail, udev->data_size));
-
- to = kmap_atomic(sg_page(sg)) + sg->offset;
- WARN_ON(sg->length + sg->offset > PAGE_SIZE);
- from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
- tcmu_flush_dcache_range(from, copy_bytes);
- memcpy(to, from, copy_bytes);
-
- UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
-
- /* Uh oh, wrapped the data buffer for this sg's data */
- if (sg->length != copy_bytes) {
- from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
- WARN_ON(udev->data_tail);
- to += copy_bytes;
- copy_bytes = sg->length - copy_bytes;
- tcmu_flush_dcache_range(from, copy_bytes);
- memcpy(to, from, copy_bytes);
-
- UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
- }
-
- kunmap_atomic(to);
- }
-
+ } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ /* Discard data_out buffer */
+ UPDATE_HEAD(udev->data_tail,
+ (size_t)se_cmd->t_data_sg->length, udev->data_size);
+
+ /* Get Data-In buffer */
+ gather_and_free_data_area(udev,
+ se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+ } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+ gather_and_free_data_area(udev,
+ se_cmd->t_data_sg, se_cmd->t_data_nents);
} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
- } else {
- pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction);
+ } else if (se_cmd->data_direction != DMA_NONE) {
+ pr_warn("TCMU: data direction was %d!\n",
+ se_cmd->data_direction);
}
target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
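
For BIDI commands, the queue path above places the data-out buffer first and the data-in (bidi) buffer immediately after it in the data area, so completion has to consume them in the same order: skip the tail past the stale data-out bytes, then gather data-in back into the bidi scatterlist. Note that both the discard here and the sizing in tcmu_alloc_cmd() look only at the first scatterlist entry, so this path appears to assume single-entry lists from BIDI submitters:

    /* consume the data area in allocation order */
    UPDATE_HEAD(udev->data_tail,
                (size_t)se_cmd->t_data_sg->length, udev->data_size); /* data-out */
    gather_and_free_data_area(udev, se_cmd->t_bidi_data_sg,
                              se_cmd->t_bidi_data_nents);            /* data-in  */
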
@@ -910,6 +954,14 @@ static int tcmu_check_pending_cmd(int id, void *p, void *data)
return -EINVAL;
}
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+
+ kfree(udev);
+}
+
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -935,8 +987,7 @@ static void tcmu_free_device(struct se_device *dev)
kfree(udev->uio_info.name);
kfree(udev->name);
}
-
- kfree(udev);
+ call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
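
Replacing the direct kfree(udev) with call_rcu() matters because this series dereferences device structures under rcu_read_lock() elsewhere; the backing memory must survive until a grace period elapses. The callback resolves the containing tcmu_dev via container_of() before freeing, which is presumably why an explicit callback is used here rather than a kfree_rcu() on the embedded se_device.
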
enum {
@@ -1054,27 +1105,7 @@ tcmu_parse_cdb(struct se_cmd *cmd)
return passthrough_parse_cdb(cmd, tcmu_pass_op);
}
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
-TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);
-
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
-TB_DEV_ATTR_RO(tcmu, hw_block_size);
-
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
-TB_DEV_ATTR_RO(tcmu, hw_max_sectors);
-
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
-TB_DEV_ATTR_RO(tcmu, hw_queue_depth);
-
-static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
- &tcmu_dev_attrib_hw_pi_prot_type.attr,
- &tcmu_dev_attrib_hw_block_size.attr,
- &tcmu_dev_attrib_hw_max_sectors.attr,
- &tcmu_dev_attrib_hw_queue_depth.attr,
- NULL,
-};
-
-static struct se_subsystem_api tcmu_template = {
+static const struct target_backend_ops tcmu_ops = {
.name = "user",
.inquiry_prod = "USER",
.inquiry_rev = TCMU_VERSION,
@@ -1090,11 +1121,11 @@ static struct se_subsystem_api tcmu_template = {
.show_configfs_dev_params = tcmu_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = tcmu_get_blocks,
+ .tb_dev_attrib_attrs = passthrough_attrib_attrs,
};
static int __init tcmu_module_init(void)
{
- struct target_backend_cits *tbc = &tcmu_template.tb_cits;
int ret;
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1117,10 +1148,7 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}
- target_core_setup_sub_cits(&tcmu_template);
- tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;
-
- ret = transport_subsystem_register(&tcmu_template);
+ ret = transport_backend_register(&tcmu_ops);
if (ret)
goto out_unreg_genl;
@@ -1138,7 +1166,7 @@ out_free_cache:
static void __exit tcmu_module_exit(void)
{
- transport_subsystem_release(&tcmu_template);
+ target_backend_unregister(&tcmu_ops);
genl_unregister_family(&tcmu_genl_family);
root_device_unregister(tcmu_root_device);
kmem_cache_destroy(tcmu_cmd_cache);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 5ec0d00edaa3..4515f52546f8 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -31,7 +31,6 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_pr.h"
@@ -348,8 +347,7 @@ struct xcopy_pt_cmd {
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};
-static struct se_port xcopy_pt_port;
-static struct se_portal_group xcopy_pt_tpg;
+struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;
@@ -358,11 +356,6 @@ static char *xcopy_pt_get_fabric_name(void)
return "xcopy-pt";
}
-static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
@@ -423,7 +416,6 @@ static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
static const struct target_core_fabric_ops xcopy_pt_tfo = {
.get_fabric_name = xcopy_pt_get_fabric_name,
- .get_task_tag = xcopy_pt_get_tag,
.get_cmd_state = xcopy_pt_get_cmd_state,
.release_cmd = xcopy_pt_release_cmd,
.check_stop_free = xcopy_pt_check_stop_free,
@@ -445,17 +437,11 @@ int target_xcopy_setup_pt(void)
return -ENOMEM;
}
- memset(&xcopy_pt_port, 0, sizeof(struct se_port));
- INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
- INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
- mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);
-
memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
- xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
@@ -496,10 +482,6 @@ static void target_xcopy_setup_pt_port(
*/
if (remote_port) {
xpt_cmd->remote_port = remote_port;
- pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
- pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
- " cmd->se_lun->lun_sep for X-COPY data PUSH\n",
- pt_cmd->se_lun->lun_sep);
} else {
pt_cmd->se_lun = ec_cmd->se_lun;
pt_cmd->se_dev = ec_cmd->se_dev;
@@ -519,10 +501,6 @@ static void target_xcopy_setup_pt_port(
*/
if (remote_port) {
xpt_cmd->remote_port = remote_port;
- pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
- pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
- " cmd->se_lun->lun_sep for X-COPY data PULL\n",
- pt_cmd->se_lun->lun_sep);
} else {
pt_cmd->se_lun = ec_cmd->se_lun;
pt_cmd->se_dev = ec_cmd->se_dev;
@@ -574,6 +552,7 @@ static int target_xcopy_setup_pt_cmd(
xpt_cmd->xcopy_op = xop;
target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
+ cmd->tag = 0;
sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
if (sense_rc) {
ret = -EINVAL;
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 881deb3d499a..39909dadef3e 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -80,8 +80,8 @@ struct ft_node_auth {
* Node ACL for FC remote port session.
*/
struct ft_node_acl {
- struct ft_node_auth node_auth;
struct se_node_acl se_node_acl;
+ struct ft_node_auth node_auth;
};
struct ft_lun {
@@ -157,7 +157,6 @@ int ft_queue_status(struct se_cmd *);
int ft_queue_data_in(struct se_cmd *);
int ft_write_pending(struct se_cmd *);
int ft_write_pending_status(struct se_cmd *);
-u32 ft_get_task_tag(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
void ft_queue_tm_resp(struct se_cmd *);
void ft_aborted_task(struct se_cmd *);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 1bf78e7c994c..68031723e5be 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -36,7 +36,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
@@ -243,15 +242,6 @@ int ft_write_pending(struct se_cmd *se_cmd)
return 0;
}
-u32 ft_get_task_tag(struct se_cmd *se_cmd)
-{
- struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
- if (cmd->aborted)
- return ~0;
- return fc_seq_exch(cmd->seq)->rxid;
-}
-
int ft_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
@@ -564,6 +554,7 @@ static void ft_send_work(struct work_struct *work)
}
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+ cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path.
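
This is one instance of a tree-wide conversion in this series: the ->get_task_tag() fabric callback is removed and each fabric instead stores its initiator tag in se_cmd->tag before dispatch (the exchange RXID here, the UAS/BOT tags in tcm_usb_gadget below, a constant 0 for xcopy and vhost). Illustratively, the per-fabric pattern reduces to a single assignment (fabric_specific_tag() being a hypothetical stand-in):

    cmd->se_cmd.tag = fabric_specific_tag(cmd);   /* e.g. RXID, BOT tag, 0 */
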
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 86b699b94c7b..16670933013b 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -39,13 +39,10 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
-static const struct target_core_fabric_ops ft_fabric_ops;
-
static LIST_HEAD(ft_wwn_list);
DEFINE_MUTEX(ft_lport_lock);
@@ -194,48 +191,17 @@ static struct configfs_attribute *ft_nacl_base_attrs[] = {
* Add ACL for an initiator. The ACL is named arbitrarily.
* The port_name and/or node_name are attributes.
*/
-static struct se_node_acl *ft_add_acl(
- struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
+static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
{
- struct ft_node_acl *acl;
- struct ft_tpg *tpg;
+ struct ft_node_acl *acl =
+ container_of(nacl, struct ft_node_acl, se_node_acl);
u64 wwpn;
- u32 q_depth;
-
- pr_debug("add acl %s\n", name);
- tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
- acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
- if (!acl)
- return ERR_PTR(-ENOMEM);
acl->node_auth.port_name = wwpn;
-
- q_depth = 32; /* XXX bogus default - get from tpg? */
- return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
- &acl->se_node_acl, name, q_depth);
-}
-
-static void ft_del_acl(struct se_node_acl *se_acl)
-{
- struct se_portal_group *se_tpg = se_acl->se_tpg;
- struct ft_tpg *tpg;
- struct ft_node_acl *acl = container_of(se_acl,
- struct ft_node_acl, se_node_acl);
-
- pr_debug("del acl %s\n",
- config_item_name(&se_acl->acl_group.cg_item));
-
- tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
- pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
- acl, se_acl, tpg, &tpg->se_tpg);
-
- core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
- kfree(acl);
+ return 0;
}
struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
@@ -245,7 +211,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
struct se_portal_group *se_tpg = &tpg->se_tpg;
struct se_node_acl *se_acl;
- spin_lock_irq(&se_tpg->acl_node_lock);
+ mutex_lock(&se_tpg->acl_node_mutex);
list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
pr_debug("acl %p port_name %llx\n",
@@ -259,33 +225,10 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
break;
}
}
- spin_unlock_irq(&se_tpg->acl_node_lock);
+ mutex_unlock(&se_tpg->acl_node_mutex);
return found;
}
-static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
- struct ft_node_acl *acl;
-
- acl = kzalloc(sizeof(*acl), GFP_KERNEL);
- if (!acl) {
- pr_err("Unable to allocate struct ft_node_acl\n");
- return NULL;
- }
- pr_debug("acl %p\n", acl);
- return &acl->se_node_acl;
-}
-
-static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
- struct se_node_acl *se_acl)
-{
- struct ft_node_acl *acl = container_of(se_acl,
- struct ft_node_acl, se_node_acl);
-
- pr_debug("acl %p\n", acl);
- kfree(acl);
-}
-
/*
* local_port port_group (tpg) ops.
*/
@@ -333,8 +276,7 @@ static struct se_portal_group *ft_add_tpg(
return NULL;
}
- ret = core_tpg_register(&ft_fabric_ops, wwn, &tpg->se_tpg,
- tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
if (ret < 0) {
destroy_workqueue(wq);
kfree(tpg);
@@ -459,6 +401,11 @@ static struct configfs_attribute *ft_wwn_attrs[] = {
NULL,
};
+static inline struct ft_tpg *ft_tpg(struct se_portal_group *se_tpg)
+{
+ return container_of(se_tpg, struct ft_tpg, se_tpg);
+}
+
static char *ft_get_fabric_name(void)
{
return "fc";
@@ -466,25 +413,16 @@ static char *ft_get_fabric_name(void)
static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
{
- struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->lport_wwn->name;
+ return ft_tpg(se_tpg)->lport_wwn->name;
}
static u16 ft_get_tag(struct se_portal_group *se_tpg)
{
- struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
-
/*
* This tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
*/
- return tpg->index;
-}
-
-static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
+ return ft_tpg(se_tpg)->index;
}
static int ft_check_false(struct se_portal_group *se_tpg)
@@ -498,28 +436,20 @@ static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
- struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->index;
+ return ft_tpg(se_tpg)->index;
}
static const struct target_core_fabric_ops ft_fabric_ops = {
.module = THIS_MODULE,
.name = "fc",
+ .node_acl_size = sizeof(struct ft_node_acl),
.get_fabric_name = ft_get_fabric_name,
- .get_fabric_proto_ident = fc_get_fabric_proto_ident,
.tpg_get_wwn = ft_get_fabric_wwn,
.tpg_get_tag = ft_get_tag,
- .tpg_get_default_depth = ft_get_default_depth,
- .tpg_get_pr_transport_id = fc_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
.tpg_check_demo_mode = ft_check_false,
.tpg_check_demo_mode_cache = ft_check_false,
.tpg_check_demo_mode_write_protect = ft_check_false,
.tpg_check_prod_mode_write_protect = ft_check_false,
- .tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl,
- .tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
.tpg_get_inst_index = ft_tpg_get_inst_index,
.check_stop_free = ft_check_stop_free,
.release_cmd = ft_release_cmd,
@@ -530,7 +460,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
.write_pending = ft_write_pending,
.write_pending_status = ft_write_pending_status,
.set_default_node_attributes = ft_set_default_node_attr,
- .get_task_tag = ft_get_task_tag,
.get_cmd_state = ft_get_cmd_state,
.queue_data_in = ft_queue_data_in,
.queue_status = ft_queue_status,
@@ -544,12 +473,7 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
.fabric_drop_wwn = &ft_del_wwn,
.fabric_make_tpg = &ft_add_tpg,
.fabric_drop_tpg = &ft_del_tpg,
- .fabric_post_link = NULL,
- .fabric_pre_unlink = NULL,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
- .fabric_make_nodeacl = &ft_add_acl,
- .fabric_drop_nodeacl = &ft_del_acl,
+ .fabric_init_nodeacl = &ft_init_nodeacl,
.tfc_wwn_attrs = ft_wwn_attrs,
.tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs,
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index fe585d1cce23..4b0fedd6bd4b 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -44,7 +44,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index f2a616d4f2c4..31a9e3fb98c5 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -36,7 +36,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 978204333c94..d75a66c72750 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -22,6 +22,7 @@
#include <linux/pm_runtime.h>
#include <linux/console.h>
#include <linux/pm_qos.h>
+#include <linux/pm_wakeirq.h>
#include <linux/dma-mapping.h>
#include "8250.h"
@@ -552,17 +553,6 @@ static void omap8250_uart_qos_work(struct work_struct *work)
pm_qos_update_request(&priv->pm_qos_request, priv->latency);
}
-static irqreturn_t omap_wake_irq(int irq, void *dev_id)
-{
- struct uart_port *port = dev_id;
- int ret;
-
- ret = port->handle_irq(port);
- if (ret)
- return IRQ_HANDLED;
- return IRQ_NONE;
-}
-
#ifdef CONFIG_SERIAL_8250_DMA
static int omap_8250_dma_handle_irq(struct uart_port *port);
#endif
@@ -596,11 +586,9 @@ static int omap_8250_startup(struct uart_port *port)
int ret;
if (priv->wakeirq) {
- ret = request_irq(priv->wakeirq, omap_wake_irq,
- port->irqflags, "uart wakeup irq", port);
+ ret = dev_pm_set_dedicated_wake_irq(port->dev, priv->wakeirq);
if (ret)
return ret;
- disable_irq(priv->wakeirq);
}
pm_runtime_get_sync(port->dev);
@@ -649,8 +637,7 @@ static int omap_8250_startup(struct uart_port *port)
err:
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
- if (priv->wakeirq)
- free_irq(priv->wakeirq, port);
+ dev_pm_clear_wake_irq(port->dev);
return ret;
}
@@ -682,10 +669,8 @@ static void omap_8250_shutdown(struct uart_port *port)
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
-
free_irq(port->irq, port);
- if (priv->wakeirq)
- free_irq(priv->wakeirq, port);
+ dev_pm_clear_wake_irq(port->dev);
}
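
This driver (and omap-serial further down) drops its hand-rolled request_irq()/enable_irq()/disable_irq() juggling of the wake-up interrupt in favour of the dev_pm_set_dedicated_wake_irq() helper, which lets the PM core arm and disarm the IRQ across runtime suspend and system sleep. The resulting driver pattern reduces to:

    #include <linux/pm_wakeirq.h>

    /* startup/probe: hand the dedicated wake IRQ to the PM core */
    ret = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
    if (ret)
        return ret;

    /* shutdown/remove: release it again */
    dev_pm_clear_wake_irq(dev);

which is why the wakeups_enabled bookkeeping and the manual enable/disable helpers can be deleted outright.
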
static void omap_8250_throttle(struct uart_port *port)
@@ -1226,31 +1211,6 @@ static int omap8250_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-
-static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv,
- bool enable)
-{
- if (!priv->wakeirq)
- return;
-
- if (enable)
- enable_irq(priv->wakeirq);
- else
- disable_irq_nosync(priv->wakeirq);
-}
-
-static void omap8250_enable_wakeup(struct omap8250_priv *priv,
- bool enable)
-{
- if (enable == priv->wakeups_enabled)
- return;
-
- omap8250_enable_wakeirq(priv, enable);
- priv->wakeups_enabled = enable;
-}
-#endif
-
#ifdef CONFIG_PM_SLEEP
static int omap8250_prepare(struct device *dev)
{
@@ -1277,11 +1237,6 @@ static int omap8250_suspend(struct device *dev)
serial8250_suspend_port(priv->line);
flush_work(&priv->qos_work);
-
- if (device_may_wakeup(dev))
- omap8250_enable_wakeup(priv, true);
- else
- omap8250_enable_wakeup(priv, false);
return 0;
}
@@ -1289,9 +1244,6 @@ static int omap8250_resume(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
- if (device_may_wakeup(dev))
- omap8250_enable_wakeup(priv, false);
-
serial8250_resume_port(priv->line);
return 0;
}
@@ -1333,7 +1285,6 @@ static int omap8250_runtime_suspend(struct device *dev)
return -EBUSY;
}
- omap8250_enable_wakeup(priv, true);
if (up->dma)
omap_8250_rx_dma(up, UART_IIR_RX_TIMEOUT);
@@ -1354,7 +1305,6 @@ static int omap8250_runtime_resume(struct device *dev)
return 0;
up = serial8250_get_port(priv->line);
- omap8250_enable_wakeup(priv, false);
loss_cntx = omap8250_lost_context(up);
if (loss_cntx)
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 7f49172ccd86..7a2172b5e93c 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -38,6 +38,7 @@
#include <linux/serial_core.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/gpio.h>
@@ -160,7 +161,6 @@ struct uart_omap_port {
unsigned long port_activity;
int context_loss_cnt;
u32 errata;
- u8 wakeups_enabled;
u32 features;
int rts_gpio;
@@ -209,28 +209,11 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
return pdata->get_context_loss_count(up->dev);
}
-static inline void serial_omap_enable_wakeirq(struct uart_omap_port *up,
- bool enable)
-{
- if (!up->wakeirq)
- return;
-
- if (enable)
- enable_irq(up->wakeirq);
- else
- disable_irq_nosync(up->wakeirq);
-}
-
+/* REVISIT: Remove this when omap3 boots in device tree only mode */
static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
{
struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
- if (enable == up->wakeups_enabled)
- return;
-
- serial_omap_enable_wakeirq(up, enable);
- up->wakeups_enabled = enable;
-
if (!pdata || !pdata->enable_wakeup)
return;
@@ -750,13 +733,11 @@ static int serial_omap_startup(struct uart_port *port)
/* Optional wake-up IRQ */
if (up->wakeirq) {
- retval = request_irq(up->wakeirq, serial_omap_irq,
- up->port.irqflags, up->name, up);
+ retval = dev_pm_set_dedicated_wake_irq(up->dev, up->wakeirq);
if (retval) {
free_irq(up->port.irq, up);
return retval;
}
- disable_irq(up->wakeirq);
}
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
@@ -845,8 +826,7 @@ static void serial_omap_shutdown(struct uart_port *port)
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
free_irq(up->port.irq, up);
- if (up->wakeirq)
- free_irq(up->wakeirq, up);
+ dev_pm_clear_wake_irq(up->dev);
}
static void serial_omap_uart_qos_work(struct work_struct *work)
@@ -1139,13 +1119,6 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
- if (!device_may_wakeup(up->dev)) {
- if (!state)
- pm_runtime_forbid(up->dev);
- else
- pm_runtime_allow(up->dev);
- }
-
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 3cc109f3c9c8..d2259c663996 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2936,7 +2936,7 @@ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
if (fsg_lun_is_open(lun)) {
p = "(error)";
if (pathbuf) {
- p = d_path(&lun->filp->f_path, pathbuf, PATH_MAX);
+ p = file_path(lun->filp, pathbuf, PATH_MAX);
if (IS_ERR(p))
p = "(error)";
}
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index 648f9e489b39..d62683017cf3 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -341,7 +341,7 @@ ssize_t fsg_show_file(struct fsg_lun *curlun, struct rw_semaphore *filesem,
down_read(filesem);
if (fsg_lun_is_open(curlun)) { /* Get the complete pathname */
- p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
+ p = file_path(curlun->filp, buf, PAGE_SIZE - 1);
if (IS_ERR(p))
rc = PTR_ERR(p);
else {
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 6ce932f90ef8..c3c48088fced 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -20,7 +20,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <asm/unaligned.h>
@@ -28,8 +27,6 @@
USB_GADGET_COMPOSITE_OPTIONS();
-static const struct target_core_fabric_ops usbg_ops;
-
static inline struct f_uas *to_f_uas(struct usb_function *f)
{
return container_of(f, struct f_uas, function);
@@ -1111,6 +1108,7 @@ static int usbg_submit_command(struct f_uas *fu,
memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
cmd->tag = be16_to_cpup(&cmd_iu->tag);
+ cmd->se_cmd.tag = cmd->tag;
if (fu->flags & USBG_USE_STREAMS) {
if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
goto err;
@@ -1244,6 +1242,7 @@ static int bot_submit_command(struct f_uas *fu,
cmd->unpacked_lun = cbw->Lun;
cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
+ cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag);
INIT_WORK(&cmd->work, bot_cmd_work);
ret = queue_work(tpg->workqueue, &cmd->work);
@@ -1273,23 +1272,6 @@ static char *usbg_get_fabric_name(void)
return "usb_gadget";
}
-static u8 usbg_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
- struct usbg_tport *tport = tpg->tport;
- u8 proto_id;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- default:
- proto_id = sas_get_fabric_proto_ident(se_tpg);
- break;
- }
-
- return proto_id;
-}
-
static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct usbg_tpg *tpg = container_of(se_tpg,
@@ -1306,97 +1288,6 @@ static u16 usbg_get_tag(struct se_portal_group *se_tpg)
return tpg->tport_tpgt;
}
-static u32 usbg_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static u32 usbg_get_pr_transport_id(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code,
- unsigned char *buf)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
- struct usbg_tport *tport = tpg->tport;
- int ret = 0;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- default:
- ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- break;
- }
-
- return ret;
-}
-
-static u32 usbg_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
- struct usbg_tport *tport = tpg->tport;
- int ret = 0;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- default:
- ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- break;
- }
-
- return ret;
-}
-
-static char *usbg_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
- struct usbg_tport *tport = tpg->tport;
- char *tid = NULL;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- default:
- tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- }
-
- return tid;
-}
-
-static struct se_node_acl *usbg_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
- struct usbg_nacl *nacl;
-
- nacl = kzalloc(sizeof(struct usbg_nacl), GFP_KERNEL);
- if (!nacl)
- return NULL;
-
- return &nacl->se_node_acl;
-}
-
-static void usbg_release_fabric_acl(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
-{
- struct usbg_nacl *nacl = container_of(se_nacl,
- struct usbg_nacl, se_node_acl);
- kfree(nacl);
-}
-
static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
return 1;
@@ -1447,18 +1338,6 @@ static void usbg_set_default_node_attrs(struct se_node_acl *nacl)
return;
}
-static u32 usbg_get_task_tag(struct se_cmd *se_cmd)
-{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
- struct f_uas *fu = cmd->fu;
-
- if (fu->flags & USBG_IS_BOT)
- return le32_to_cpu(cmd->bot_tag);
- else
- return cmd->tag;
-}
-
static int usbg_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
@@ -1488,50 +1367,11 @@ static const char *usbg_check_wwn(const char *name)
return n;
}
-static struct se_node_acl *usbg_make_nodeacl(
- struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
-{
- struct se_node_acl *se_nacl, *se_nacl_new;
- struct usbg_nacl *nacl;
- u64 wwpn = 0;
- u32 nexus_depth;
- const char *wnn_name;
-
- wnn_name = usbg_check_wwn(name);
- if (!wnn_name)
- return ERR_PTR(-EINVAL);
- se_nacl_new = usbg_alloc_fabric_acl(se_tpg);
- if (!(se_nacl_new))
- return ERR_PTR(-ENOMEM);
-
- nexus_depth = 1;
- /*
- * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a NodeACL from demo mode -> explict
- */
- se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
- name, nexus_depth);
- if (IS_ERR(se_nacl)) {
- usbg_release_fabric_acl(se_tpg, se_nacl_new);
- return se_nacl;
- }
- /*
- * Locate our struct usbg_nacl and set the FC Nport WWPN
- */
- nacl = container_of(se_nacl, struct usbg_nacl, se_node_acl);
- nacl->iport_wwpn = wwpn;
- snprintf(nacl->iport_name, sizeof(nacl->iport_name), "%s", name);
- return se_nacl;
-}
-
-static void usbg_drop_nodeacl(struct se_node_acl *se_acl)
+static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
- struct usbg_nacl *nacl = container_of(se_acl,
- struct usbg_nacl, se_node_acl);
- core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
- kfree(nacl);
+ if (!usbg_check_wwn(name))
+ return -EINVAL;
+ return 0;
}
struct usbg_tpg *the_only_tpg_I_currently_have;
@@ -1571,8 +1411,11 @@ static struct se_portal_group *usbg_make_tpg(
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
- ret = core_tpg_register(&usbg_ops, wwn, &tpg->se_tpg, tpg,
- TRANSPORT_TPG_TYPE_NORMAL);
+ /*
+ * SPC doesn't assign a protocol identifier for USB-SCSI, so we
+	 * pretend to be SAS.
+ */
+ ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);
if (ret < 0) {
destroy_workqueue(tpg->workqueue);
kfree(tpg);
@@ -1866,19 +1709,12 @@ static const struct target_core_fabric_ops usbg_ops = {
.module = THIS_MODULE,
.name = "usb_gadget",
.get_fabric_name = usbg_get_fabric_name,
- .get_fabric_proto_ident = usbg_get_fabric_proto_ident,
.tpg_get_wwn = usbg_get_fabric_wwn,
.tpg_get_tag = usbg_get_tag,
- .tpg_get_default_depth = usbg_get_default_depth,
- .tpg_get_pr_transport_id = usbg_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = usbg_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = usbg_parse_pr_out_transport_id,
.tpg_check_demo_mode = usbg_check_true,
.tpg_check_demo_mode_cache = usbg_check_false,
.tpg_check_demo_mode_write_protect = usbg_check_false,
.tpg_check_prod_mode_write_protect = usbg_check_false,
- .tpg_alloc_fabric_acl = usbg_alloc_fabric_acl,
- .tpg_release_fabric_acl = usbg_release_fabric_acl,
.tpg_get_inst_index = usbg_tpg_get_inst_index,
.release_cmd = usbg_release_cmd,
.shutdown_session = usbg_shutdown_session,
@@ -1888,7 +1724,6 @@ static const struct target_core_fabric_ops usbg_ops = {
.write_pending = usbg_send_write_request,
.write_pending_status = usbg_write_pending_status,
.set_default_node_attributes = usbg_set_default_node_attrs,
- .get_task_tag = usbg_get_task_tag,
.get_cmd_state = usbg_get_cmd_state,
.queue_data_in = usbg_send_read_response,
.queue_status = usbg_send_status_response,
@@ -1902,10 +1737,7 @@ static const struct target_core_fabric_ops usbg_ops = {
.fabric_drop_tpg = usbg_drop_tpg,
.fabric_post_link = usbg_port_link,
.fabric_pre_unlink = usbg_port_unlink,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
- .fabric_make_nodeacl = usbg_make_nodeacl,
- .fabric_drop_nodeacl = usbg_drop_nodeacl,
+ .fabric_init_nodeacl = usbg_init_nodeacl,
.tfc_wwn_attrs = usbg_wwn_attrs,
.tfc_tpg_base_attrs = usbg_base_attrs,
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.h b/drivers/usb/gadget/legacy/tcm_usb_gadget.h
index 9fb3544cc80f..0b749e1aa2f1 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.h
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.h
@@ -24,15 +24,6 @@ enum {
#define USB_G_ALT_INT_BBB 0
#define USB_G_ALT_INT_UAS 1
-struct usbg_nacl {
- /* Binary World Wide unique Port Name for SAS Initiator port */
- u64 iport_wwpn;
- /* ASCII formatted WWPN for Sas Initiator port */
- char iport_name[USBG_NAMELEN];
- /* Returned by usbg_make_nodeacl() */
- struct se_node_acl se_node_acl;
-};
-
struct tcm_usbg_nexus {
struct se_session *tvn_se_sess;
};
@@ -52,8 +43,6 @@ struct usbg_tpg {
};
struct usbg_tport {
- /* SCSI protocol the tport is providing */
- u8 tport_proto_id;
/* Binary World Wide unique Port Name for SAS Target port */
u64 tport_wwpn;
/* ASCII formatted WWPN for SAS Target port */
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 017a1e8a8f6f..533eaf04f12f 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -32,3 +32,18 @@ config VHOST
---help---
This option is selected by any driver which needs to access
the core of vhost.
+
+config VHOST_CROSS_ENDIAN_LEGACY
+ bool "Cross-endian support for vhost"
+ default n
+ ---help---
+ This option allows vhost to support guests with a different byte
+	  ordering from the host while using legacy virtio.
+
+ Userspace programs can control the feature using the
+ VHOST_SET_VRING_ENDIAN and VHOST_GET_VRING_ENDIAN ioctls.
+
+ This is only useful on a few platforms (ppc64 and arm64). Since it
+ adds some overhead, it is disabled by default.
+
+ If unsure, say "N".
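
The two ioctls use the existing struct vhost_vring_state, with .num set to VHOST_VRING_LITTLE_ENDIAN or VHOST_VRING_BIG_ENDIAN; as the vhost.c change further down enforces, the set ioctl is refused with -EBUSY once the ring is running. A minimal userspace sketch, assuming an already configured vhost fd:

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    struct vhost_vring_state s = {
        .index = 0,                        /* virtqueue index        */
        .num   = VHOST_VRING_BIG_ENDIAN,   /* legacy guest ring: BE  */
    };
    /* must happen before the ring is started, else -EBUSY */
    if (ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s) < 0)
        perror("VHOST_SET_VRING_ENDIAN");
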
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 55722feeb898..dfcc02c93648 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -43,7 +43,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
@@ -117,15 +116,6 @@ struct vhost_scsi_nexus {
struct se_session *tvn_se_sess;
};
-struct vhost_scsi_nacl {
- /* Binary World Wide unique Port Name for Vhost Initiator port */
- u64 iport_wwpn;
- /* ASCII formatted WWPN for Sas Initiator port */
- char iport_name[VHOST_SCSI_NAMELEN];
- /* Returned by vhost_scsi_make_nodeacl() */
- struct se_node_acl se_node_acl;
-};
-
struct vhost_scsi_tpg {
/* Vhost port target portal group tag for TCM */
u16 tport_tpgt;
@@ -218,7 +208,6 @@ struct vhost_scsi {
int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
-static struct target_core_fabric_ops vhost_scsi_ops;
static struct workqueue_struct *vhost_scsi_workqueue;
/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
@@ -299,28 +288,6 @@ static char *vhost_scsi_get_fabric_name(void)
return "vhost";
}
-static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- struct vhost_scsi_tpg *tpg = container_of(se_tpg,
- struct vhost_scsi_tpg, se_tpg);
- struct vhost_scsi_tport *tport = tpg->tport;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_fabric_proto_ident(se_tpg);
- case SCSI_PROTOCOL_FCP:
- return fc_get_fabric_proto_ident(se_tpg);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_fabric_proto_ident(se_tpg);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using"
- " SAS emulation\n", tport->tport_proto_id);
- break;
- }
-
- return sas_get_fabric_proto_ident(se_tpg);
-}
-
static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
@@ -337,102 +304,6 @@ static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
return tpg->tport_tpgt;
}
-static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static u32
-vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code,
- unsigned char *buf)
-{
- struct vhost_scsi_tpg *tpg = container_of(se_tpg,
- struct vhost_scsi_tpg, se_tpg);
- struct vhost_scsi_tport *tport = tpg->tport;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- case SCSI_PROTOCOL_FCP:
- return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using"
- " SAS emulation\n", tport->tport_proto_id);
- break;
- }
-
- return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
-}
-
-static u32
-vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- struct vhost_scsi_tpg *tpg = container_of(se_tpg,
- struct vhost_scsi_tpg, se_tpg);
- struct vhost_scsi_tport *tport = tpg->tport;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- case SCSI_PROTOCOL_FCP:
- return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using"
- " SAS emulation\n", tport->tport_proto_id);
- break;
- }
-
- return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
-}
-
-static char *
-vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- struct vhost_scsi_tpg *tpg = container_of(se_tpg,
- struct vhost_scsi_tpg, se_tpg);
- struct vhost_scsi_tport *tport = tpg->tport;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- case SCSI_PROTOCOL_FCP:
- return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using"
- " SAS emulation\n", tport->tport_proto_id);
- break;
- }
-
- return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
-}
-
static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
@@ -441,29 +312,6 @@ static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
return tpg->tv_fabric_prot_type;
}
-static struct se_node_acl *
-vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
- struct vhost_scsi_nacl *nacl;
-
- nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
- if (!nacl) {
- pr_err("Unable to allocate struct vhost_scsi_nacl\n");
- return NULL;
- }
-
- return &nacl->se_node_acl;
-}
-
-static void
-vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
-{
- struct vhost_scsi_nacl *nacl = container_of(se_nacl,
- struct vhost_scsi_nacl, se_node_acl);
- kfree(nacl);
-}
-
static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
return 1;
@@ -521,11 +369,6 @@ static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
return;
}
-static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
@@ -609,7 +452,7 @@ static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
- return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ return target_put_sess_cmd(se_cmd);
}
static void
@@ -970,6 +813,7 @@ static void vhost_scsi_submission_work(struct work_struct *work)
}
tv_nexus = cmd->tvc_nexus;
+ se_cmd->tag = 0;
rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
cmd->tvc_lun, cmd->tvc_exp_data_len,
@@ -1824,50 +1668,6 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
mutex_unlock(&vhost_scsi_mutex);
}
-static struct se_node_acl *
-vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
-{
- struct se_node_acl *se_nacl, *se_nacl_new;
- struct vhost_scsi_nacl *nacl;
- u64 wwpn = 0;
- u32 nexus_depth;
-
- /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
- return ERR_PTR(-EINVAL); */
- se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
- if (!se_nacl_new)
- return ERR_PTR(-ENOMEM);
-
- nexus_depth = 1;
- /*
- * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a NodeACL from demo mode -> explict
- */
- se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
- name, nexus_depth);
- if (IS_ERR(se_nacl)) {
- vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
- return se_nacl;
- }
- /*
- * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
- */
- nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
- nacl->iport_wwpn = wwpn;
-
- return se_nacl;
-}
-
-static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
-{
- struct vhost_scsi_nacl *nacl = container_of(se_acl,
- struct vhost_scsi_nacl, se_node_acl);
- core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
- kfree(nacl);
-}
-
static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
struct se_session *se_sess)
{
@@ -2202,8 +2002,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn,
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
- ret = core_tpg_register(&vhost_scsi_ops, wwn,
- &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
if (ret < 0) {
kfree(tpg);
return NULL;
@@ -2327,20 +2126,13 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
.name = "vhost",
.get_fabric_name = vhost_scsi_get_fabric_name,
- .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident,
.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
.tpg_get_tag = vhost_scsi_get_tpgt,
- .tpg_get_default_depth = vhost_scsi_get_default_depth,
- .tpg_get_pr_transport_id = vhost_scsi_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = vhost_scsi_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = vhost_scsi_parse_pr_out_transport_id,
.tpg_check_demo_mode = vhost_scsi_check_true,
.tpg_check_demo_mode_cache = vhost_scsi_check_true,
.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
.tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
- .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl,
- .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl,
.tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
.release_cmd = vhost_scsi_release_cmd,
.check_stop_free = vhost_scsi_check_stop_free,
@@ -2351,7 +2143,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
.write_pending = vhost_scsi_write_pending,
.write_pending_status = vhost_scsi_write_pending_status,
.set_default_node_attributes = vhost_scsi_set_default_node_attrs,
- .get_task_tag = vhost_scsi_get_task_tag,
.get_cmd_state = vhost_scsi_get_cmd_state,
.queue_data_in = vhost_scsi_queue_data_in,
.queue_status = vhost_scsi_queue_status,
@@ -2366,10 +2157,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
.fabric_drop_tpg = vhost_scsi_drop_tpg,
.fabric_post_link = vhost_scsi_port_link,
.fabric_pre_unlink = vhost_scsi_port_unlink,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
- .fabric_make_nodeacl = vhost_scsi_make_nodeacl,
- .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl,
.tfc_wwn_attrs = vhost_scsi_wwn_attrs,
.tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 2ee28266fd07..9e8e004bb1c3 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -36,6 +36,77 @@ enum {
#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+{
+ vq->user_be = !virtio_legacy_is_little_endian();
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+ struct vhost_vring_state s;
+
+ if (vq->private_data)
+ return -EBUSY;
+
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+
+ if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
+ s.num != VHOST_VRING_BIG_ENDIAN)
+ return -EINVAL;
+
+ vq->user_be = s.num;
+
+ return 0;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+ int __user *argp)
+{
+ struct vhost_vring_state s = {
+ .index = idx,
+ .num = vq->user_be
+ };
+
+ if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+ /* Note for legacy virtio: user_be is initialized at reset time
+ * according to the host endianness. If userspace does not set an
+ * explicit endianness, the default behavior is native endian, as
+ * expected by legacy virtio.
+ */
+ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
+}
+#else
+static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+{
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+ return -ENOIOCTLCMD;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+ int __user *argp)
+{
+ return -ENOIOCTLCMD;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+ if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
+ vq->is_le = true;
+}
+#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
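
With the option enabled, vhost_init_is_le() reduces to a single predicate whose outcomes are worth spelling out:

    vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;

    /* VERSION_1   user_be   ->  is_le
     *   set         any         true   (modern virtio is always LE)
     *   clear       false       true   (legacy ring, LE requested)
     *   clear       true        false  (legacy cross-endian: BE ring)
     */

Without the option, is_le stays at its reset value of virtio_legacy_is_little_endian() unless VIRTIO_F_VERSION_1 forces it true, i.e. the pre-series behaviour.
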
+
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -199,6 +270,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->call = NULL;
vq->log_ctx = NULL;
vq->memory = NULL;
+ vq->is_le = virtio_legacy_is_little_endian();
+ vhost_vq_reset_user_be(vq);
}
static int vhost_worker(void *data)
@@ -806,6 +879,12 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
} else
filep = eventfp;
break;
+ case VHOST_SET_VRING_ENDIAN:
+ r = vhost_set_vring_endian(vq, argp);
+ break;
+ case VHOST_GET_VRING_ENDIAN:
+ r = vhost_get_vring_endian(vq, idx, argp);
+ break;
default:
r = -ENOIOCTLCMD;
}
@@ -1044,8 +1123,12 @@ int vhost_init_used(struct vhost_virtqueue *vq)
{
__virtio16 last_used_idx;
int r;
- if (!vq->private_data)
+ if (!vq->private_data) {
+ vq->is_le = virtio_legacy_is_little_endian();
return 0;
+ }
+
+ vhost_init_is_le(vq);
r = vhost_update_used_flags(vq);
if (r)
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 8c1c792900ba..ce6f6da4b09f 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -106,6 +106,14 @@ struct vhost_virtqueue {
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
+
+ /* Ring endianness. Defaults to legacy native endianness.
+ * Set to true when starting a modern virtio device. */
+ bool is_le;
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+ /* Ring endianness requested by userspace for cross-endian support. */
+ bool user_be;
+#endif
};
struct vhost_dev {
@@ -173,34 +181,39 @@ static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
return vq->acked_features & (1ULL << bit);
}
+static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
+{
+ return vq->is_le;
+}
+
/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
- return __virtio16_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+ return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}
static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
- return __cpu_to_virtio16(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}
static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
- return __virtio32_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+ return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}
static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
- return __cpu_to_virtio32(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}
static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
- return __virtio64_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+ return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}
static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
- return __cpu_to_virtio64(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
#endif
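
(A minimal illustration of the reworked accessors, assuming a hypothetical helper named desc_len: the byte-swap decision now comes from the cached vq->is_le, set once in vhost_init_is_le(), instead of re-testing VIRTIO_F_VERSION_1 on every access.)

/* Sketch only: read a guest-endian descriptor length in CPU order. */
static u32 desc_len(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	return vhost32_to_cpu(vq, desc->len);	/* honours vq->is_le */
}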
diff --git a/drivers/video/fbdev/omap2/dss/dss.c b/drivers/video/fbdev/omap2/dss/dss.c
index 612b093831d5..9200a8668b49 100644
--- a/drivers/video/fbdev/omap2/dss/dss.c
+++ b/drivers/video/fbdev/omap2/dss/dss.c
@@ -1225,6 +1225,15 @@ static int dss_add_child_component(struct device *dev, void *data)
{
struct component_match **match = data;
+ /*
+ * HACK
+ * We don't have a working driver for rfbi, so always skip it here.
+ * Otherwise dss will never get probed successfully, as it would wait
+ * forever for rfbi to be probed.
+ */
+ if (strstr(dev_name(dev), "rfbi"))
+ return 0;
+
component_match_add(dev->parent, match, dss_component_compare, dev);
return 0;
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 86621fabbb8b..735355b0e023 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -121,6 +121,7 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
#define REG_3 0x0004a0
#define REG_4 0x000600
#define REG_6 0x000800
+#define REG_7 0x000804
#define REG_8 0x000820
#define REG_9 0x000a04
#define REG_10 0x018000
@@ -135,6 +136,8 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
#define REG_21 0x200218
#define REG_22 0x0005a0
#define REG_23 0x0005c0
+#define REG_24 0x000808
+#define REG_25 0x000b00
#define REG_26 0x200118
#define REG_27 0x200308
#define REG_32 0x21003c
@@ -429,6 +432,9 @@ ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
#define SET_LENXY_START_RECFILL(fb, lenxy) \
WRITE_WORD(lenxy, fb, REG_9)
+#define SETUP_COPYAREA(fb) \
+ WRITE_BYTE(0, fb, REG_16b1)
+
static void
HYPER_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
{
@@ -1004,6 +1010,36 @@ stifb_blank(int blank_mode, struct fb_info *info)
return 0;
}
+static void
+stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+ SETUP_COPYAREA(fb);
+
+ SETUP_HW(fb);
+ if (fb->info.var.bits_per_pixel == 32) {
+ WRITE_WORD(0xBBA0A000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
+ } else {
+ WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
+ }
+
+ NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
+ IBOvals(RopSrc, MaskAddrOffset(0),
+ BitmapExtent08, StaticReg(1),
+ DataDynamic, MaskOtc, BGx(0), FGx(0)));
+
+ WRITE_WORD(((area->sx << 16) | area->sy), fb, REG_24);
+ WRITE_WORD(((area->width << 16) | area->height), fb, REG_7);
+ WRITE_WORD(((area->dx << 16) | area->dy), fb, REG_25);
+
+ SETUP_FB(fb);
+}
+
static void __init
stifb_init_display(struct stifb_info *fb)
{
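
(The three WRITE_WORD calls in stifb_copyarea() above pack each coordinate pair into one word: x in the high halfword, y in the low one, with REG_24 taking the source, REG_7 the extent and REG_25 the destination. A hypothetical helper showing just the packing; that the final REG_25 write kicks off the blit is an assumption inferred from the register ordering.)

/* Assumption: the NGLE engine latches x in bits 31:16, y in bits 15:0. */
static inline u32 ngle_pack_xy(u32 x, u32 y)
{
	return (x << 16) | y;
}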
@@ -1069,7 +1105,7 @@ static struct fb_ops stifb_ops = {
.fb_setcolreg = stifb_setcolreg,
.fb_blank = stifb_blank,
.fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
+ .fb_copyarea = stifb_copyarea,
.fb_imageblit = cfb_imageblit,
};
@@ -1258,7 +1294,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
info->fbops = &stifb_ops;
info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len);
info->screen_size = fix->smem_len;
- info->flags = FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
info->pseudo_palette = &fb->pseudo_palette;
/* This has to be done !!! */
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 5447b8186332..78f804af6c20 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -507,10 +507,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto err_enable_device;
- rc = pci_request_regions(pci_dev, "virtio-pci");
- if (rc)
- goto err_request_regions;
-
if (force_legacy) {
rc = virtio_pci_legacy_probe(vp_dev);
/* Also try modern mode if we can't map BAR0 (no IO space). */
@@ -540,8 +536,6 @@ err_register:
else
virtio_pci_modern_remove(vp_dev);
err_probe:
- pci_release_regions(pci_dev);
-err_request_regions:
pci_disable_device(pci_dev);
err_enable_device:
kfree(vp_dev);
@@ -559,7 +553,6 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
else
virtio_pci_modern_remove(vp_dev);
- pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
}
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 28ee4e56badf..b976d968e793 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -75,6 +75,8 @@ struct virtio_pci_device {
/* Multiply queue_notify_off by this value. (non-legacy mode). */
u32 notify_offset_multiplier;
+ int modern_bars;
+
/* Legacy only field */
/* the IO mapping for the PCI config space */
void __iomem *ioaddr;
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 256a5278a515..48bc9797e530 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -215,6 +215,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
struct pci_dev *pci_dev = vp_dev->pci_dev;
+ int rc;
/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
@@ -226,9 +227,14 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
return -ENODEV;
}
+ rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
+ if (rc)
+ return rc;
+
+ rc = -ENOMEM;
vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
if (!vp_dev->ioaddr)
- return -ENOMEM;
+ goto err_iomap;
vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
@@ -246,6 +252,10 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
vp_dev->del_vq = del_vq;
return 0;
+
+err_iomap:
+ pci_release_region(pci_dev, 0);
+ return rc;
}
void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
@@ -253,4 +263,5 @@ void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
struct pci_dev *pci_dev = vp_dev->pci_dev;
pci_iounmap(pci_dev, vp_dev->ioaddr);
+ pci_release_region(pci_dev, 0);
}
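
(The legacy probe now claims BAR 0 explicitly and unwinds in reverse order on failure. The same shape reduced to a generic sketch; example_probe and the "example" region name are placeholders, not driver code.)

static int example_probe(struct pci_dev *pdev, void __iomem **base)
{
	int rc;

	rc = pci_request_region(pdev, 0, "example");
	if (rc)
		return rc;

	*base = pci_iomap(pdev, 0, 0);
	if (!*base) {
		rc = -ENOMEM;
		goto err_iomap;		/* release in reverse order */
	}
	return 0;

err_iomap:
	pci_release_region(pdev, 0);
	return rc;
}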
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index e88e0997a889..8e5cf194cc0b 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -499,7 +499,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
* Returns offset of the capability, or 0.
*/
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
- u32 ioresource_types)
+ u32 ioresource_types, int *bars)
{
int pos;
@@ -520,8 +520,10 @@ static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
if (type == cfg_type) {
if (pci_resource_len(dev, bar) &&
- pci_resource_flags(dev, bar) & ioresource_types)
+ pci_resource_flags(dev, bar) & ioresource_types) {
+ *bars |= (1 << bar);
return pos;
+ }
}
}
return 0;
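
(The new *bars out-parameter accumulates one bit per BAR that actually backs a capability, so the modern probe can claim exactly those regions. A sketch using the names from this patch: a device whose capabilities sit in BARs 2 and 4 ends up with modern_bars == 0x14.)

int modern_bars = 0;
int err;

/* Each successful lookup ORs (1 << bar) into the mask. */
virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
			   IORESOURCE_IO | IORESOURCE_MEM, &modern_bars);

/* Later: claim only the BARs that will actually be mapped. */
err = pci_request_selected_regions(pci_dev, modern_bars,
				   "virtio-pci-modern");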
@@ -617,7 +619,8 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
/* check for a common config: if not, use legacy mode (bar 0). */
common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
- IORESOURCE_IO | IORESOURCE_MEM);
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &vp_dev->modern_bars);
if (!common) {
dev_info(&pci_dev->dev,
"virtio_pci: leaving for legacy driver\n");
@@ -626,9 +629,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
/* If common is there, these should be too... */
isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
- IORESOURCE_IO | IORESOURCE_MEM);
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &vp_dev->modern_bars);
notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
- IORESOURCE_IO | IORESOURCE_MEM);
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &vp_dev->modern_bars);
if (!isr || !notify) {
dev_err(&pci_dev->dev,
"virtio_pci: missing capabilities %i/%i/%i\n",
@@ -640,7 +645,13 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
* device-specific configuration.
*/
device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
- IORESOURCE_IO | IORESOURCE_MEM);
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &vp_dev->modern_bars);
+
+ err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
+ "virtio-pci-modern");
+ if (err)
+ return err;
err = -EINVAL;
vp_dev->common = map_capability(pci_dev, common,
@@ -727,4 +738,5 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
pci_iounmap(pci_dev, vp_dev->notify_base);
pci_iounmap(pci_dev, vp_dev->isr);
pci_iounmap(pci_dev, vp_dev->common);
+ pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 39223c3e99ad..9eeefd7cad41 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -53,7 +53,6 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include <target/target_core_fabric_configfs.h>
#include <asm/hypervisor.h>
@@ -201,8 +200,6 @@ static LIST_HEAD(scsiback_free_pages);
static DEFINE_MUTEX(scsiback_mutex);
static LIST_HEAD(scsiback_list);
-static const struct target_core_fabric_ops scsiback_ops;
-
static void scsiback_get(struct vscsibk_info *info)
{
atomic_inc(&info->nr_unreplied_reqs);
@@ -397,6 +394,7 @@ static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
memset(se_cmd, 0, sizeof(*se_cmd));
scsiback_get(pending_req->info);
+ se_cmd->tag = pending_req->rqid;
rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
pending_req->sense_buffer, pending_req->v2p->lun,
pending_req->data_len, 0,
@@ -863,7 +861,8 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
struct list_head *head = &(info->v2p_entry_lists);
unsigned long flags;
char *lunp;
- unsigned int lun;
+ unsigned long long unpacked_lun;
+ struct se_lun *se_lun;
struct scsiback_tpg *tpg_entry, *tpg = NULL;
char *error = "doesn't exist";
@@ -874,24 +873,27 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
}
*lunp = 0;
lunp++;
- if (kstrtouint(lunp, 10, &lun) || lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+ err = kstrtoull(lunp, 10, &unpacked_lun);
+ if (err < 0) {
pr_err("lun number not valid: %s\n", lunp);
- return -EINVAL;
+ return err;
}
mutex_lock(&scsiback_mutex);
list_for_each_entry(tpg_entry, &scsiback_list, tv_tpg_list) {
if (!strcmp(phy, tpg_entry->tport->tport_name) ||
!strcmp(phy, tpg_entry->param_alias)) {
- spin_lock(&tpg_entry->se_tpg.tpg_lun_lock);
- if (tpg_entry->se_tpg.tpg_lun_list[lun]->lun_status ==
- TRANSPORT_LUN_STATUS_ACTIVE) {
- if (!tpg_entry->tpg_nexus)
- error = "nexus undefined";
- else
- tpg = tpg_entry;
+ mutex_lock(&tpg_entry->se_tpg.tpg_lun_mutex);
+ hlist_for_each_entry(se_lun, &tpg_entry->se_tpg.tpg_lun_hlist, link) {
+ if (se_lun->unpacked_lun == unpacked_lun) {
+ if (!tpg_entry->tpg_nexus)
+ error = "nexus undefined";
+ else
+ tpg = tpg_entry;
+ break;
+ }
}
- spin_unlock(&tpg_entry->se_tpg.tpg_lun_lock);
+ mutex_unlock(&tpg_entry->se_tpg.tpg_lun_mutex);
break;
}
}
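
(With the fixed-size tpg_lun_list[] gone, the lookup walks the per-TPG hlist under tpg_lun_mutex, which also lifts the old TRANSPORT_MAX_LUNS_PER_TPG bound and admits full 64-bit LUNs. The lookup shape in isolation; find_lun is a hypothetical helper, field names as used above.)

static struct se_lun *find_lun(struct se_portal_group *se_tpg,
			       unsigned long long unpacked_lun)
{
	struct se_lun *lun, *found = NULL;

	mutex_lock(&se_tpg->tpg_lun_mutex);
	hlist_for_each_entry(lun, &se_tpg->tpg_lun_hlist, link) {
		if (lun->unpacked_lun == unpacked_lun) {
			found = lun;
			break;
		}
	}
	mutex_unlock(&se_tpg->tpg_lun_mutex);

	return found;
}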
@@ -903,7 +905,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
mutex_unlock(&scsiback_mutex);
if (!tpg) {
- pr_err("%s:%d %s\n", phy, lun, error);
+ pr_err("%s:%llu %s\n", phy, unpacked_lun, error);
return -ENODEV;
}
@@ -931,7 +933,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
kref_init(&new->kref);
new->v = *v;
new->tpg = tpg;
- new->lun = lun;
+ new->lun = unpacked_lun;
list_add_tail(&new->l, head);
out:
@@ -1251,28 +1253,6 @@ static char *scsiback_dump_proto_id(struct scsiback_tport *tport)
return "Unknown";
}
-static u8 scsiback_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- struct scsiback_tpg *tpg = container_of(se_tpg,
- struct scsiback_tpg, se_tpg);
- struct scsiback_tport *tport = tpg->tport;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_fabric_proto_ident(se_tpg);
- case SCSI_PROTOCOL_FCP:
- return fc_get_fabric_proto_ident(se_tpg);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_fabric_proto_ident(se_tpg);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
- tport->tport_proto_id);
- break;
- }
-
- return sas_get_fabric_proto_ident(se_tpg);
-}
-
static char *scsiback_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct scsiback_tpg *tpg = container_of(se_tpg,
@@ -1289,102 +1269,6 @@ static u16 scsiback_get_tag(struct se_portal_group *se_tpg)
return tpg->tport_tpgt;
}
-static u32 scsiback_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static u32
-scsiback_get_pr_transport_id(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code,
- unsigned char *buf)
-{
- struct scsiback_tpg *tpg = container_of(se_tpg,
- struct scsiback_tpg, se_tpg);
- struct scsiback_tport *tport = tpg->tport;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- case SCSI_PROTOCOL_FCP:
- return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
- tport->tport_proto_id);
- break;
- }
-
- return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
-}
-
-static u32
-scsiback_get_pr_transport_id_len(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- struct scsiback_tpg *tpg = container_of(se_tpg,
- struct scsiback_tpg, se_tpg);
- struct scsiback_tport *tport = tpg->tport;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- case SCSI_PROTOCOL_FCP:
- return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
- tport->tport_proto_id);
- break;
- }
-
- return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
-}
-
-static char *
-scsiback_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- struct scsiback_tpg *tpg = container_of(se_tpg,
- struct scsiback_tpg, se_tpg);
- struct scsiback_tport *tport = tpg->tport;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- case SCSI_PROTOCOL_FCP:
- return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
- tport->tport_proto_id);
- break;
- }
-
- return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
-}
-
static struct se_wwn *
scsiback_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
@@ -1451,19 +1335,6 @@ static void scsiback_drop_tport(struct se_wwn *wwn)
kfree(tport);
}
-static struct se_node_acl *
-scsiback_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
- return kzalloc(sizeof(struct se_node_acl), GFP_KERNEL);
-}
-
-static void
-scsiback_release_fabric_acl(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
-{
- kfree(se_nacl);
-}
-
static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
return 1;
@@ -1522,14 +1393,6 @@ static void scsiback_set_default_node_attrs(struct se_node_acl *nacl)
{
}
-static u32 scsiback_get_task_tag(struct se_cmd *se_cmd)
-{
- struct vscsibk_pend *pending_req = container_of(se_cmd,
- struct vscsibk_pend, se_cmd);
-
- return pending_req->rqid;
-}
-
static int scsiback_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
@@ -1898,8 +1761,7 @@ scsiback_make_tpg(struct se_wwn *wwn,
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
- ret = core_tpg_register(&scsiback_ops, wwn,
- &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
if (ret < 0) {
kfree(tpg);
return NULL;
@@ -1944,23 +1806,15 @@ static const struct target_core_fabric_ops scsiback_ops = {
.module = THIS_MODULE,
.name = "xen-pvscsi",
.get_fabric_name = scsiback_get_fabric_name,
- .get_fabric_proto_ident = scsiback_get_fabric_proto_ident,
.tpg_get_wwn = scsiback_get_fabric_wwn,
.tpg_get_tag = scsiback_get_tag,
- .tpg_get_default_depth = scsiback_get_default_depth,
- .tpg_get_pr_transport_id = scsiback_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = scsiback_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = scsiback_parse_pr_out_transport_id,
.tpg_check_demo_mode = scsiback_check_true,
.tpg_check_demo_mode_cache = scsiback_check_true,
.tpg_check_demo_mode_write_protect = scsiback_check_false,
.tpg_check_prod_mode_write_protect = scsiback_check_false,
- .tpg_alloc_fabric_acl = scsiback_alloc_fabric_acl,
- .tpg_release_fabric_acl = scsiback_release_fabric_acl,
.tpg_get_inst_index = scsiback_tpg_get_inst_index,
.check_stop_free = scsiback_check_stop_free,
.release_cmd = scsiback_release_cmd,
- .put_session = NULL,
.shutdown_session = scsiback_shutdown_session,
.close_session = scsiback_close_session,
.sess_get_index = scsiback_sess_get_index,
@@ -1968,7 +1822,6 @@ static const struct target_core_fabric_ops scsiback_ops = {
.write_pending = scsiback_write_pending,
.write_pending_status = scsiback_write_pending_status,
.set_default_node_attributes = scsiback_set_default_node_attrs,
- .get_task_tag = scsiback_get_task_tag,
.get_cmd_state = scsiback_get_cmd_state,
.queue_data_in = scsiback_queue_data_in,
.queue_status = scsiback_queue_status,
@@ -1983,12 +1836,6 @@ static const struct target_core_fabric_ops scsiback_ops = {
.fabric_drop_tpg = scsiback_drop_tpg,
.fabric_post_link = scsiback_port_link,
.fabric_pre_unlink = scsiback_port_unlink,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
-#if 0
- .fabric_make_nodeacl = scsiback_make_nodeacl,
- .fabric_drop_nodeacl = scsiback_drop_nodeacl,
-#endif
.tfc_wwn_attrs = scsiback_wwn_attrs,
.tfc_tpg_base_attrs = scsiback_tpg_attrs,