-rw-r--r--  CREDITS | 2
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu | 16
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-cadence.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt | 13
-rw-r--r--  Documentation/devicetree/bindings/net/phy.txt | 10
-rw-r--r--  Documentation/features/io/dma-api-debug/arch-support.txt | 2
-rw-r--r--  Documentation/features/io/dma-contiguous/arch-support.txt | 2
-rw-r--r--  Documentation/features/io/sg-chain/arch-support.txt | 2
-rw-r--r--  Documentation/scsi/g_NCR5380.txt | 46
-rwxr-xr-x  Documentation/sphinx/rstFlatTable.py | 5
-rw-r--r--  Documentation/x86/intel_rdt_ui.txt | 214
-rw-r--r--  MAINTAINERS | 20
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/arc/Kconfig | 1
-rw-r--r--  arch/arc/include/asm/arcregs.h | 2
-rw-r--r--  arch/arc/include/asm/cacheflush.h | 6
-rw-r--r--  arch/arc/include/asm/irqflags-arcv2.h | 6
-rw-r--r--  arch/arc/kernel/entry-arcv2.S | 24
-rw-r--r--  arch/arc/kernel/entry-compact.S | 2
-rw-r--r--  arch/arc/kernel/intc-arcv2.c | 10
-rw-r--r--  arch/arc/mm/cache.c | 28
-rw-r--r--  arch/arm/boot/dts/hisi-x5hd2.dtsi | 4
-rw-r--r--  arch/arm64/include/asm/acpi.h | 2
-rw-r--r--  arch/arm64/include/asm/memory.h | 5
-rw-r--r--  arch/arm64/kernel/acpi.c | 7
-rw-r--r--  arch/arm64/kernel/setup.c | 8
-rw-r--r--  arch/microblaze/include/asm/unistd.h | 2
-rw-r--r--  arch/microblaze/include/uapi/asm/unistd.h | 6
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo.c | 6
-rw-r--r--  arch/microblaze/kernel/syscall_table.S | 6
-rw-r--r--  arch/microblaze/kernel/timer.c | 2
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/include/asm/elf.h | 7
-rw-r--r--  arch/parisc/include/asm/pdcpat.h | 2
-rw-r--r--  arch/parisc/include/asm/processor.h | 4
-rw-r--r--  arch/parisc/kernel/entry.S | 12
-rw-r--r--  arch/parisc/kernel/firmware.c | 2
-rw-r--r--  arch/parisc/kernel/inventory.c | 8
-rw-r--r--  arch/parisc/kernel/perf.c | 5
-rw-r--r--  arch/parisc/kernel/process.c | 6
-rw-r--r--  arch/parisc/kernel/processor.c | 29
-rw-r--r--  arch/parisc/kernel/sys_parisc.c | 18
-rw-r--r--  arch/parisc/kernel/time.c | 112
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/ima.h | 29
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 15
-rw-r--r--  arch/powerpc/kernel/Makefile | 4
-rw-r--r--  arch/powerpc/kernel/ima_kexec.c | 223
-rw-r--r--  arch/powerpc/kernel/kexec_elf_64.c | 2
-rw-r--r--  arch/powerpc/kernel/machine_kexec_file_64.c | 15
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_generic.c | 3
-rw-r--r--  arch/x86/Kconfig | 13
-rw-r--r--  arch/x86/events/intel/cqm.c | 23
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 4
-rw-r--r--  arch/x86/include/asm/intel_rdt.h | 224
-rw-r--r--  arch/x86/include/asm/intel_rdt_common.h | 27
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 20
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt.c | 403
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 1115
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_schemata.c | 245
-rw-r--r--  arch/x86/kernel/cpu/scattered.c | 11
-rw-r--r--  arch/x86/kernel/process_32.c | 4
-rw-r--r--  arch/x86/kernel/process_64.c | 4
-rw-r--r--  arch/xtensa/Kconfig | 1
-rw-r--r--  arch/xtensa/boot/dts/kc705.dts | 16
-rw-r--r--  arch/xtensa/include/asm/Kbuild | 1
-rw-r--r--  arch/xtensa/kernel/Makefile | 1
-rw-r--r--  arch/xtensa/kernel/pci-dma.c | 21
-rw-r--r--  arch/xtensa/kernel/s32c1i_selftest.c | 128
-rw-r--r--  arch/xtensa/kernel/setup.c | 137
-rw-r--r--  arch/xtensa/mm/init.c | 2
-rw-r--r--  block/ioctl.c | 3
-rw-r--r--  block/scsi_ioctl.c | 3
-rw-r--r--  drivers/acpi/acpica/actables.h | 6
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 14
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 85
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 130
-rw-r--r--  drivers/acpi/bus.c | 2
-rw-r--r--  drivers/acpi/nfit/core.c | 3
-rw-r--r--  drivers/acpi/osl.c | 15
-rw-r--r--  drivers/acpi/processor_core.c | 8
-rw-r--r--  drivers/acpi/scan.c | 3
-rw-r--r--  drivers/acpi/spcr.c | 8
-rw-r--r--  drivers/acpi/tables.c | 17
-rw-r--r--  drivers/base/cacheinfo.c | 5
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/s3c64xx-cpufreq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 935
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 249
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 442
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 211
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 79
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 31
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.h | 21
-rw-r--r--  drivers/i2c/busses/i2c-xgene-slimpro.c | 1
-rw-r--r--  drivers/i2c/muxes/i2c-mux-mlxcpld.c | 24
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca954x.c | 5
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 25
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_type.h | 4
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ucontext.h | 4
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_uk.c | 17
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_user.h | 4
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 21
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.h | 1
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 35
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 3
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 10
-rw-r--r--  drivers/iommu/dmar.c | 7
-rw-r--r--  drivers/mailbox/bcm-pdc-mailbox.c | 548
-rw-r--r--  drivers/mailbox/mailbox-sti.c | 1
-rw-r--r--  drivers/mailbox/mailbox-test.c | 92
-rw-r--r--  drivers/mailbox/pcc.c | 5
-rw-r--r--  drivers/md/bcache/bcache.h | 4
-rw-r--r--  drivers/md/bcache/btree.c | 39
-rw-r--r--  drivers/md/bcache/btree.h | 3
-rw-r--r--  drivers/md/bcache/request.c | 4
-rw-r--r--  drivers/md/bcache/super.c | 7
-rw-r--r--  drivers/mmc/core/core.c | 12
-rw-r--r--  drivers/mmc/core/sd.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-cadence.c | 1
-rw-r--r--  drivers/mmc/host/sdhci.c | 33
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 13
-rw-r--r--  drivers/net/ethernet/freescale/fman/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 15
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 21
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 8
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 24
-rw-r--r--  drivers/net/phy/phy_device.c | 22
-rw-r--r--  drivers/nvme/host/pci.c | 4
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 17
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 41
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 61
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 4
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 3
-rw-r--r--  drivers/s390/scsi/zfcp_reqlist.h | 30
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 61
-rw-r--r--  drivers/scsi/3w-9xxx.c | 9
-rw-r--r--  drivers/scsi/3w-9xxx.h | 9
-rw-r--r--  drivers/scsi/3w-sas.c | 7
-rw-r--r--  drivers/scsi/3w-sas.h | 7
-rw-r--r--  drivers/scsi/3w-xxxx.c | 7
-rw-r--r--  drivers/scsi/3w-xxxx.h | 5
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/NCR5380.c | 77
-rw-r--r--  drivers/scsi/NCR5380.h | 11
-rw-r--r--  drivers/scsi/aacraid/linit.c | 2
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 320
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 40
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 2
-rw-r--r--  drivers/scsi/g_NCR5380.c | 153
-rw-r--r--  drivers/scsi/g_NCR5380.h | 2
-rw-r--r--  drivers/scsi/hpsa.c | 37
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 7
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 1
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 1
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h | 1
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 6
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 43
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 34
-rw-r--r--  drivers/scsi/qedi/Kconfig | 10
-rw-r--r--  drivers/scsi/qedi/Makefile | 5
-rw-r--r--  drivers/scsi/qedi/qedi.h | 364
-rw-r--r--  drivers/scsi/qedi/qedi_dbg.c | 143
-rw-r--r--  drivers/scsi/qedi/qedi_dbg.h | 144
-rw-r--r--  drivers/scsi/qedi/qedi_debugfs.c | 244
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 2378
-rw-r--r--  drivers/scsi/qedi/qedi_gbl.h | 73
-rw-r--r--  drivers/scsi/qedi/qedi_hsi.h | 52
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 1624
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.h | 232
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 2127
-rw-r--r--  drivers/scsi/qedi/qedi_sysfs.c | 52
-rw-r--r--  drivers/scsi/qedi/qedi_version.h | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 36
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 108
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 28
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 173
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 30
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 407
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 224
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 85
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 116
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 475
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 4
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 44
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 1
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h | 30
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 55
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 12
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 7
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_target.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target.h | 12
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.h | 5
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_datain_values.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_datain_values.h | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_device.h | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.h | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.h | 10
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.h | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.h | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.h | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_nodeattrib.h | 5
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.h | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.h | 5
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.h | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.h | 9
-rw-r--r--  drivers/target/iscsi/iscsi_target_transport.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 8
-rw-r--r--  drivers/target/loopback/tcm_loop.h | 4
-rw-r--r--  drivers/target/sbp/sbp_target.c | 3
-rw-r--r--  drivers/target/target_core_alua.c | 3
-rw-r--r--  drivers/target/target_core_alua.h | 2
-rw-r--r--  drivers/target/target_core_configfs.c | 4
-rw-r--r--  drivers/target/target_core_device.c | 1
-rw-r--r--  drivers/target/target_core_file.c | 1
-rw-r--r--  drivers/target/target_core_file.h | 2
-rw-r--r--  drivers/target/target_core_iblock.h | 3
-rw-r--r--  drivers/target/target_core_internal.h | 5
-rw-r--r--  drivers/target/target_core_pr.c | 5
-rw-r--r--  drivers/target/target_core_pr.h | 4
-rw-r--r--  drivers/target/target_core_pscsi.h | 7
-rw-r--r--  drivers/target/target_core_rd.c | 2
-rw-r--r--  drivers/target/target_core_rd.h | 4
-rw-r--r--  drivers/target/target_core_sbc.c | 1
-rw-r--r--  drivers/target/target_core_ua.h | 2
-rw-r--r--  drivers/target/target_core_user.c | 5
-rw-r--r--  drivers/target/target_core_xcopy.c | 1
-rw-r--r--  drivers/target/target_core_xcopy.h | 2
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 3
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c | 2
-rw-r--r--  fs/befs/befs.h | 3
-rw-r--r--  fs/befs/befs_fs_types.h | 12
-rw-r--r--  fs/befs/btree.c | 48
-rw-r--r--  fs/befs/btree.h | 8
-rw-r--r--  fs/befs/datastream.c | 8
-rw-r--r--  fs/befs/datastream.h | 5
-rw-r--r--  fs/befs/debug.c | 14
-rw-r--r--  fs/befs/inode.c | 12
-rw-r--r--  fs/befs/inode.h | 5
-rw-r--r--  fs/befs/io.c | 7
-rw-r--r--  fs/befs/io.h | 1
-rw-r--r--  fs/befs/linuxvfs.c | 134
-rw-r--r--  fs/befs/super.h | 4
-rw-r--r--  fs/nfs/dir.c | 23
-rw-r--r--  fs/nfs/file.c | 12
-rw-r--r--  fs/nfs/filelayout/filelayoutdev.c | 3
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 6
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayoutdev.c | 2
-rw-r--r--  fs/nfs/inode.c | 75
-rw-r--r--  fs/nfs/internal.h | 1
-rw-r--r--  fs/nfs/nfs4proc.c | 43
-rw-r--r--  fs/nfs/nfs4state.c | 57
-rw-r--r--  fs/nfs/nfs4xdr.c | 37
-rw-r--r--  fs/nfs/pnfs.c | 16
-rw-r--r--  fs/splice.c | 9
-rw-r--r--  include/acpi/acpi_io.h | 2
-rw-r--r--  include/acpi/acpixf.h | 17
-rw-r--r--  include/acpi/actbl.h | 1
-rw-r--r--  include/acpi/platform/aclinuxex.h | 1
-rw-r--r--  include/dt-bindings/net/mdio.h | 19
-rw-r--r--  include/linux/blkdev.h | 1
-rw-r--r--  include/linux/cacheinfo.h | 3
-rw-r--r--  include/linux/configfs.h | 13
-rw-r--r--  include/linux/ima.h | 12
-rw-r--r--  include/linux/nfs_fs.h | 2
-rw-r--r--  include/linux/ratelimit.h | 7
-rw-r--r--  include/linux/sched.h | 3
-rw-r--r--  include/rdma/ib_addr.h | 6
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 14
-rw-r--r--  include/target/iscsi/iscsi_target_stat.h | 4
-rw-r--r--  include/target/iscsi/iscsi_transport.h | 6
-rw-r--r--  include/target/target_core_backend.h | 6
-rw-r--r--  include/target/target_core_base.h | 12
-rw-r--r--  include/target/target_core_fabric.h | 4
-rw-r--r--  include/uapi/linux/magic.h | 1
-rw-r--r--  kernel/kcov.c | 8
-rw-r--r--  kernel/kexec_file.c | 4
-rw-r--r--  lib/Kconfig.debug | 2
-rw-r--r--  mm/fadvise.c | 15
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/openvswitch/flow_netlink.c | 1
-rw-r--r--  net/rds/rdma.c | 2
-rw-r--r--  net/sched/sch_fq.c | 14
-rw-r--r--  net/sched/sch_netem.c | 2
-rw-r--r--  net/sctp/bind_addr.c | 3
-rw-r--r--  net/sctp/protocol.c | 40
-rw-r--r--  scripts/selinux/genheaders/Makefile | 4
-rw-r--r--  scripts/selinux/genheaders/genheaders.c | 4
-rw-r--r--  scripts/selinux/mdp/Makefile | 4
-rw-r--r--  scripts/selinux/mdp/mdp.c | 4
-rw-r--r--  security/integrity/ima/Kconfig | 12
-rw-r--r--  security/integrity/ima/Makefile | 1
-rw-r--r--  security/integrity/ima/ima.h | 31
-rw-r--r--  security/integrity/ima/ima_crypto.c | 6
-rw-r--r--  security/integrity/ima/ima_fs.c | 30
-rw-r--r--  security/integrity/ima/ima_init.c | 2
-rw-r--r--  security/integrity/ima/ima_kexec.c | 168
-rw-r--r--  security/integrity/ima/ima_main.c | 1
-rw-r--r--  security/integrity/ima/ima_queue.c | 77
-rw-r--r--  security/integrity/ima/ima_template.c | 297
-rw-r--r--  security/integrity/ima/ima_template_lib.c | 7
-rw-r--r--  security/selinux/include/classmap.h | 2
-rw-r--r--  sound/usb/endpoint.c | 12
347 files changed, 16211 insertions(+), 2663 deletions(-)
diff --git a/CREDITS b/CREDITS
index 10a9eee807b6..c58560701d13 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3949,8 +3949,6 @@ E: gwingerde@gmail.com
D: Ralink rt2x00 WLAN driver
D: Minix V2 file-system
D: Misc fixes
-S: Geessinkweg 177
-S: 7544 TX Enschede
S: The Netherlands
N: Lars Wirzenius
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 498741737055..2a4a423d08e0 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -272,6 +272,22 @@ Description: Parameters for the CPU cache attributes
the modified cache line is written to main
memory only when it is replaced
+
+What: /sys/devices/system/cpu/cpu*/cache/index*/id
+Date: September 2016
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description: Cache id
+
+ The id provides a unique number for a specific instance of
+ a cache of a particular type. E.g. there may be a level
+ 3 unified cache on each socket in a server and we may
+ assign them ids 0, 1, 2, ...
+
+ Note that id value can be non-contiguous. E.g. level 1
+ caches typically exist per core, but there may not be a
+ power of two cores on a socket, so these caches may be
+ numbered 0, 1, 2, 3, 4, 5, 8, 9, 10, ...
+
What: /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats
/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/turbo_stat
/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/sub_turbo_stat
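The new "id" attribute described above is a plain read-only sysfs file, so it can be inspected directly; a hypothetical session on one CPU (the index directories present and the values returned depend entirely on the machine's cache topology):

  # grep . /sys/devices/system/cpu/cpu0/cache/index*/id
  /sys/devices/system/cpu/cpu0/cache/index0/id:0
  /sys/devices/system/cpu/cpu0/cache/index1/id:0
  /sys/devices/system/cpu/cpu0/cache/index2/id:0
  /sys/devices/system/cpu/cpu0/cache/index3/id:0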
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index be2d6d0a03a4..21e2d8863705 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1441,6 +1441,10 @@
The builtin appraise policy appraises all files
owned by uid=0.
+ ima_canonical_fmt [IMA]
+ Use the canonical format for the binary runtime
+ measurements, instead of host native format.
+
ima_hash= [IMA]
Format: { md5 | sha1 | rmd160 | sha256 | sha384
| sha512 | ... }
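Like the other ima_* switches listed here, ima_canonical_fmt takes no value; a sketch of a boot entry that combines it with an explicit hash algorithm (the surrounding parameters are illustrative only):

  linux /vmlinuz root=/dev/sda1 ro ima_hash=sha256 ima_canonical_fmt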
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt b/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
index 750374fc9d94..c0f37cb41a9b 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
@@ -1,7 +1,9 @@
* Cadence SD/SDIO/eMMC Host Controller
Required properties:
-- compatible: should be "cdns,sd4hc".
+- compatible: should be one of the following:
+ "cdns,sd4hc" - default of the IP
+ "socionext,uniphier-sd4hc" - for Socionext UniPhier SoCs
- reg: offset and length of the register set for the device.
- interrupts: a single interrupt specifier.
- clocks: phandle to the input clock.
@@ -19,7 +21,7 @@ if supported. See mmc.txt for details.
Example:
emmc: sdhci@5a000000 {
- compatible = "cdns,sd4hc";
+ compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
reg = <0x5a000000 0x400>;
interrupts = <0 78 4>;
clocks = <&clk 4>;
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt b/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
index 063c02da018a..eea73adc678f 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
@@ -2,11 +2,14 @@ Hisilicon hix5hd2 gmac controller
Required properties:
- compatible: should contain one of the following SoC strings:
- * "hisilicon,hix5hd2-gemac"
- * "hisilicon,hi3798cv200-gemac"
+ * "hisilicon,hix5hd2-gmac"
+ * "hisilicon,hi3798cv200-gmac"
+ * "hisilicon,hi3516a-gmac"
and one of the following version string:
- * "hisilicon,hisi-gemac-v1"
- * "hisilicon,hisi-gemac-v2"
+ * "hisilicon,hisi-gmac-v1"
+ * "hisilicon,hisi-gmac-v2"
+ The version v1 includes SoCs hix5hd2.
+ The version v2 includes SoCs hi3798cv200, hi3516a.
- reg: specifies base physical address(s) and size of the device registers.
The first region is the MAC register base and size.
The second region is external interface control register.
@@ -35,7 +38,7 @@ Required properties:
Example:
gmac0: ethernet@f9840000 {
- compatible = "hisilicon,hi3798cv200-gemac", "hisilicon,hisi-gemac-v2";
+ compatible = "hisilicon,hi3798cv200-gmac", "hisilicon,hisi-gmac-v2";
reg = <0xf9840000 0x1000>,<0xf984300c 0x4>;
interrupts = <0 71 4>;
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index 54749b60a466..ff1bc4b1bb3b 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -38,8 +38,14 @@ Optional Properties:
- enet-phy-lane-swap: If set, indicates the PHY will swap the TX/RX lanes to
compensate for the board being designed with the lanes swapped.
-- eee-broken-modes: Bits to clear in the MDIO_AN_EEE_ADV register to
- disable EEE broken modes.
+- eee-broken-100tx:
+- eee-broken-1000t:
+- eee-broken-10gt:
+- eee-broken-1000kx:
+- eee-broken-10gkx4:
+- eee-broken-10gkr:
+ Mark the corresponding energy efficient ethernet mode as broken and
+ request the ethernet to stop advertising it.
Example:
diff --git a/Documentation/features/io/dma-api-debug/arch-support.txt b/Documentation/features/io/dma-api-debug/arch-support.txt
index 4f4a3443b114..ffa522a9bdfd 100644
--- a/Documentation/features/io/dma-api-debug/arch-support.txt
+++ b/Documentation/features/io/dma-api-debug/arch-support.txt
@@ -36,5 +36,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok |
- | xtensa: | TODO |
+ | xtensa: | ok |
-----------------------
diff --git a/Documentation/features/io/dma-contiguous/arch-support.txt b/Documentation/features/io/dma-contiguous/arch-support.txt
index a97e8e3f4ebb..83d2cf989ea3 100644
--- a/Documentation/features/io/dma-contiguous/arch-support.txt
+++ b/Documentation/features/io/dma-contiguous/arch-support.txt
@@ -36,5 +36,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok |
- | xtensa: | TODO |
+ | xtensa: | ok |
-----------------------
diff --git a/Documentation/features/io/sg-chain/arch-support.txt b/Documentation/features/io/sg-chain/arch-support.txt
index b9b675539b9d..6ca98f9911bb 100644
--- a/Documentation/features/io/sg-chain/arch-support.txt
+++ b/Documentation/features/io/sg-chain/arch-support.txt
@@ -7,7 +7,7 @@
| arch |status|
-----------------------
| alpha: | TODO |
- | arc: | TODO |
+ | arc: | ok |
| arm: | ok |
| arm64: | ok |
| avr32: | TODO |
diff --git a/Documentation/scsi/g_NCR5380.txt b/Documentation/scsi/g_NCR5380.txt
index e2c187947e58..37b1967a00a9 100644
--- a/Documentation/scsi/g_NCR5380.txt
+++ b/Documentation/scsi/g_NCR5380.txt
@@ -6,17 +6,15 @@ NCR53c400 extensions (c) 1994,1995,1996 Kevin Lentin
This file documents the NCR53c400 extensions by Kevin Lentin and some
enhancements to the NCR5380 core.
-This driver supports both NCR5380 and NCR53c400 cards in port or memory
-mapped modes. Currently this driver can only support one of those mapping
-modes at a time but it does support both of these chips at the same time.
-The next release of this driver will support port & memory mapped cards at
-the same time. It should be able to handle multiple different cards in the
-same machine.
+This driver supports NCR5380 and NCR53c400 and compatible cards in port or
+memory mapped modes.
-The drivers/scsi/Makefile has an override in it for the most common
-NCR53c400 card, the Trantor T130B in its default configuration:
- Port: 0x350
- IRQ : 5
+Use of an interrupt is recommended, if supported by the board, as this will
+allow targets to disconnect and thereby improve SCSI bus utilization.
+
+If the irq parameter is 254 or is omitted entirely, the driver will probe
+for the correct IRQ line automatically. If the irq parameter is 0 or 255
+then no IRQ will be used.
The NCR53c400 does not support DMA but it does have Pseudo-DMA which is
supported by the driver.
@@ -47,22 +45,24 @@ These old-style parameters can support only one card:
dtc_3181e=1 to set up for a Domex Technology Corp 3181E board
hp_c2502=1 to set up for a Hewlett Packard C2502 board
-e.g.
-OLD: modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
-NEW: modprobe g_NCR5380 irq=5 base=0x350 card=0
- for a port mapped NCR5380 board or
-
-OLD: modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
-NEW: modprobe g_NCR5380 irq=255 base=0xc8000 card=1
- for a memory mapped NCR53C400 board with interrupts disabled or
+E.g. Trantor T130B in its default configuration:
+modprobe g_NCR5380 irq=5 base=0x350 card=1
+or alternatively, using the old syntax,
+modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_53c400=1
-NEW: modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
- for two cards: DTC3181 (in non-PnP mode) at 0x240 with no IRQ
- and HP C2502 at 0x300 with IRQ 7
+E.g. a port mapped NCR5380 board, driver to probe for IRQ:
+modprobe g_NCR5380 base=0x350 card=0
+or alternatively,
+modprobe g_NCR5380 ncr_addr=0x350 ncr_5380=1
-(255 should be specified for no or DMA interrupt, 254 to autoprobe for an
- IRQ line if overridden on the command line.)
+E.g. a memory mapped NCR53C400 board with no IRQ:
+modprobe g_NCR5380 irq=255 base=0xc8000 card=1
+or alternatively,
+modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
+E.g. two cards, DTC3181 (in non-PnP mode) at 0x240 with no IRQ
+and HP C2502 at 0x300 with IRQ 7:
+modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
Kevin Lentin
K.Lentin@cs.monash.edu.au
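When the driver is built into the kernel rather than loaded with modprobe, the same parameters can be passed on the kernel command line using the usual module-name prefix; for the Trantor T130B example above that would be (illustrative only):

  g_NCR5380.irq=5 g_NCR5380.base=0x350 g_NCR5380.card=1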
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
index 55f275793028..25feb0d35e7a 100755
--- a/Documentation/sphinx/rstFlatTable.py
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -157,6 +157,11 @@ class ListTableBuilder(object):
def buildTableNode(self):
colwidths = self.directive.get_column_widths(self.max_cols)
+ if isinstance(colwidths, tuple):
+ # Since docutils 0.13, get_column_widths returns a (widths,
+ # colwidths) tuple, where widths is a string (i.e. 'auto').
+ # See https://sourceforge.net/p/docutils/patches/120/.
+ colwidths = colwidths[1]
stub_columns = self.directive.options.get('stub-columns', 0)
header_rows = self.directive.options.get('header-rows', 0)
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
new file mode 100644
index 000000000000..d918d268cd72
--- /dev/null
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -0,0 +1,214 @@
+User Interface for Resource Allocation in Intel Resource Director Technology
+
+Copyright (C) 2016 Intel Corporation
+
+Fenghua Yu <fenghua.yu@intel.com>
+Tony Luck <tony.luck@intel.com>
+
+This feature is enabled by the CONFIG_INTEL_RDT_A Kconfig and the
+X86 /proc/cpuinfo flag bits "rdt", "cat_l3" and "cdp_l3".
+
+To use the feature mount the file system:
+
+ # mount -t resctrl resctrl [-o cdp] /sys/fs/resctrl
+
+mount options are:
+
+"cdp": Enable code/data prioritization in L3 cache allocations.
+
+
+Info directory
+--------------
+
+The 'info' directory contains information about the enabled
+resources. Each resource has its own subdirectory. The subdirectory
+names reflect the resource names. Each subdirectory contains the
+following files:
+
+"num_closids": The number of CLOSIDs which are valid for this
+ resource. The kernel uses the smallest number of
+ CLOSIDs of all enabled resources as limit.
+
+"cbm_mask": The bitmask which is valid for this resource. This
+ mask is equivalent to 100%.
+
+"min_cbm_bits": The minimum number of consecutive bits which must be
+ set when writing a mask.
+
+
+Resource groups
+---------------
+Resource groups are represented as directories in the resctrl file
+system. The default group is the root directory. Other groups may be
+created as desired by the system administrator using the "mkdir(1)"
+command, and removed using "rmdir(1)".
+
+There are three files associated with each group:
+
+"tasks": A list of tasks that belongs to this group. Tasks can be
+ added to a group by writing the task ID to the "tasks" file
+ (which will automatically remove them from the previous
+ group to which they belonged). New tasks created by fork(2)
+ and clone(2) are added to the same group as their parent.
+ If a pid is not in any sub partition, it is in root partition
+ (i.e. default partition).
+
+"cpus": A bitmask of logical CPUs assigned to this group. Writing
+ a new mask can add/remove CPUs from this group. Added CPUs
+ are removed from their previous group. Removed ones are
+ given to the default (root) group. You cannot remove CPUs
+ from the default group.
+
+"schemata": A list of all the resources available to this group.
+ Each resource has its own line and format - see below for
+ details.
+
+When a task is running the following rules define which resources
+are available to it:
+
+1) If the task is a member of a non-default group, then the schemata
+for that group is used.
+
+2) Else if the task belongs to the default group, but is running on a
+CPU that is assigned to some specific group, then the schemata for
+the CPU's group is used.
+
+3) Otherwise the schemata for the default group is used.
+
+
+Schemata files - general concepts
+---------------------------------
+Each line in the file describes one resource. The line starts with
+the name of the resource, followed by specific values to be applied
+in each of the instances of that resource on the system.
+
+Cache IDs
+---------
+On current generation systems there is one L3 cache per socket and L2
+caches are generally just shared by the hyperthreads on a core, but this
+isn't an architectural requirement. We could have multiple separate L3
+caches on a socket, multiple cores could share an L2 cache. So instead
+of using "socket" or "core" to define the set of logical cpus sharing
+a resource we use a "Cache ID". At a given cache level this will be a
+unique number across the whole system (but it isn't guaranteed to be a
+contiguous sequence, there may be gaps). To find the ID for each logical
+CPU look in /sys/devices/system/cpu/cpu*/cache/index*/id
+
+Cache Bit Masks (CBM)
+---------------------
+For cache resources we describe the portion of the cache that is available
+for allocation using a bitmask. The maximum value of the mask is defined
+by each cpu model (and may be different for different cache levels). It
+is found using CPUID, but is also provided in the "info" directory of
+the resctrl file system in "info/{resource}/cbm_mask". X86 hardware
+requires that these masks have all the '1' bits in a contiguous block. So
+0x3, 0x6 and 0xC are legal 4-bit masks with two bits set, but 0x5, 0x9
+and 0xA are not. On a system with a 20-bit mask each bit represents 5%
+of the capacity of the cache. You could partition the cache into four
+equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000.
+
+
+L3 details (code and data prioritization disabled)
+--------------------------------------------------
+With CDP disabled the L3 schemata format is:
+
+ L3:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+L3 details (CDP enabled via mount option to resctrl)
+----------------------------------------------------
+When CDP is enabled L3 control is split into two separate resources
+so you can specify independent masks for code and data like this:
+
+ L3data:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+ L3code:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+L2 details
+----------
+L2 cache does not support code and data prioritization, so the
+schemata format is always:
+
+ L2:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+Example 1
+---------
+On a two socket machine (one L3 cache per socket) with just four bits
+for cache bit masks
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+# mkdir p0 p1
+# echo "L3:0=3;1=c" > /sys/fs/resctrl/p0/schemata
+# echo "L3:0=3;1=3" > /sys/fs/resctrl/p1/schemata
+
+The default resource group is unmodified, so we have access to all parts
+of all caches (its schemata file reads "L3:0=f;1=f").
+
+Tasks that are under the control of group "p0" may only allocate from the
+"lower" 50% on cache ID 0, and the "upper" 50% of cache ID 1.
+Tasks in group "p1" use the "lower" 50% of cache on both sockets.
+
+Example 2
+---------
+Again two sockets, but this time with a more realistic 20-bit mask.
+
+Two real time tasks pid=1234 running on processor 0 and pid=5678 running on
+processor 1 on socket 0 on a 2-socket and dual core machine. To avoid noisy
+neighbors, each of the two real-time tasks exclusively occupies one quarter
+of L3 cache on socket 0.
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+
+First we reset the schemata for the default group so that the "upper"
+50% of the L3 cache on socket 0 cannot be used by ordinary tasks:
+
+# echo "L3:0=3ff;1=fffff" > schemata
+
+Next we make a resource group for our first real time task and give
+it access to the "top" 25% of the cache on socket 0.
+
+# mkdir p0
+# echo "L3:0=f8000;1=fffff" > p0/schemata
+
+Finally we move our first real time task into this resource group. We
+also use taskset(1) to ensure the task always runs on a dedicated CPU
+on socket 0. Most uses of resource groups will also constrain which
+processors tasks run on.
+
+# echo 1234 > p0/tasks
+# taskset -cp 1 1234
+
+Ditto for the second real time task (with the remaining 25% of cache):
+
+# mkdir p1
+# echo "L3:0=7c00;1=fffff" > p1/schemata
+# echo 5678 > p1/tasks
+# taskset -cp 2 5678
+
+Example 3
+---------
+
+A single socket system which has real-time tasks running on core 4-7 and
+non real-time workload assigned to core 0-3. The real-time tasks share text
+and data, so a per task association is not required and due to interaction
+with the kernel it's desired that the kernel on these cores shares L3 with
+the tasks.
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+
+First we reset the schemata for the default group so that the "upper"
+50% of the L3 cache on socket 0 cannot be used by ordinary tasks:
+
+# echo "L3:0=3ff" > schemata
+
+Next we make a resource group for our real time cores and give
+it access to the "top" 50% of the cache on socket 0.
+
+# mkdir p0
+# echo "L3:0=ffc00;" > p0/schemata
+
+Finally we move core 4-7 over to the new group and make sure that the
+kernel and the tasks running there get 50% of the cache.
+
+# echo f0 > p0/cpus
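The CDP schemata format described earlier can be exercised the same way; a minimal sketch (assuming a single-socket system with a 20-bit mask and a fresh resctrl instance mounted with the cdp option) that gives one group disjoint code and data partitions:

# mount -t resctrl resctrl -o cdp /sys/fs/resctrl
# cd /sys/fs/resctrl
# mkdir p2
# printf 'L3code:0=ff\nL3data:0=3ff00\n' > p2/schemata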
diff --git a/MAINTAINERS b/MAINTAINERS
index f6eb97b35e0f..979126a9a150 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -143,7 +143,7 @@ S: Maintained
F: drivers/net/ethernet/3com/typhoon*
3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
-M: Adam Radford <linuxraid@lsi.com>
+M: Adam Radford <aradford@gmail.com>
L: linux-scsi@vger.kernel.org
W: http://www.lsi.com
S: Supported
@@ -1747,7 +1747,7 @@ F: drivers/staging/media/platform/s5p-cec/
ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
M: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
-M: Jacek Anaszewski <j.anaszewski@samsung.com>
+M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
@@ -7229,7 +7229,7 @@ F: drivers/scsi/53c700*
LED SUBSYSTEM
M: Richard Purdie <rpurdie@rpsys.net>
-M: Jacek Anaszewski <j.anaszewski@samsung.com>
+M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
M: Pavel Machek <pavel@ucw.cz>
L: linux-leds@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
@@ -10136,6 +10136,12 @@ F: drivers/net/ethernet/qlogic/qed/
F: include/linux/qed/
F: drivers/net/ethernet/qlogic/qede/
+QLOGIC QL41xxx ISCSI DRIVER
+M: QLogic-Storage-Upstream@cavium.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/qedi/
+
QNX4 FILESYSTEM
M: Anders Larsen <al@alarsen.net>
W: http://www.alarsen.net/linux/qnx4fs/
@@ -10327,6 +10333,14 @@ L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/sw/rdmavt
+RDT - RESOURCE ALLOCATION
+M: Fenghua Yu <fenghua.yu@intel.com>
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: arch/x86/kernel/cpu/intel_rdt*
+F: arch/x86/include/asm/intel_rdt*
+F: Documentation/x86/intel_rdt*
+
READ-COPY UPDATE (RCU)
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
diff --git a/arch/Kconfig b/arch/Kconfig
index 19483aea4bbc..99839c23d453 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -5,6 +5,9 @@
config KEXEC_CORE
bool
+config HAVE_IMA_KEXEC
+ bool
+
config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index ab12723d39a0..c75d29077e4a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -9,6 +9,7 @@
config ARC
def_bool y
select ARC_TIMERS
+ select ARCH_HAS_SG_CHAIN
select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index da41a54ea2d7..f659942744de 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -244,7 +244,7 @@ struct cpuinfo_arc_mmu {
};
struct cpuinfo_arc_cache {
- unsigned int sz_k:14, line_len:8, assoc:4, ver:4, alias:1, vipt:1;
+ unsigned int sz_k:14, line_len:8, assoc:4, alias:1, vipt:1, pad:4;
};
struct cpuinfo_arc_bpu {
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index a093adbdb017..fc662f49c55a 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -85,6 +85,10 @@ void flush_anon_page(struct vm_area_struct *vma,
*/
#define PG_dc_clean PG_arch_1
+#define CACHE_COLORS_NUM 4
+#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
+#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
+
/*
* Simple wrapper over config option
* Bootup code ensures that hardware matches kernel configuration
@@ -94,8 +98,6 @@ static inline int cache_is_vipt_aliasing(void)
return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
}
-#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
-
/*
* checks if two addresses (after page aligning) index into same cache set
*/
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index e880dfa3fcd3..a64c447b0337 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -38,10 +38,10 @@
#define AUX_IRQ_ACT_BIT_U 31
/*
- * User space should be interruptable even by lowest prio interrupt
- * Safe even if actual interrupt priorities is fewer or even one
+ * Hardware supports 16 priorities (0 highest, 15 lowest)
+ * Linux by default runs at 1, priority 0 reserved for NMI style interrupts
*/
-#define ARCV2_IRQ_DEF_PRIO 15
+#define ARCV2_IRQ_DEF_PRIO 1
/* seed value for status register */
#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 7a1c124ff021..0b6388a5f0b8 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -67,12 +67,23 @@ ENTRY(handle_interrupt)
INTERRUPT_PROLOGUE irq
- clri ; To make status32.IE agree with CPU internal state
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- TRACE_ASM_IRQ_DISABLE
-#endif
-
+ # irq control APIs local_irq_save/restore/disable/enable fiddle with
+ # global interrupt enable bits in STATUS32 (.IE for 1 prio, .E[] for 2 prio)
+ # However a taken interrupt doesn't clear these bits. Thus irqs_disabled()
+ # query in hard ISR path would return false (since .IE is set) which would
+ # trip genirq interrupt handling asserts.
+ #
+ # So do a "soft" disable of interrupts here.
+ #
+ # Note this disable is only for consistent book-keeping as further interrupts
+ # will be disabled anyways even w/o this. Hardware tracks active interrupts
+ # separately in AUX_IRQ_ACTIVE.active and will not take new interrupts
+ # unless this one returns (or higher prio becomes pending in 2-prio scheme)
+
+ IRQ_DISABLE
+
+ ; icause is banked: one per priority level
+ ; so a higher prio interrupt taken here won't clobber prev prio icause
lr r0, [ICAUSE]
mov blink, ret_from_exception
@@ -171,6 +182,7 @@ END(EV_TLBProtV)
; All 2 entry points to here already disable interrupts
.Lrestore_regs:
+restore_regs:
# Interrpts are actually disabled from this point on, but will get
# reenabled after we return from interrupt/exception.
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 98812c1248df..9211707634dc 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -259,7 +259,7 @@ ENTRY(EV_TLBProtV)
EXCEPTION_PROLOGUE
- lr r2, [ecr]
+ mov r2, r9 ; ECR set into r9 already
lr r0, [efa] ; Faulting Data address (not part of pt_regs saved above)
; Exception auto-disables further Intr/exceptions.
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 62b59409a5d9..994dca7014db 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -14,8 +14,6 @@
#include <linux/irqchip.h>
#include <asm/irq.h>
-static int irq_prio;
-
/*
* Early Hardware specific Interrupt setup
* -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -24,7 +22,7 @@ static int irq_prio;
*/
void arc_init_IRQ(void)
{
- unsigned int tmp;
+ unsigned int tmp, irq_prio;
struct irq_build {
#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -67,12 +65,12 @@ void arc_init_IRQ(void)
irq_prio = irq_bcr.prio; /* Encoded as N-1 for N levels */
pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
- irq_prio + 1, irq_prio,
+ irq_prio + 1, ARCV2_IRQ_DEF_PRIO,
irq_bcr.firq ? " FIRQ (not used)":"");
/* setup status32, don't enable intr yet as kernel doesn't want */
tmp = read_aux_reg(0xa);
- tmp |= STATUS_AD_MASK | (irq_prio << 1);
+ tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
tmp &= ~STATUS_IE_MASK;
asm volatile("kflag %0 \n"::"r"(tmp));
}
@@ -93,7 +91,7 @@ void arcv2_irq_enable(struct irq_data *data)
{
/* set default priority */
write_aux_reg(AUX_IRQ_SELECT, data->irq);
- write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
+ write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
/*
* hw auto enables (linux unmask) all by default
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 50d71695cd4e..ec86ac0e3321 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -40,7 +40,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
struct cpuinfo_arc_cache *p;
#define PR_CACHE(p, cfg, str) \
- if (!(p)->ver) \
+ if (!(p)->line_len) \
n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
else \
n += scnprintf(buf + n, len - n, \
@@ -54,7 +54,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
p = &cpuinfo_arc700[c].slc;
- if (p->ver)
+ if (p->line_len)
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
@@ -104,7 +104,6 @@ static void read_decode_cache_bcr_arcv2(int cpu)
READ_BCR(ARC_REG_SLC_BCR, sbcr);
if (sbcr.ver) {
READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
- p_slc->ver = sbcr.ver;
p_slc->sz_k = 128 << slc_cfg.sz;
l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
}
@@ -152,7 +151,6 @@ void read_decode_cache_bcr(void)
p_ic->line_len = 8 << ibcr.line_len;
p_ic->sz_k = 1 << (ibcr.sz - 1);
- p_ic->ver = ibcr.ver;
p_ic->vipt = 1;
p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
@@ -176,7 +174,6 @@ dc_chk:
p_dc->line_len = 16 << dbcr.line_len;
p_dc->sz_k = 1 << (dbcr.sz - 1);
- p_dc->ver = dbcr.ver;
slc_chk:
if (is_isa_arcv2())
@@ -945,17 +942,13 @@ void arc_cache_init(void)
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
- if (!ic->ver)
+ if (!ic->line_len)
panic("cache support enabled but non-existent cache\n");
if (ic->line_len != L1_CACHE_BYTES)
panic("ICache line [%d] != kernel Config [%d]",
ic->line_len, L1_CACHE_BYTES);
- if (ic->ver != CONFIG_ARC_MMU_VER)
- panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
- ic->ver, CONFIG_ARC_MMU_VER);
-
/*
* In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
@@ -969,7 +962,7 @@ void arc_cache_init(void)
if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
- if (!dc->ver)
+ if (!dc->line_len)
panic("cache support enabled but non-existent cache\n");
if (dc->line_len != L1_CACHE_BYTES)
@@ -979,11 +972,16 @@ void arc_cache_init(void)
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
if (is_isa_arcompact()) {
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
-
- if (dc->alias && !handled)
- panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
- else if (!dc->alias && handled)
+ int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
+
+ if (dc->alias) {
+ if (!handled)
+ panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+ if (CACHE_COLORS_NUM != num_colors)
+ panic("CACHE_COLORS_NUM not optimized for config\n");
+ } else if (!dc->alias && handled) {
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+ }
}
}
diff --git a/arch/arm/boot/dts/hisi-x5hd2.dtsi b/arch/arm/boot/dts/hisi-x5hd2.dtsi
index c02e092fad8b..6c712a97e1fe 100644
--- a/arch/arm/boot/dts/hisi-x5hd2.dtsi
+++ b/arch/arm/boot/dts/hisi-x5hd2.dtsi
@@ -438,7 +438,7 @@
};
gmac0: ethernet@1840000 {
- compatible = "hisilicon,hix5hd2-gemac", "hisilicon,hisi-gemac-v1";
+ compatible = "hisilicon,hix5hd2-gmac", "hisilicon,hisi-gmac-v1";
reg = <0x1840000 0x1000>,<0x184300c 0x4>;
interrupts = <0 71 4>;
clocks = <&clock HIX5HD2_MAC0_CLK>;
@@ -447,7 +447,7 @@
};
gmac1: ethernet@1841000 {
- compatible = "hisilicon,hix5hd2-gemac", "hisilicon,hisi-gemac-v1";
+ compatible = "hisilicon,hix5hd2-gmac", "hisilicon,hisi-gmac-v1";
reg = <0x1841000 0x1000>,<0x1843010 0x4>;
interrupts = <0 72 4>;
clocks = <&clock HIX5HD2_MAC1_CLK>;
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index d0de0e032bc2..c1976c0adca7 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -29,7 +29,7 @@
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
-/* ACPI table mapping after acpi_gbl_permanent_mmap is set */
+/* ACPI table mapping after acpi_permanent_mmap is set */
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index b71086d25195..bfe632808d77 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -165,6 +165,11 @@ extern u64 kimage_vaddr;
/* the offset between the kernel virtual and physical mappings */
extern u64 kimage_voffset;
+static inline unsigned long kaslr_offset(void)
+{
+ return kimage_vaddr - KIMAGE_VADDR;
+}
+
/*
* Allow all memory at the discovery stage. We will clip it later.
*/
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 252a6d9c1da5..64d9cbd61678 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -132,14 +132,13 @@ static int __init acpi_fadt_sanity_check(void)
struct acpi_table_header *table;
struct acpi_table_fadt *fadt;
acpi_status status;
- acpi_size tbl_size;
int ret = 0;
/*
* FADT is required on arm64; retrieve it to check its presence
* and carry out revision and ACPI HW reduced compliancy tests
*/
- status = acpi_get_table_with_size(ACPI_SIG_FADT, 0, &table, &tbl_size);
+ status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
@@ -170,10 +169,10 @@ static int __init acpi_fadt_sanity_check(void)
out:
/*
- * acpi_get_table_with_size() creates FADT table mapping that
+ * acpi_get_table() creates FADT table mapping that
* should be released after parsing and before resuming boot
*/
- early_acpi_os_unmap_memory(table, tbl_size);
+ acpi_put_table(table);
return ret;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index a53f52ac81c6..b051367e2149 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -338,11 +338,11 @@ subsys_initcall(topology_init);
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
void *p)
{
- u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
+ const unsigned long offset = kaslr_offset();
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
- pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
- kaslr_offset, KIMAGE_VADDR);
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
+ pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
+ offset, KIMAGE_VADDR);
} else {
pr_emerg("Kernel Offset: disabled\n");
}
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 805ae5d712e8..032fed71223f 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
#endif /* __ASSEMBLY__ */
-#define __NR_syscalls 392
+#define __NR_syscalls 398
#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index a8bd3fa28bc7..d8086159d996 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -407,5 +407,11 @@
#define __NR_userfaultfd 389
#define __NR_membarrier 390
#define __NR_mlock2 391
+#define __NR_copy_file_range 392
+#define __NR_preadv2 393
+#define __NR_pwritev2 394
+#define __NR_pkey_mprotect 395
+#define __NR_pkey_alloc 396
+#define __NR_pkey_free 397
#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index b70bb538f001..96b3f26d16be 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -49,6 +49,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"9.3", 0x20},
{"9.4", 0x21},
{"9.5", 0x22},
+ {"9.6", 0x23},
+ {"10.0", 0x24},
{NULL, 0},
};
@@ -75,6 +77,10 @@ const struct family_string_key family_string_lookup[] = {
{"zynq7000", 0x12},
{"UltraScale Virtex", 0x13},
{"UltraScale Kintex", 0x14},
+ {"UltraScale+ Zynq", 0x15},
+ {"UltraScale+ Virtex", 0x16},
+ {"UltraScale+ Kintex", 0x17},
+ {"Spartan7", 0x18},
{NULL, 0},
};
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 6b3dd99126d7..6841c2df14d9 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -392,3 +392,9 @@ ENTRY(sys_call_table)
.long sys_userfaultfd
.long sys_membarrier /* 390 */
.long sys_mlock2
+ .long sys_copy_file_range
+ .long sys_preadv2
+ .long sys_pwritev2
+ .long sys_pkey_mprotect /* 395 */
+ .long sys_pkey_alloc
+ .long sys_pkey_free
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 5bbf38b916ef..9e954959f605 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -259,7 +259,7 @@ static int __init xilinx_timer_init(struct device_node *timer)
int ret;
if (initialized)
- return;
+ return -EINVAL;
initialized = 1;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index a14b86587013..3a71f38cdc05 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -7,6 +7,7 @@ config PARISC
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_SYSCALL_TRACEPOINTS
select ARCH_WANT_FRAME_POINTERS
+ select ARCH_HAS_ELF_RANDOMIZE
select RTC_CLASS
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index 78c9fd32c554..a6b2a421571e 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -348,9 +348,10 @@ struct pt_regs; /* forward declaration... */
#define ELF_HWCAP 0
-#define STACK_RND_MASK (is_32bit_task() ? \
- 0x7ff >> (PAGE_SHIFT - 12) : \
- 0x3ffff >> (PAGE_SHIFT - 12))
+/* Masks for stack and mmap randomization */
+#define BRK_RND_MASK (is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
+#define MMAP_RND_MASK (is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
+#define STACK_RND_MASK MMAP_RND_MASK
struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *);
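With 4 kB pages these masks reproduce the ranges the removed open-coded constants gave (see the process.c and sys_parisc.c hunks below): 0x07ff << PAGE_SHIFT is roughly 8 MB of brk randomization for 32-bit tasks and 0x3ffff << PAGE_SHIFT roughly 1 GB for 64-bit tasks, while the 32-bit mmap mask of 0x1fff works out to about 32 MB.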
diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h
index 47539f117958..e1d289092705 100644
--- a/arch/parisc/include/asm/pdcpat.h
+++ b/arch/parisc/include/asm/pdcpat.h
@@ -289,7 +289,7 @@ extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
-extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
+extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa);
extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index ca40741378be..a3661ee6b060 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -93,9 +93,7 @@ struct system_cpuinfo_parisc {
/* Per CPU data structure - ie varies per CPU. */
struct cpuinfo_parisc {
unsigned long it_value; /* Interval Timer at last timer Intr */
- unsigned long it_delta; /* Interval delta (tic_10ms / HZ * 100) */
unsigned long irq_count; /* number of IRQ's since boot */
- unsigned long irq_max_cr16; /* longest time to handle a single IRQ */
unsigned long cpuid; /* aka slot_number or set to NO_PROC_ID */
unsigned long hpa; /* Host Physical address */
unsigned long txn_addr; /* MMIO addr of EIR or id_eid */
@@ -103,8 +101,6 @@ struct cpuinfo_parisc {
unsigned long pending_ipi; /* bitmap of type ipi_message_type */
#endif
unsigned long bh_count; /* number of times bh was invoked */
- unsigned long prof_counter; /* per CPU profiling support */
- unsigned long prof_multiplier; /* per CPU profiling support */
unsigned long fp_rev;
unsigned long fp_model;
unsigned int state;
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 4fcff2dcc9c3..ad4cb1613c57 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -878,6 +878,9 @@ ENTRY_CFI(syscall_exit_rfi)
STREG %r19,PT_SR7(%r16)
intr_return:
+ /* NOTE: Need to enable interrupts in case we schedule. */
+ ssm PSW_SM_I, %r0
+
/* check for reschedule */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
@@ -904,11 +907,6 @@ intr_check_sig:
LDREG PT_IASQ1(%r16), %r20
cmpib,COND(=),n 0,%r20,intr_restore /* backward */
- /* NOTE: We need to enable interrupts if we have to deliver
- * signals. We used to do this earlier but it caused kernel
- * stack overflows. */
- ssm PSW_SM_I, %r0
-
copy %r0, %r25 /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
@@ -960,10 +958,6 @@ intr_do_resched:
cmpib,COND(=) 0, %r20, intr_do_preempt
nop
- /* NOTE: We need to enable interrupts if we schedule. We used
- * to do this earlier but it caused kernel stack overflows. */
- ssm PSW_SM_I, %r0
-
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index e5d71905cad5..9d797ae4fa22 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -1258,7 +1258,7 @@ int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long
*
* Retrieve the cpu number for the cpu at the specified HPA.
*/
-int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa)
+int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa)
{
int retval;
unsigned long flags;
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index c05d1876d27c..c9789d9c73b4 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -216,9 +216,9 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
register_parisc_device(dev); /* advertise device */
#ifdef DEBUG_PAT
- pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
/* dump what we see so far... */
switch (PAT_GET_ENTITY(dev->mod_info)) {
+ pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
unsigned long i;
case PAT_ENTITY_PROC:
@@ -259,9 +259,9 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
pa_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
printk(KERN_DEBUG
" IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
- i, io_pdc_cell->mod[2 + i * 3], /* type */
- io_pdc_cell->mod[3 + i * 3], /* start */
- io_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
+ i, io_pdc_cell.mod[2 + i * 3], /* type */
+ io_pdc_cell.mod[3 + i * 3], /* start */
+ io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
}
printk(KERN_DEBUG "\n");
break;
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 518f4f5f1f43..6eabce62463b 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -301,7 +301,6 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
{
- int err;
size_t image_size;
uint32_t image_type;
uint32_t interface_type;
@@ -320,8 +319,8 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
if (count != sizeof(uint32_t))
return -EIO;
- if ((err = copy_from_user(&image_type, buf, sizeof(uint32_t))) != 0)
- return err;
+ if (copy_from_user(&image_type, buf, sizeof(uint32_t)))
+ return -EFAULT;
/* Get the interface type and test type */
interface_type = (image_type >> 16) & 0xffff;
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 40639439d8b3..ea6603ee8d24 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -276,11 +276,7 @@ void *dereference_function_descriptor(void *ptr)
static inline unsigned long brk_rnd(void)
{
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
- else
- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+ return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 0c2a94a0f751..85de47f4eb59 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -78,11 +78,6 @@ DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
static void
init_percpu_prof(unsigned long cpunum)
{
- struct cpuinfo_parisc *p;
-
- p = &per_cpu(cpu_data, cpunum);
- p->prof_counter = 1;
- p->prof_multiplier = 1;
}
@@ -99,6 +94,7 @@ static int processor_probe(struct parisc_device *dev)
unsigned long txn_addr;
unsigned long cpuid;
struct cpuinfo_parisc *p;
+ struct pdc_pat_cpu_num cpu_info __maybe_unused;
#ifdef CONFIG_SMP
if (num_online_cpus() >= nr_cpu_ids) {
@@ -123,10 +119,6 @@ static int processor_probe(struct parisc_device *dev)
ulong status;
unsigned long bytecnt;
pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
-#undef USE_PAT_CPUID
-#ifdef USE_PAT_CPUID
- struct pdc_pat_cpu_num cpu_info;
-#endif
pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
if (!pa_pdc_cell)
@@ -145,22 +137,27 @@ static int processor_probe(struct parisc_device *dev)
kfree(pa_pdc_cell);
+ /* get the cpu number */
+ status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
+ BUG_ON(PDC_OK != status);
+
+ pr_info("Logical CPU #%lu is physical cpu #%lu at location "
+ "0x%lx with hpa %pa\n",
+ cpuid, cpu_info.cpu_num, cpu_info.cpu_loc,
+ &dev->hpa.start);
+
+#undef USE_PAT_CPUID
#ifdef USE_PAT_CPUID
/* We need contiguous numbers for cpuid. Firmware's notion
* of cpuid is for physical CPUs and we just don't care yet.
* We'll care when we need to query PAT PDC about a CPU *after*
* boot time (ie shutdown a CPU from an OS perspective).
*/
- /* get the cpu number */
- status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
-
- BUG_ON(PDC_OK != status);
-
if (cpu_info.cpu_num >= NR_CPUS) {
- printk(KERN_WARNING "IGNORING CPU at 0x%x,"
+ printk(KERN_WARNING "IGNORING CPU at %pa,"
" cpu_slot_id > NR_CPUS"
" (%ld > %d)\n",
- dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
+ &dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
/* Ignore CPU since it will only crash */
boot_cpu_data.cpu_count--;
return 1;
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 0a393a04e891..a81e177cac7b 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -225,19 +225,17 @@ static unsigned long mmap_rnd(void)
{
unsigned long rnd = 0;
- /*
- * 8 bits of randomness in 32bit mmaps, 20 address space bits
- * 28 bits of randomness in 64bit mmaps, 40 address space bits
- */
- if (current->flags & PF_RANDOMIZE) {
- if (is_32bit_task())
- rnd = get_random_int() % (1<<8);
- else
- rnd = get_random_int() % (1<<28);
- }
+ if (current->flags & PF_RANDOMIZE)
+ rnd = get_random_int() & MMAP_RND_MASK;
+
return rnd << PAGE_SHIFT;
}
+unsigned long arch_mmap_rnd(void)
+{
+ return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
+}
+
static unsigned long mmap_legacy_base(void)
{
return TASK_UNMAPPED_BASE + mmap_rnd();
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 325f30d82b64..4215f5596c8b 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -59,10 +59,9 @@ static unsigned long clocktick __read_mostly; /* timer cycles per tick */
*/
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
- unsigned long now, now2;
+ unsigned long now;
unsigned long next_tick;
- unsigned long cycles_elapsed, ticks_elapsed = 1;
- unsigned long cycles_remainder;
+ unsigned long ticks_elapsed = 0;
unsigned int cpu = smp_processor_id();
struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
@@ -71,102 +70,49 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
profile_tick(CPU_PROFILING);
- /* Initialize next_tick to the expected tick time. */
+ /* Initialize next_tick to the old expected tick time. */
next_tick = cpuinfo->it_value;
- /* Get current cycle counter (Control Register 16). */
- now = mfctl(16);
-
- cycles_elapsed = now - next_tick;
-
- if ((cycles_elapsed >> 6) < cpt) {
- /* use "cheap" math (add/subtract) instead
- * of the more expensive div/mul method
- */
- cycles_remainder = cycles_elapsed;
- while (cycles_remainder > cpt) {
- cycles_remainder -= cpt;
- ticks_elapsed++;
- }
- } else {
- /* TODO: Reduce this to one fdiv op */
- cycles_remainder = cycles_elapsed % cpt;
- ticks_elapsed += cycles_elapsed / cpt;
- }
-
- /* convert from "division remainder" to "remainder of clock tick" */
- cycles_remainder = cpt - cycles_remainder;
-
- /* Determine when (in CR16 cycles) next IT interrupt will fire.
- * We want IT to fire modulo clocktick even if we miss/skip some.
- * But those interrupts don't in fact get delivered that regularly.
- */
- next_tick = now + cycles_remainder;
+ /* Calculate how many ticks have elapsed. */
+ do {
+ ++ticks_elapsed;
+ next_tick += cpt;
+ now = mfctl(16);
+ } while (next_tick - now > cpt);
+ /* Store (in CR16 cycles) up to when we are accounting right now. */
cpuinfo->it_value = next_tick;
- /* Program the IT when to deliver the next interrupt.
- * Only bottom 32-bits of next_tick are writable in CR16!
- */
- mtctl(next_tick, 16);
+ /* Go do system housekeeping. */
+ if (cpu == 0)
+ xtime_update(ticks_elapsed);
+
+ update_process_times(user_mode(get_irq_regs()));
- /* Skip one clocktick on purpose if we missed next_tick.
+ /* Skip clockticks on purpose if we know we would miss those.
* The new CR16 must be "later" than current CR16 otherwise
* itimer would not fire until CR16 wrapped - e.g 4 seconds
* later on a 1Ghz processor. We'll account for the missed
- * tick on the next timer interrupt.
+ * ticks on the next timer interrupt.
+ * We want IT to fire modulo clocktick even if we miss/skip some.
+ * But those interrupts don't in fact get delivered that regularly.
*
* "next_tick - now" will always give the difference regardless
* if one or the other wrapped. If "now" is "bigger" we'll end up
* with a very large unsigned number.
*/
- now2 = mfctl(16);
- if (next_tick - now2 > cpt)
- mtctl(next_tick+cpt, 16);
+ while (next_tick - mfctl(16) > cpt)
+ next_tick += cpt;
-#if 1
-/*
- * GGG: DEBUG code for how many cycles programming CR16 used.
- */
- if (unlikely(now2 - now > 0x3000)) /* 12K cycles */
- printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
- " cyc %lX rem %lX "
- " next/now %lX/%lX\n",
- cpu, now2 - now, cycles_elapsed, cycles_remainder,
- next_tick, now );
-#endif
-
- /* Can we differentiate between "early CR16" (aka Scenario 1) and
- * "long delay" (aka Scenario 3)? I don't think so.
- *
- * Timer_interrupt will be delivered at least a few hundred cycles
- * after the IT fires. But it's arbitrary how much time passes
- * before we call it "late". I've picked one second.
- *
- * It's important NO printk's are between reading CR16 and
- * setting up the next value. May introduce huge variance.
- */
- if (unlikely(ticks_elapsed > HZ)) {
- /* Scenario 3: very long delay? bad in any case */
- printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
- " cycles %lX rem %lX "
- " next/now %lX/%lX\n",
- cpu,
- cycles_elapsed, cycles_remainder,
- next_tick, now );
- }
-
- /* Done mucking with unreliable delivery of interrupts.
- * Go do system house keeping.
+ /* Program the IT when to deliver the next interrupt.
+ * Only bottom 32-bits of next_tick are writable in CR16!
+ * Timer interrupt will be delivered at least a few hundred cycles
+ * after the IT fires, so if we are too close (<= 500 cycles) to the
+ * next cycle, simply skip it.
*/
-
- if (!--cpuinfo->prof_counter) {
- cpuinfo->prof_counter = cpuinfo->prof_multiplier;
- update_process_times(user_mode(get_irq_regs()));
- }
-
- if (cpu == 0)
- xtime_update(ticks_elapsed);
+ if (next_tick - mfctl(16) <= 500)
+ next_tick += cpt;
+ mtctl(next_tick, 16);
return IRQ_HANDLED;
}
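
The rewritten loop leans on unsigned wrap-around arithmetic: "next_tick - now" yields the forward distance even when the counter wraps between the two reads. A minimal standalone sketch of that property, not part of the patch itself (32-bit arithmetic is used here only for brevity):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t now = 0xfffffff0u;	/* counter just before a wrap */
		uint32_t next_tick = 0x10u;	/* target just after the wrap */

		/* Unsigned subtraction: 0x10 - 0xfffffff0 == 0x20 */
		printf("forward distance = 0x%x\n", next_tick - now);
		return 0;
	}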
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3da87e198878..a8ee573fe610 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -469,6 +469,7 @@ config KEXEC
config KEXEC_FILE
bool "kexec file based system call"
select KEXEC_CORE
+ select HAVE_IMA_KEXEC
select BUILD_BIN2C
depends on PPC64
depends on CRYPTO=y
diff --git a/arch/powerpc/include/asm/ima.h b/arch/powerpc/include/asm/ima.h
new file mode 100644
index 000000000000..2313bdface34
--- /dev/null
+++ b/arch/powerpc/include/asm/ima.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_POWERPC_IMA_H
+#define _ASM_POWERPC_IMA_H
+
+struct kimage;
+
+int ima_get_kexec_buffer(void **addr, size_t *size);
+int ima_free_kexec_buffer(void);
+
+#ifdef CONFIG_IMA
+void remove_ima_buffer(void *fdt, int chosen_node);
+#else
+static inline void remove_ima_buffer(void *fdt, int chosen_node) {}
+#endif
+
+#ifdef CONFIG_IMA_KEXEC
+int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
+ size_t size);
+
+int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node);
+#else
+static inline int setup_ima_buffer(const struct kimage *image, void *fdt,
+ int chosen_node)
+{
+ remove_ima_buffer(fdt, chosen_node);
+ return 0;
+}
+#endif /* CONFIG_IMA_KEXEC */
+
+#endif /* _ASM_POWERPC_IMA_H */
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 6c3b71502fbc..25668bc8cb2a 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -94,11 +94,22 @@ static inline bool kdump_in_progress(void)
#ifdef CONFIG_KEXEC_FILE
extern struct kexec_file_ops kexec_elf64_ops;
+#ifdef CONFIG_IMA_KEXEC
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+ phys_addr_t ima_buffer_addr;
+ size_t ima_buffer_size;
+};
+#endif
+
int setup_purgatory(struct kimage *image, const void *slave_code,
const void *fdt, unsigned long kernel_load_addr,
unsigned long fdt_load_addr);
-int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
- unsigned long initrd_len, const char *cmdline);
+int setup_new_fdt(const struct kimage *image, void *fdt,
+ unsigned long initrd_load_addr, unsigned long initrd_len,
+ const char *cmdline);
+int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size);
#endif /* CONFIG_KEXEC_FILE */
#else /* !CONFIG_KEXEC_CORE */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a3a6047fd395..23f8082d7bfa 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -112,6 +112,10 @@ obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o crash.o \
machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file_$(BITS).o kexec_elf_$(BITS).o
+ifeq ($(CONFIG_HAVE_IMA_KEXEC)$(CONFIG_IMA),yy)
+obj-y += ima_kexec.o
+endif
+
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/powerpc/kernel/ima_kexec.c b/arch/powerpc/kernel/ima_kexec.c
new file mode 100644
index 000000000000..5ea42c937ca9
--- /dev/null
+++ b/arch/powerpc/kernel/ima_kexec.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/kexec.h>
+#include <linux/of.h>
+#include <linux/memblock.h>
+#include <linux/libfdt.h>
+
+static int get_addr_size_cells(int *addr_cells, int *size_cells)
+{
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return -EINVAL;
+
+ *addr_cells = of_n_addr_cells(root);
+ *size_cells = of_n_size_cells(root);
+
+ of_node_put(root);
+
+ return 0;
+}
+
+static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr,
+ size_t *size)
+{
+ int ret, addr_cells, size_cells;
+
+ ret = get_addr_size_cells(&addr_cells, &size_cells);
+ if (ret)
+ return ret;
+
+ if (len < 4 * (addr_cells + size_cells))
+ return -ENOENT;
+
+ *addr = of_read_number(prop, addr_cells);
+ *size = of_read_number(prop + 4 * addr_cells, size_cells);
+
+ return 0;
+}
+
+/**
+ * ima_get_kexec_buffer - get IMA buffer from the previous kernel
+ * @addr: On successful return, set to point to the buffer contents.
+ * @size: On successful return, set to the buffer size.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int ima_get_kexec_buffer(void **addr, size_t *size)
+{
+ int ret, len;
+ unsigned long tmp_addr;
+ size_t tmp_size;
+ const void *prop;
+
+ prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len);
+ if (!prop)
+ return -ENOENT;
+
+ ret = do_get_kexec_buffer(prop, len, &tmp_addr, &tmp_size);
+ if (ret)
+ return ret;
+
+ *addr = __va(tmp_addr);
+ *size = tmp_size;
+
+ return 0;
+}
+
+/**
+ * ima_free_kexec_buffer - free memory used by the IMA buffer
+ */
+int ima_free_kexec_buffer(void)
+{
+ int ret;
+ unsigned long addr;
+ size_t size;
+ struct property *prop;
+
+ prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL);
+ if (!prop)
+ return -ENOENT;
+
+ ret = do_get_kexec_buffer(prop->value, prop->length, &addr, &size);
+ if (ret)
+ return ret;
+
+ ret = of_remove_property(of_chosen, prop);
+ if (ret)
+ return ret;
+
+ return memblock_free(addr, size);
+
+}
+
+/**
+ * remove_ima_buffer - remove the IMA buffer property and reservation from @fdt
+ *
+ * The IMA measurement buffer is of no use to a subsequent kernel, so we always
+ * remove it from the device tree.
+ */
+void remove_ima_buffer(void *fdt, int chosen_node)
+{
+ int ret, len;
+ unsigned long addr;
+ size_t size;
+ const void *prop;
+
+ prop = fdt_getprop(fdt, chosen_node, "linux,ima-kexec-buffer", &len);
+ if (!prop)
+ return;
+
+ ret = do_get_kexec_buffer(prop, len, &addr, &size);
+ fdt_delprop(fdt, chosen_node, "linux,ima-kexec-buffer");
+ if (ret)
+ return;
+
+ ret = delete_fdt_mem_rsv(fdt, addr, size);
+ if (!ret)
+ pr_debug("Removed old IMA buffer reservation.\n");
+}
+
+#ifdef CONFIG_IMA_KEXEC
+/**
+ * arch_ima_add_kexec_buffer - do arch-specific steps to add the IMA buffer
+ *
+ * Architectures should use this function to pass on the IMA buffer
+ * information to the next kernel.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
+ size_t size)
+{
+ image->arch.ima_buffer_addr = load_addr;
+ image->arch.ima_buffer_size = size;
+
+ return 0;
+}
+
+static int write_number(void *p, u64 value, int cells)
+{
+ if (cells == 1) {
+ u32 tmp;
+
+ if (value > U32_MAX)
+ return -EINVAL;
+
+ tmp = cpu_to_be32(value);
+ memcpy(p, &tmp, sizeof(tmp));
+ } else if (cells == 2) {
+ u64 tmp;
+
+ tmp = cpu_to_be64(value);
+ memcpy(p, &tmp, sizeof(tmp));
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * setup_ima_buffer - add IMA buffer information to the fdt
+ * @image: kexec image being loaded.
+ * @fdt: Flattened device tree for the next kernel.
+ * @chosen_node: Offset to the chosen node.
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node)
+{
+ int ret, addr_cells, size_cells, entry_size;
+ u8 value[16];
+
+ remove_ima_buffer(fdt, chosen_node);
+ if (!image->arch.ima_buffer_size)
+ return 0;
+
+ ret = get_addr_size_cells(&addr_cells, &size_cells);
+ if (ret)
+ return ret;
+
+ entry_size = 4 * (addr_cells + size_cells);
+
+ if (entry_size > sizeof(value))
+ return -EINVAL;
+
+ ret = write_number(value, image->arch.ima_buffer_addr, addr_cells);
+ if (ret)
+ return ret;
+
+ ret = write_number(value + 4 * addr_cells, image->arch.ima_buffer_size,
+ size_cells);
+ if (ret)
+ return ret;
+
+ ret = fdt_setprop(fdt, chosen_node, "linux,ima-kexec-buffer", value,
+ entry_size);
+ if (ret < 0)
+ return -EINVAL;
+
+ ret = fdt_add_mem_rsv(fdt, image->arch.ima_buffer_addr,
+ image->arch.ima_buffer_size);
+ if (ret)
+ return -EINVAL;
+
+ pr_debug("IMA buffer at 0x%llx, size = 0x%zx\n",
+ image->arch.ima_buffer_addr, image->arch.ima_buffer_size);
+
+ return 0;
+}
+#endif /* CONFIG_IMA_KEXEC */
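
For reference, do_get_kexec_buffer() and setup_ima_buffer() agree on a simple cell layout for the "linux,ima-kexec-buffer" property: addr_cells big-endian 32-bit cells holding the address, followed by size_cells cells holding the size. A small standalone sketch of that encoding, in the same spirit as write_number(), assuming 2 address cells and 2 size cells and purely made-up values:

	#include <stdint.h>
	#include <stdio.h>

	/* Encode a value as 'cells' big-endian 32-bit cells. */
	static void put_cells(uint8_t *p, uint64_t value, int cells)
	{
		int i;

		for (i = cells * 4 - 1; i >= 0; i--) {
			p[i] = value & 0xff;
			value >>= 8;
		}
	}

	int main(void)
	{
		uint8_t prop[16];	/* 2 address cells + 2 size cells */
		int i;

		put_cells(prop, 0x2f000000ULL, 2);	/* hypothetical buffer address */
		put_cells(prop + 8, 0x8000ULL, 2);	/* hypothetical buffer size */

		for (i = 0; i < 16; i++)
			printf("%02x%s", prop[i], (i % 4 == 3) ? " " : "");
		printf("\n");
		return 0;
	}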
diff --git a/arch/powerpc/kernel/kexec_elf_64.c b/arch/powerpc/kernel/kexec_elf_64.c
index 6acffd34a70f..9a42309b091a 100644
--- a/arch/powerpc/kernel/kexec_elf_64.c
+++ b/arch/powerpc/kernel/kexec_elf_64.c
@@ -627,7 +627,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
goto out;
}
- ret = setup_new_fdt(fdt, initrd_load_addr, initrd_len, cmdline);
+ ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline);
if (ret)
goto out;
diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
index 7abc8a75ee48..992c0d258e5d 100644
--- a/arch/powerpc/kernel/machine_kexec_file_64.c
+++ b/arch/powerpc/kernel/machine_kexec_file_64.c
@@ -27,6 +27,7 @@
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
+#include <asm/ima.h>
#define SLAVE_CODE_SIZE 256
@@ -180,7 +181,7 @@ int setup_purgatory(struct kimage *image, const void *slave_code,
*
* Return: 0 on success, or negative errno on error.
*/
-static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size)
+int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size)
{
int i, ret, num_rsvs = fdt_num_mem_rsv(fdt);
@@ -209,6 +210,7 @@ static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size
/*
* setup_new_fdt - modify /chosen and memory reservation for the next kernel
+ * @image: kexec image being loaded.
* @fdt: Flattened device tree for the next kernel.
* @initrd_load_addr: Address where the next initrd will be loaded.
* @initrd_len: Size of the next initrd, or 0 if there will be none.
@@ -217,8 +219,9 @@ static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size
*
* Return: 0 on success, or negative errno on error.
*/
-int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
- unsigned long initrd_len, const char *cmdline)
+int setup_new_fdt(const struct kimage *image, void *fdt,
+ unsigned long initrd_load_addr, unsigned long initrd_len,
+ const char *cmdline)
{
int ret, chosen_node;
const void *prop;
@@ -328,6 +331,12 @@ int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
}
}
+ ret = setup_ima_buffer(image, fdt, chosen_node);
+ if (ret) {
+ pr_err("Error setting up the new device tree.\n");
+ return ret;
+ }
+
ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0);
if (ret) {
pr_err("Error setting up the new device tree.\n");
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 3803b0addf65..6c0ba75fb256 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -117,9 +117,6 @@ static const struct of_device_id of_device_ids[] = {
{
.compatible = "fsl,qe",
},
- {
- .compatible = "fsl,fman",
- },
/* The following two are for the Freescale hypervisor */
{
.name = "hypervisor",
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 64024c999531..e487493bbd47 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -412,6 +412,19 @@ config GOLDFISH
def_bool y
depends on X86_GOLDFISH
+config INTEL_RDT_A
+ bool "Intel Resource Director Technology Allocation support"
+ default n
+ depends on X86 && CPU_SUP_INTEL
+ select KERNFS
+ help
+ Select to enable resource allocation which is a sub-feature of
+ Intel Resource Director Technology (RDT). More information about
+ RDT can be found in the Intel x86 Architecture Software
+ Developer Manual.
+
+ Say N if unsure.
+
if X86_32
config X86_EXTENDED_PLATFORM
bool "Support for extended (non-PC) x86 platforms"
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 8f82b02934fa..0c45cc8e64ba 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -7,9 +7,9 @@
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel_rdt_common.h>
#include "../perf_event.h"
-#define MSR_IA32_PQR_ASSOC 0x0c8f
#define MSR_IA32_QM_CTR 0x0c8e
#define MSR_IA32_QM_EVTSEL 0x0c8d
@@ -24,32 +24,13 @@ static unsigned int cqm_l3_scale; /* supposedly cacheline size */
static bool cqm_enabled, mbm_enabled;
unsigned int mbm_socket_max;
-/**
- * struct intel_pqr_state - State cache for the PQR MSR
- * @rmid: The cached Resource Monitoring ID
- * @closid: The cached Class Of Service ID
- * @rmid_usecnt: The usage counter for rmid
- *
- * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
- * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
- * contains both parts, so we need to cache them.
- *
- * The cache also helps to avoid pointless updates if the value does
- * not change.
- */
-struct intel_pqr_state {
- u32 rmid;
- u32 closid;
- int rmid_usecnt;
-};
-
/*
* The cached intel_pqr_state is strictly per CPU and can never be
* updated from a remote CPU. Both functions which modify the state
* (intel_cqm_event_start and intel_cqm_event_stop) are called with
* interrupts disabled, which is sufficient for the protection.
*/
-static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
static struct hrtimer *mbm_timers;
/**
* struct sample - mbm event's (local or total) data
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 6ccbf1aaa7ce..eafee3161d1c 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -189,6 +189,9 @@
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
+#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
@@ -222,6 +225,7 @@
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
new file mode 100644
index 000000000000..95ce5c85b009
--- /dev/null
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -0,0 +1,224 @@
+#ifndef _ASM_X86_INTEL_RDT_H
+#define _ASM_X86_INTEL_RDT_H
+
+#ifdef CONFIG_INTEL_RDT_A
+
+#include <linux/kernfs.h>
+#include <linux/jump_label.h>
+
+#include <asm/intel_rdt_common.h>
+
+#define IA32_L3_QOS_CFG 0xc81
+#define IA32_L3_CBM_BASE 0xc90
+#define IA32_L2_CBM_BASE 0xd10
+
+#define L3_QOS_CDP_ENABLE 0x01ULL
+
+/**
+ * struct rdtgroup - store rdtgroup's data in resctrl file system.
+ * @kn: kernfs node
+ * @rdtgroup_list: linked list for all rdtgroups
+ * @closid: closid for this rdtgroup
+ * @cpu_mask: CPUs assigned to this rdtgroup
+ * @flags: status bits
+ * @waitcount: how many cpus expect to find this
+ * group when they acquire rdtgroup_mutex
+ */
+struct rdtgroup {
+ struct kernfs_node *kn;
+ struct list_head rdtgroup_list;
+ int closid;
+ struct cpumask cpu_mask;
+ int flags;
+ atomic_t waitcount;
+};
+
+/* rdtgroup.flags */
+#define RDT_DELETED 1
+
+/* List of all resource groups */
+extern struct list_head rdt_all_groups;
+
+int __init rdtgroup_init(void);
+
+/**
+ * struct rftype - describe each file in the resctrl file system
+ * @name: file name
+ * @mode: access mode
+ * @kf_ops: operations
+ * @seq_show: show content of the file
+ * @write: write to the file
+ */
+struct rftype {
+ char *name;
+ umode_t mode;
+ struct kernfs_ops *kf_ops;
+
+ int (*seq_show)(struct kernfs_open_file *of,
+ struct seq_file *sf, void *v);
+ /*
+ * write() is the generic write callback which maps directly to
+ * kernfs write operation and overrides all other operations.
+ * Maximum write size is determined by ->max_write_len.
+ */
+ ssize_t (*write)(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off);
+};
+
+/**
+ * struct rdt_resource - attributes of an RDT resource
+ * @enabled: Is this feature enabled on this machine
+ * @capable: Is this feature available on this machine
+ * @name: Name to use in "schemata" file
+ * @num_closid: Number of CLOSIDs available
+ * @max_cbm: Largest Cache Bit Mask allowed
+ * @min_cbm_bits: Minimum number of consecutive bits to be set
+ * in a cache bit mask
+ * @domains: All domains for this resource
+ * @num_domains: Number of domains active
+ * @msr_base: Base MSR address for CBMs
+ * @tmp_cbms: Scratch space when updating schemata
+ * @num_tmp_cbms: Number of CBMs in tmp_cbms
+ * @cache_level: Which cache level defines scope of this domain
+ * @cbm_idx_multi: Multiplier of CBM index
+ * @cbm_idx_offset: Offset of CBM index. CBM index is computed by:
+ * closid * cbm_idx_multi + cbm_idx_offset
+ */
+struct rdt_resource {
+ bool enabled;
+ bool capable;
+ char *name;
+ int num_closid;
+ int cbm_len;
+ int min_cbm_bits;
+ u32 max_cbm;
+ struct list_head domains;
+ int num_domains;
+ int msr_base;
+ u32 *tmp_cbms;
+ int num_tmp_cbms;
+ int cache_level;
+ int cbm_idx_multi;
+ int cbm_idx_offset;
+};
+
+/**
+ * struct rdt_domain - group of cpus sharing an RDT resource
+ * @list: all instances of this resource
+ * @id: unique id for this instance
+ * @cpu_mask: which cpus share this resource
+ * @cbm: array of cache bit masks (indexed by CLOSID)
+ */
+struct rdt_domain {
+ struct list_head list;
+ int id;
+ struct cpumask cpu_mask;
+ u32 *cbm;
+};
+
+/**
+ * struct msr_param - set a range of MSRs from a domain
+ * @res: The resource to use
+ * @low: Beginning index from base MSR
+ * @high: End index
+ */
+struct msr_param {
+ struct rdt_resource *res;
+ int low;
+ int high;
+};
+
+extern struct mutex rdtgroup_mutex;
+
+extern struct rdt_resource rdt_resources_all[];
+extern struct rdtgroup rdtgroup_default;
+DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+
+int __init rdtgroup_init(void);
+
+enum {
+ RDT_RESOURCE_L3,
+ RDT_RESOURCE_L3DATA,
+ RDT_RESOURCE_L3CODE,
+ RDT_RESOURCE_L2,
+
+ /* Must be the last */
+ RDT_NUM_RESOURCES,
+};
+
+#define for_each_capable_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++) \
+ if (r->capable)
+
+#define for_each_enabled_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++) \
+ if (r->enabled)
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
+union cpuid_0x10_1_eax {
+ struct {
+ unsigned int cbm_len:5;
+ } split;
+ unsigned int full;
+};
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EDX */
+union cpuid_0x10_1_edx {
+ struct {
+ unsigned int cos_max:16;
+ } split;
+ unsigned int full;
+};
+
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
+
+void rdt_cbm_update(void *arg);
+struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
+void rdtgroup_kn_unlock(struct kernfs_node *kn);
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off);
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v);
+
+/*
+ * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
+ *
+ * Following considerations are made so that this has minimal impact
+ * on scheduler hot path:
+ * - This will stay as no-op unless we are running on an Intel SKU
+ * which supports resource control and we enable by mounting the
+ * resctrl file system.
+ * - Caches the per cpu CLOSid values and does the MSR write only
+ * when a task with a different CLOSid is scheduled in.
+ *
+ * Must be called with preemption disabled.
+ */
+static inline void intel_rdt_sched_in(void)
+{
+ if (static_branch_likely(&rdt_enable_key)) {
+ struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+ int closid;
+
+ /*
+ * If this task has a closid assigned, use it.
+ * Else use the closid assigned to this cpu.
+ */
+ closid = current->closid;
+ if (closid == 0)
+ closid = this_cpu_read(cpu_closid);
+
+ if (closid != state->closid) {
+ state->closid = closid;
+ wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
+ }
+ }
+}
+
+#else
+
+static inline void intel_rdt_sched_in(void) {}
+
+#endif /* CONFIG_INTEL_RDT_A */
+#endif /* _ASM_X86_INTEL_RDT_H */
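
The cbm_idx_multi/cbm_idx_offset fields documented above reduce to the formula closid * cbm_idx_multi + cbm_idx_offset. A small standalone sketch of how that interleaves the L3 CBM MSRs once CDP splits them into data/code entries (the multiplier/offset values mirror the resource table added in intel_rdt.c below; the helper is local to this example):

	#include <stdio.h>

	/* Same arithmetic as cbm_idx() in intel_rdt.c. */
	static int cbm_idx(int closid, int multi, int offset)
	{
		return closid * multi + offset;
	}

	int main(void)
	{
		int closid;

		for (closid = 0; closid < 4; closid++)
			printf("closid %d: L3 +%d, L3DATA +%d, L3CODE +%d\n",
			       closid,
			       cbm_idx(closid, 1, 0),	/* plain L3 */
			       cbm_idx(closid, 2, 0),	/* CDP data */
			       cbm_idx(closid, 2, 1));	/* CDP code */
		return 0;
	}

Each printed value is an offset from IA32_L3_CBM_BASE, so with CDP enabled CLOSID n programs IA32_L3_CBM_BASE + 2n for data and + 2n + 1 for code.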
diff --git a/arch/x86/include/asm/intel_rdt_common.h b/arch/x86/include/asm/intel_rdt_common.h
new file mode 100644
index 000000000000..b31081b89407
--- /dev/null
+++ b/arch/x86/include/asm/intel_rdt_common.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_X86_INTEL_RDT_COMMON_H
+#define _ASM_X86_INTEL_RDT_COMMON_H
+
+#define MSR_IA32_PQR_ASSOC 0x0c8f
+
+/**
+ * struct intel_pqr_state - State cache for the PQR MSR
+ * @rmid: The cached Resource Monitoring ID
+ * @closid: The cached Class Of Service ID
+ * @rmid_usecnt: The usage counter for rmid
+ *
+ * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
+ * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
+ * contains both parts, so we need to cache them.
+ *
+ * The cache also helps to avoid pointless updates if the value does
+ * not change.
+ */
+struct intel_pqr_state {
+ u32 rmid;
+ u32 closid;
+ int rmid_usecnt;
+};
+
+DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+
+#endif /* _ASM_X86_INTEL_RDT_COMMON_H */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 33b63670bf09..52000010c62e 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -32,6 +32,8 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
+obj-$(CONFIG_INTEL_RDT_A) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_schemata.o
+
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index be6337156502..0282b0df004a 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -153,6 +153,7 @@ struct _cpuid4_info_regs {
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
+ unsigned int id;
unsigned long size;
struct amd_northbridge *nb;
};
@@ -894,6 +895,8 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
static void ci_leaf_init(struct cacheinfo *this_leaf,
struct _cpuid4_info_regs *base)
{
+ this_leaf->id = base->id;
+ this_leaf->attributes = CACHE_ID;
this_leaf->level = base->eax.split.level;
this_leaf->type = cache_type_map[base->eax.split.type];
this_leaf->coherency_line_size =
@@ -920,6 +923,22 @@ static int __init_cache_level(unsigned int cpu)
return 0;
}
+/*
+ * The max shared threads number comes from CPUID.4:EAX[25:14] with input
+ * ECX as cache index. Then right shift apicid by the number's order to get
+ * cache id for this cache node.
+ */
+static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
+{
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ unsigned long num_threads_sharing;
+ int index_msb;
+
+ num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
+ index_msb = get_count_order(num_threads_sharing);
+ id4_regs->id = c->apicid >> index_msb;
+}
+
static int __populate_cache_leaves(unsigned int cpu)
{
unsigned int idx, ret;
@@ -931,6 +950,7 @@ static int __populate_cache_leaves(unsigned int cpu)
ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
if (ret)
return ret;
+ get_cache_id(cpu, &id4_regs);
ci_leaf_init(this_leaf++, &id4_regs);
__cache_cpumap_setup(cpu, idx, &id4_regs);
}
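
The cache id computed above is simply the APIC ID shifted right by the order of the number of threads sharing the cache, so every logical CPU sharing a given cache instance ends up with the same id. A tiny standalone sketch with a hypothetical part where 16 logical CPUs share the L3 (CPUID would report num_threads_sharing = 15):

	#include <stdio.h>

	/* Smallest n with 2^n >= x, mirroring get_count_order(). */
	static int count_order(unsigned int x)
	{
		int order = 0;

		while ((1u << order) < x)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned int num_threads_sharing = 15 + 1;	/* CPUID field + 1 */
		int index_msb = count_order(num_threads_sharing);
		unsigned int apicid;

		for (apicid = 0; apicid < 48; apicid += 8)
			printf("apicid %2u -> cache id %u\n", apicid, apicid >> index_msb);
		return 0;
	}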
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
new file mode 100644
index 000000000000..5a533fefefa0
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -0,0 +1,403 @@
+/*
+ * Resource Director Technology(RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Tony Luck <tony.luck@intel.com>
+ * Vikas Shivappa <vikas.shivappa@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpuhotplug.h>
+
+#include <asm/intel-family.h>
+#include <asm/intel_rdt.h>
+
+/* Mutex to protect rdtgroup access. */
+DEFINE_MUTEX(rdtgroup_mutex);
+
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid);
+
+#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
+
+struct rdt_resource rdt_resources_all[] = {
+ {
+ .name = "L3",
+ .domains = domain_init(RDT_RESOURCE_L3),
+ .msr_base = IA32_L3_CBM_BASE,
+ .min_cbm_bits = 1,
+ .cache_level = 3,
+ .cbm_idx_multi = 1,
+ .cbm_idx_offset = 0
+ },
+ {
+ .name = "L3DATA",
+ .domains = domain_init(RDT_RESOURCE_L3DATA),
+ .msr_base = IA32_L3_CBM_BASE,
+ .min_cbm_bits = 1,
+ .cache_level = 3,
+ .cbm_idx_multi = 2,
+ .cbm_idx_offset = 0
+ },
+ {
+ .name = "L3CODE",
+ .domains = domain_init(RDT_RESOURCE_L3CODE),
+ .msr_base = IA32_L3_CBM_BASE,
+ .min_cbm_bits = 1,
+ .cache_level = 3,
+ .cbm_idx_multi = 2,
+ .cbm_idx_offset = 1
+ },
+ {
+ .name = "L2",
+ .domains = domain_init(RDT_RESOURCE_L2),
+ .msr_base = IA32_L2_CBM_BASE,
+ .min_cbm_bits = 1,
+ .cache_level = 2,
+ .cbm_idx_multi = 1,
+ .cbm_idx_offset = 0
+ },
+};
+
+static int cbm_idx(struct rdt_resource *r, int closid)
+{
+ return closid * r->cbm_idx_multi + r->cbm_idx_offset;
+}
+
+/*
+ * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
+ * as they do not have CPUID enumeration support for Cache allocation.
+ * The check for Vendor/Family/Model is not enough to guarantee that
+ * the MSRs won't #GP fault because only the following SKUs support
+ * CAT:
+ * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
+ * Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
+ * Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
+ * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
+ *
+ * Probe by trying to write the first of the L3 cache mask registers
+ * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
+ * is always 20 on hsw server parts. The minimum cache bitmask length
+ * allowed for HSW server is always 2 bits. Hardcode all of them.
+ */
+static inline bool cache_alloc_hsw_probe(void)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == INTEL_FAM6_HASWELL_X) {
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+ u32 l, h, max_cbm = BIT_MASK(20) - 1;
+
+ if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
+ return false;
+ rdmsr(IA32_L3_CBM_BASE, l, h);
+
+ /* If all the bits were set in MSR, return success */
+ if (l != max_cbm)
+ return false;
+
+ r->num_closid = 4;
+ r->cbm_len = 20;
+ r->max_cbm = max_cbm;
+ r->min_cbm_bits = 2;
+ r->capable = true;
+ r->enabled = true;
+
+ return true;
+ }
+
+ return false;
+}
+
+static void rdt_get_config(int idx, struct rdt_resource *r)
+{
+ union cpuid_0x10_1_eax eax;
+ union cpuid_0x10_1_edx edx;
+ u32 ebx, ecx;
+
+ cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
+ r->num_closid = edx.split.cos_max + 1;
+ r->cbm_len = eax.split.cbm_len + 1;
+ r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
+ r->capable = true;
+ r->enabled = true;
+}
+
+static void rdt_get_cdp_l3_config(int type)
+{
+ struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+ struct rdt_resource *r = &rdt_resources_all[type];
+
+ r->num_closid = r_l3->num_closid / 2;
+ r->cbm_len = r_l3->cbm_len;
+ r->max_cbm = r_l3->max_cbm;
+ r->capable = true;
+ /*
+ * By default, CDP is disabled. CDP can be enabled by mount parameter
+ * "cdp" during resctrl file system mount time.
+ */
+ r->enabled = false;
+}
+
+static inline bool get_rdt_resources(void)
+{
+ bool ret = false;
+
+ if (cache_alloc_hsw_probe())
+ return true;
+
+ if (!boot_cpu_has(X86_FEATURE_RDT_A))
+ return false;
+
+ if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
+ rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+ if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
+ rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
+ rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
+ }
+ ret = true;
+ }
+ if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
+ /* CPUID 0x10.2 fields are same format as 0x10.1 */
+ rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+ ret = true;
+ }
+
+ return ret;
+}
+
+static int get_cache_id(int cpu, int level)
+{
+ struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+ int i;
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == level)
+ return ci->info_list[i].id;
+ }
+
+ return -1;
+}
+
+void rdt_cbm_update(void *arg)
+{
+ struct msr_param *m = (struct msr_param *)arg;
+ struct rdt_resource *r = m->res;
+ int i, cpu = smp_processor_id();
+ struct rdt_domain *d;
+
+ list_for_each_entry(d, &r->domains, list) {
+ /* Find the domain that contains this CPU */
+ if (cpumask_test_cpu(cpu, &d->cpu_mask))
+ goto found;
+ }
+ pr_info_once("cpu %d not found in any domain for resource %s\n",
+ cpu, r->name);
+
+ return;
+
+found:
+ for (i = m->low; i < m->high; i++) {
+ int idx = cbm_idx(r, i);
+
+ wrmsrl(r->msr_base + idx, d->cbm[i]);
+ }
+}
+
+/*
+ * rdt_find_domain - Find a domain in a resource that matches input resource id
+ *
+ * Search resource r's domain list to find the resource id. If the resource
+ * id is found in a domain, return the domain. Otherwise, if requested by
+ * caller, return the first domain whose id is bigger than the input id.
+ * The domain list is sorted by id in ascending order.
+ */
+static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
+ struct list_head **pos)
+{
+ struct rdt_domain *d;
+ struct list_head *l;
+
+ if (id < 0)
+ return ERR_PTR(id);
+
+ list_for_each(l, &r->domains) {
+ d = list_entry(l, struct rdt_domain, list);
+ /* When id is found, return its domain. */
+ if (id == d->id)
+ return d;
+ /* Stop searching when finding id's position in sorted list. */
+ if (id < d->id)
+ break;
+ }
+
+ if (pos)
+ *pos = l;
+
+ return NULL;
+}
+
+/*
+ * domain_add_cpu - Add a cpu to a resource's domain list.
+ *
+ * If an existing domain in the resource r's domain list matches the cpu's
+ * resource id, add the cpu in the domain.
+ *
+ * Otherwise, a new domain is allocated and inserted into the right position
+ * in the domain list sorted by id in ascending order.
+ *
+ * The order in the domain list is visible to users when we print entries
+ * in the schemata file and schemata input is validated to have the same order
+ * as this list.
+ */
+static void domain_add_cpu(int cpu, struct rdt_resource *r)
+{
+ int i, id = get_cache_id(cpu, r->cache_level);
+ struct list_head *add_pos = NULL;
+ struct rdt_domain *d;
+
+ d = rdt_find_domain(r, id, &add_pos);
+ if (IS_ERR(d)) {
+ pr_warn("Could't find cache id for cpu %d\n", cpu);
+ return;
+ }
+
+ if (d) {
+ cpumask_set_cpu(cpu, &d->cpu_mask);
+ return;
+ }
+
+ d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
+ if (!d)
+ return;
+
+ d->id = id;
+
+ d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
+ if (!d->cbm) {
+ kfree(d);
+ return;
+ }
+
+ for (i = 0; i < r->num_closid; i++) {
+ int idx = cbm_idx(r, i);
+
+ d->cbm[i] = r->max_cbm;
+ wrmsrl(r->msr_base + idx, d->cbm[i]);
+ }
+
+ cpumask_set_cpu(cpu, &d->cpu_mask);
+ list_add_tail(&d->list, add_pos);
+ r->num_domains++;
+}
+
+static void domain_remove_cpu(int cpu, struct rdt_resource *r)
+{
+ int id = get_cache_id(cpu, r->cache_level);
+ struct rdt_domain *d;
+
+ d = rdt_find_domain(r, id, NULL);
+ if (IS_ERR_OR_NULL(d)) {
+ pr_warn("Could't find cache id for cpu %d\n", cpu);
+ return;
+ }
+
+ cpumask_clear_cpu(cpu, &d->cpu_mask);
+ if (cpumask_empty(&d->cpu_mask)) {
+ r->num_domains--;
+ kfree(d->cbm);
+ list_del(&d->list);
+ kfree(d);
+ }
+}
+
+static void clear_closid(int cpu)
+{
+ struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+
+ per_cpu(cpu_closid, cpu) = 0;
+ state->closid = 0;
+ wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
+}
+
+static int intel_rdt_online_cpu(unsigned int cpu)
+{
+ struct rdt_resource *r;
+
+ mutex_lock(&rdtgroup_mutex);
+ for_each_capable_rdt_resource(r)
+ domain_add_cpu(cpu, r);
+ /* The cpu is set in default rdtgroup after online. */
+ cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
+ clear_closid(cpu);
+ mutex_unlock(&rdtgroup_mutex);
+
+ return 0;
+}
+
+static int intel_rdt_offline_cpu(unsigned int cpu)
+{
+ struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
+
+ mutex_lock(&rdtgroup_mutex);
+ for_each_capable_rdt_resource(r)
+ domain_remove_cpu(cpu, r);
+ list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+ if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask))
+ break;
+ }
+ clear_closid(cpu);
+ mutex_unlock(&rdtgroup_mutex);
+
+ return 0;
+}
+
+static int __init intel_rdt_late_init(void)
+{
+ struct rdt_resource *r;
+ int state, ret;
+
+ if (!get_rdt_resources())
+ return -ENODEV;
+
+ state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "x86/rdt/cat:online:",
+ intel_rdt_online_cpu, intel_rdt_offline_cpu);
+ if (state < 0)
+ return state;
+
+ ret = rdtgroup_init();
+ if (ret) {
+ cpuhp_remove_state(state);
+ return ret;
+ }
+
+ for_each_capable_rdt_resource(r)
+ pr_info("Intel RDT %s allocation detected\n", r->name);
+
+ return 0;
+}
+
+late_initcall(intel_rdt_late_init);
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
new file mode 100644
index 000000000000..8af04afdfcb9
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -0,0 +1,1115 @@
+/*
+ * User interface for Resource Allocation in Resource Director Technology (RDT)
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/task_work.h>
+
+#include <uapi/linux/magic.h>
+
+#include <asm/intel_rdt.h>
+#include <asm/intel_rdt_common.h>
+
+DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
+struct kernfs_root *rdt_root;
+struct rdtgroup rdtgroup_default;
+LIST_HEAD(rdt_all_groups);
+
+/* Kernel fs node for "info" directory under root */
+static struct kernfs_node *kn_info;
+
+/*
+ * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
+ * we can keep a bitmap of free CLOSIDs in a single integer.
+ *
+ * Using a global CLOSID across all resources has some advantages and
+ * some drawbacks:
+ * + We can simply set "current->closid" to assign a task to a resource
+ * group.
+ * + Context switch code can avoid extra memory references deciding which
+ * CLOSID to load into the PQR_ASSOC MSR
+ * - We give up some options in configuring resource groups across multi-socket
+ * systems.
+ * - Our choices on how to configure each resource become progressively more
+ * limited as the number of resources grows.
+ */
+static int closid_free_map;
+
+static void closid_init(void)
+{
+ struct rdt_resource *r;
+ int rdt_min_closid = 32;
+
+ /* Compute rdt_min_closid across all resources */
+ for_each_enabled_rdt_resource(r)
+ rdt_min_closid = min(rdt_min_closid, r->num_closid);
+
+ closid_free_map = BIT_MASK(rdt_min_closid) - 1;
+
+ /* CLOSID 0 is always reserved for the default group */
+ closid_free_map &= ~1;
+}
+
+int closid_alloc(void)
+{
+ int closid = ffs(closid_free_map);
+
+ if (closid == 0)
+ return -ENOSPC;
+ closid--;
+ closid_free_map &= ~(1 << closid);
+
+ return closid;
+}
+
+static void closid_free(int closid)
+{
+ closid_free_map |= 1 << closid;
+}
+
+/* set uid and gid of rdtgroup dirs and files to that of the creator */
+static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
+{
+ struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
+ .ia_uid = current_fsuid(),
+ .ia_gid = current_fsgid(), };
+
+ if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
+ gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
+ return 0;
+
+ return kernfs_setattr(kn, &iattr);
+}
+
+static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
+{
+ struct kernfs_node *kn;
+ int ret;
+
+ kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
+ 0, rft->kf_ops, rft, NULL, NULL);
+ if (IS_ERR(kn))
+ return PTR_ERR(kn);
+
+ ret = rdtgroup_kn_set_ugid(kn);
+ if (ret) {
+ kernfs_remove(kn);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rdtgroup_add_files(struct kernfs_node *kn, struct rftype *rfts,
+ int len)
+{
+ struct rftype *rft;
+ int ret;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ for (rft = rfts; rft < rfts + len; rft++) {
+ ret = rdtgroup_add_file(kn, rft);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+error:
+ pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
+ while (--rft >= rfts)
+ kernfs_remove_by_name(kn, rft->name);
+ return ret;
+}
+
+static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
+{
+ struct kernfs_open_file *of = m->private;
+ struct rftype *rft = of->kn->priv;
+
+ if (rft->seq_show)
+ return rft->seq_show(of, m, arg);
+ return 0;
+}
+
+static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct rftype *rft = of->kn->priv;
+
+ if (rft->write)
+ return rft->write(of, buf, nbytes, off);
+
+ return -EINVAL;
+}
+
+static struct kernfs_ops rdtgroup_kf_single_ops = {
+ .atomic_write_len = PAGE_SIZE,
+ .write = rdtgroup_file_write,
+ .seq_show = rdtgroup_seqfile_show,
+};
+
+static int rdtgroup_cpus_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+ int ret = 0;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+ if (rdtgrp)
+ seq_printf(s, "%*pb\n", cpumask_pr_args(&rdtgrp->cpu_mask));
+ else
+ ret = -ENOENT;
+ rdtgroup_kn_unlock(of->kn);
+
+ return ret;
+}
+
+/*
+ * This is safe against intel_rdt_sched_in() called from __switch_to()
+ * because __switch_to() is executed with interrupts disabled. A local call
+ * from rdt_update_closid() is protected against __switch_to() because
+ * preemption is disabled.
+ */
+static void rdt_update_cpu_closid(void *closid)
+{
+ if (closid)
+ this_cpu_write(cpu_closid, *(int *)closid);
+ /*
+ * We cannot unconditionally write the MSR because the current
+ * executing task might have its own closid selected. Just reuse
+ * the context switch code.
+ */
+ intel_rdt_sched_in();
+}
+
+/*
+ * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
+ *
+ * Per task closids must have been set up before calling this function.
+ *
+ * The per cpu closids are updated with the smp function call, when @closid
+ * is not NULL. If @closid is NULL then all affected percpu closids must
+ * have been set up before calling this function.
+ */
+static void
+rdt_update_closid(const struct cpumask *cpu_mask, int *closid)
+{
+ int cpu = get_cpu();
+
+ if (cpumask_test_cpu(cpu, cpu_mask))
+ rdt_update_cpu_closid(closid);
+ smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1);
+ put_cpu();
+}
+
+static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ cpumask_var_t tmpmask, newmask;
+ struct rdtgroup *rdtgrp, *r;
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;
+ if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
+ free_cpumask_var(tmpmask);
+ return -ENOMEM;
+ }
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = cpumask_parse(buf, newmask);
+ if (ret)
+ goto unlock;
+
+ /* check that user didn't specify any offline cpus */
+ cpumask_andnot(tmpmask, newmask, cpu_online_mask);
+ if (cpumask_weight(tmpmask)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* Check whether cpus are dropped from this group */
+ cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
+ if (cpumask_weight(tmpmask)) {
+ /* Can't drop from default group */
+ if (rdtgrp == &rdtgroup_default) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ /* Give any dropped cpus to rdtgroup_default */
+ cpumask_or(&rdtgroup_default.cpu_mask,
+ &rdtgroup_default.cpu_mask, tmpmask);
+ rdt_update_closid(tmpmask, &rdtgroup_default.closid);
+ }
+
+ /*
+ * If we added cpus, remove them from previous group that owned them
+ * and update per-cpu closid
+ */
+ cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
+ if (cpumask_weight(tmpmask)) {
+ list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
+ if (r == rdtgrp)
+ continue;
+ cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
+ }
+ rdt_update_closid(tmpmask, &rdtgrp->closid);
+ }
+
+ /* Done pushing/pulling - update this group with new mask */
+ cpumask_copy(&rdtgrp->cpu_mask, newmask);
+
+unlock:
+ rdtgroup_kn_unlock(of->kn);
+ free_cpumask_var(tmpmask);
+ free_cpumask_var(newmask);
+
+ return ret ?: nbytes;
+}
+
+struct task_move_callback {
+ struct callback_head work;
+ struct rdtgroup *rdtgrp;
+};
+
+static void move_myself(struct callback_head *head)
+{
+ struct task_move_callback *callback;
+ struct rdtgroup *rdtgrp;
+
+ callback = container_of(head, struct task_move_callback, work);
+ rdtgrp = callback->rdtgrp;
+
+ /*
+ * If resource group was deleted before this task work callback
+ * was invoked, then assign the task to root group and free the
+ * resource group.
+ */
+ if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+ (rdtgrp->flags & RDT_DELETED)) {
+ current->closid = 0;
+ kfree(rdtgrp);
+ }
+
+ preempt_disable();
+ /* update PQR_ASSOC MSR to make resource group go into effect */
+ intel_rdt_sched_in();
+ preempt_enable();
+
+ kfree(callback);
+}
+
+static int __rdtgroup_move_task(struct task_struct *tsk,
+ struct rdtgroup *rdtgrp)
+{
+ struct task_move_callback *callback;
+ int ret;
+
+ callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+ if (!callback)
+ return -ENOMEM;
+ callback->work.func = move_myself;
+ callback->rdtgrp = rdtgrp;
+
+ /*
+ * Take a refcount, so rdtgrp cannot be freed before the
+ * callback has been invoked.
+ */
+ atomic_inc(&rdtgrp->waitcount);
+ ret = task_work_add(tsk, &callback->work, true);
+ if (ret) {
+ /*
+ * Task is exiting. Drop the refcount and free the callback.
+ * No need to check the refcount as the group cannot be
+ * deleted before the write function unlocks rdtgroup_mutex.
+ */
+ atomic_dec(&rdtgrp->waitcount);
+ kfree(callback);
+ } else {
+ tsk->closid = rdtgrp->closid;
+ }
+ return ret;
+}
+
+static int rdtgroup_task_write_permission(struct task_struct *task,
+ struct kernfs_open_file *of)
+{
+ const struct cred *tcred = get_task_cred(task);
+ const struct cred *cred = current_cred();
+ int ret = 0;
+
+ /*
+ * Even if we're attaching all tasks in the thread group, we only
+ * need to check permissions on one of them.
+ */
+ if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
+ !uid_eq(cred->euid, tcred->uid) &&
+ !uid_eq(cred->euid, tcred->suid))
+ ret = -EPERM;
+
+ put_cred(tcred);
+ return ret;
+}
+
+static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
+ struct kernfs_open_file *of)
+{
+ struct task_struct *tsk;
+ int ret;
+
+ rcu_read_lock();
+ if (pid) {
+ tsk = find_task_by_vpid(pid);
+ if (!tsk) {
+ rcu_read_unlock();
+ return -ESRCH;
+ }
+ } else {
+ tsk = current;
+ }
+
+ get_task_struct(tsk);
+ rcu_read_unlock();
+
+ ret = rdtgroup_task_write_permission(tsk, of);
+ if (!ret)
+ ret = __rdtgroup_move_task(tsk, rdtgrp);
+
+ put_task_struct(tsk);
+ return ret;
+}
+
+static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct rdtgroup *rdtgrp;
+ int ret = 0;
+ pid_t pid;
+
+ if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+ return -EINVAL;
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+ if (rdtgrp)
+ ret = rdtgroup_move_task(pid, rdtgrp, of);
+ else
+ ret = -ENOENT;
+
+ rdtgroup_kn_unlock(of->kn);
+
+ return ret ?: nbytes;
+}
+
+static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
+{
+ struct task_struct *p, *t;
+
+ rcu_read_lock();
+ for_each_process_thread(p, t) {
+ if (t->closid == r->closid)
+ seq_printf(s, "%d\n", t->pid);
+ }
+ rcu_read_unlock();
+}
+
+static int rdtgroup_tasks_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+ int ret = 0;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (rdtgrp)
+ show_rdt_tasks(rdtgrp, s);
+ else
+ ret = -ENOENT;
+ rdtgroup_kn_unlock(of->kn);
+
+ return ret;
+}
+
+/* Files in each rdtgroup */
+static struct rftype rdtgroup_base_files[] = {
+ {
+ .name = "cpus",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_cpus_write,
+ .seq_show = rdtgroup_cpus_show,
+ },
+ {
+ .name = "tasks",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_tasks_write,
+ .seq_show = rdtgroup_tasks_show,
+ },
+ {
+ .name = "schemata",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_schemata_write,
+ .seq_show = rdtgroup_schemata_show,
+ },
+};
+
+static int rdt_num_closids_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ seq_printf(seq, "%d\n", r->num_closid);
+
+ return 0;
+}
+
+static int rdt_cbm_mask_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ seq_printf(seq, "%x\n", r->max_cbm);
+
+ return 0;
+}
+
+static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ seq_printf(seq, "%d\n", r->min_cbm_bits);
+
+ return 0;
+}
+
+/* rdtgroup information files for one cache resource. */
+static struct rftype res_info_files[] = {
+ {
+ .name = "num_closids",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_num_closids_show,
+ },
+ {
+ .name = "cbm_mask",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_cbm_mask_show,
+ },
+ {
+ .name = "min_cbm_bits",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_min_cbm_bits_show,
+ },
+};
+
+static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
+{
+ struct kernfs_node *kn_subdir;
+ struct rdt_resource *r;
+ int ret;
+
+ /* create the directory */
+ kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
+ if (IS_ERR(kn_info))
+ return PTR_ERR(kn_info);
+ kernfs_get(kn_info);
+
+ for_each_enabled_rdt_resource(r) {
+ kn_subdir = kernfs_create_dir(kn_info, r->name,
+ kn_info->mode, r);
+ if (IS_ERR(kn_subdir)) {
+ ret = PTR_ERR(kn_subdir);
+ goto out_destroy;
+ }
+ kernfs_get(kn_subdir);
+ ret = rdtgroup_kn_set_ugid(kn_subdir);
+ if (ret)
+ goto out_destroy;
+ ret = rdtgroup_add_files(kn_subdir, res_info_files,
+ ARRAY_SIZE(res_info_files));
+ if (ret)
+ goto out_destroy;
+ kernfs_activate(kn_subdir);
+ }
+
+ /*
+ * This extra ref will be put in kernfs_remove() and guarantees
+ * that @rdtgrp->kn is always accessible.
+ */
+ kernfs_get(kn_info);
+
+ ret = rdtgroup_kn_set_ugid(kn_info);
+ if (ret)
+ goto out_destroy;
+
+ kernfs_activate(kn_info);
+
+ return 0;
+
+out_destroy:
+ kernfs_remove(kn_info);
+ return ret;
+}
+
+static void l3_qos_cfg_update(void *arg)
+{
+ bool *enable = arg;
+
+ wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
+}
+
+static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
+{
+ cpumask_var_t cpu_mask;
+ struct rdt_domain *d;
+ int cpu;
+
+ if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ list_for_each_entry(d, &r->domains, list) {
+ /* Pick one CPU from each domain instance to update MSR */
+ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+ }
+ cpu = get_cpu();
+ /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
+ if (cpumask_test_cpu(cpu, cpu_mask))
+ l3_qos_cfg_update(&enable);
+ /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
+ smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1);
+ put_cpu();
+
+ free_cpumask_var(cpu_mask);
+
+ return 0;
+}
+
+static int cdp_enable(void)
+{
+ struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA];
+ struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE];
+ struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+ int ret;
+
+ if (!r_l3->capable || !r_l3data->capable || !r_l3code->capable)
+ return -EINVAL;
+
+ ret = set_l3_qos_cfg(r_l3, true);
+ if (!ret) {
+ r_l3->enabled = false;
+ r_l3data->enabled = true;
+ r_l3code->enabled = true;
+ }
+ return ret;
+}
+
+static void cdp_disable(void)
+{
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+
+ r->enabled = r->capable;
+
+ if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled) {
+ rdt_resources_all[RDT_RESOURCE_L3DATA].enabled = false;
+ rdt_resources_all[RDT_RESOURCE_L3CODE].enabled = false;
+ set_l3_qos_cfg(r, false);
+ }
+}
+
+static int parse_rdtgroupfs_options(char *data)
+{
+ char *token, *o = data;
+ int ret = 0;
+
+ while ((token = strsep(&o, ",")) != NULL) {
+ if (!*token)
+ return -EINVAL;
+
+ if (!strcmp(token, "cdp"))
+ ret = cdp_enable();
+ }
+
+ return ret;
+}
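+
+/*
+ * For illustration (per Documentation/x86/intel_rdt_ui.txt): the "cdp"
+ * option handled above is passed at mount time, e.g.
+ *
+ *   # mount -t resctrl resctrl -o cdp /sys/fs/resctrl
+ *
+ * Mounting without options leaves code/data prioritization disabled.
+ */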
+
+/*
+ * We don't allow rdtgroup directories to be created anywhere
+ * except the root directory. Thus, when looking up the rdtgroup
+ * structure for a kernfs node, we are either looking at a directory,
+ * in which case the rdtgroup structure is pointed at by the "priv"
+ * field, or at a file, in which case we need only look at the parent
+ * directory to find the rdtgroup.
+ */
+static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
+{
+ if (kernfs_type(kn) == KERNFS_DIR) {
+ /*
+ * All the resource directories use "kn->priv"
+ * to point to the "struct rdtgroup" for the
+ * resource. "info" and its subdirectories don't
+ * have rdtgroup structures, so return NULL here.
+ */
+ if (kn == kn_info || kn->parent == kn_info)
+ return NULL;
+ else
+ return kn->priv;
+ } else {
+ return kn->parent->priv;
+ }
+}
+
+struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
+{
+ struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
+
+ if (!rdtgrp)
+ return NULL;
+
+ atomic_inc(&rdtgrp->waitcount);
+ kernfs_break_active_protection(kn);
+
+ mutex_lock(&rdtgroup_mutex);
+
+ /* Was this group deleted while we waited? */
+ if (rdtgrp->flags & RDT_DELETED)
+ return NULL;
+
+ return rdtgrp;
+}
+
+void rdtgroup_kn_unlock(struct kernfs_node *kn)
+{
+ struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
+
+ if (!rdtgrp)
+ return;
+
+ mutex_unlock(&rdtgroup_mutex);
+
+ if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+ (rdtgrp->flags & RDT_DELETED)) {
+ kernfs_unbreak_active_protection(kn);
+ kernfs_put(kn);
+ kfree(rdtgrp);
+ } else {
+ kernfs_unbreak_active_protection(kn);
+ }
+}
+
+static struct dentry *rdt_mount(struct file_system_type *fs_type,
+ int flags, const char *unused_dev_name,
+ void *data)
+{
+ struct dentry *dentry;
+ int ret;
+
+ mutex_lock(&rdtgroup_mutex);
+ /*
+ * resctrl file system can only be mounted once.
+ */
+ if (static_branch_unlikely(&rdt_enable_key)) {
+ dentry = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ ret = parse_rdtgroupfs_options(data);
+ if (ret) {
+ dentry = ERR_PTR(ret);
+ goto out_cdp;
+ }
+
+ closid_init();
+
+ ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
+ if (ret) {
+ dentry = ERR_PTR(ret);
+ goto out_cdp;
+ }
+
+ dentry = kernfs_mount(fs_type, flags, rdt_root,
+ RDTGROUP_SUPER_MAGIC, NULL);
+ if (IS_ERR(dentry))
+ goto out_cdp;
+
+ static_branch_enable(&rdt_enable_key);
+ goto out;
+
+out_cdp:
+ cdp_disable();
+out:
+ mutex_unlock(&rdtgroup_mutex);
+
+ return dentry;
+}
+
+static int reset_all_cbms(struct rdt_resource *r)
+{
+ struct msr_param msr_param;
+ cpumask_var_t cpu_mask;
+ struct rdt_domain *d;
+ int i, cpu;
+
+ if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ msr_param.res = r;
+ msr_param.low = 0;
+ msr_param.high = r->num_closid;
+
+ /*
+ * Disable resource control for this resource by setting all
+ * CBMs in all domains to the maximum mask value. Pick one CPU
+ * from each domain to update the MSRs below.
+ */
+ list_for_each_entry(d, &r->domains, list) {
+ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+
+ for (i = 0; i < r->num_closid; i++)
+ d->cbm[i] = r->max_cbm;
+ }
+ cpu = get_cpu();
+ /* Update CBM on this cpu if it's in cpu_mask. */
+ if (cpumask_test_cpu(cpu, cpu_mask))
+ rdt_cbm_update(&msr_param);
+ /* Update CBM on all other cpus in cpu_mask. */
+ smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+ put_cpu();
+
+ free_cpumask_var(cpu_mask);
+
+ return 0;
+}
+
+/*
+ * Move tasks from one group to the other. If @from is NULL, then all tasks
+ * in the system are moved unconditionally (used for teardown).
+ *
+ * If @mask is not NULL, the CPUs on which the moved tasks are running are
+ * set in that mask so that the update SMP function call is restricted to
+ * the affected CPUs.
+ */
+static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+ struct cpumask *mask)
+{
+ struct task_struct *p, *t;
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(p, t) {
+ if (!from || t->closid == from->closid) {
+ t->closid = to->closid;
+#ifdef CONFIG_SMP
+ /*
+ * This is safe on x86 w/o barriers as the ordering
+ * of writing to task_cpu() and t->on_cpu is
+ * reverse to the reading here. The detection is
+ * inaccurate as tasks might move or schedule
+ * before the smp function call takes place. In
+ * such a case the function call is pointless, but
+ * there is no other side effect.
+ */
+ if (mask && t->on_cpu)
+ cpumask_set_cpu(task_cpu(t), mask);
+#endif
+ }
+ }
+ read_unlock(&tasklist_lock);
+}
+
+/*
+ * Forcibly remove all of subdirectories under root.
+ */
+static void rmdir_all_sub(void)
+{
+ struct rdtgroup *rdtgrp, *tmp;
+
+ /* Move all tasks to the default resource group */
+ rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
+
+ list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
+ /* Remove each rdtgroup other than root */
+ if (rdtgrp == &rdtgroup_default)
+ continue;
+
+ /*
+ * Give any CPUs back to the default group. We cannot copy
+ * cpu_online_mask because a CPU might have executed the
+ * offline callback already, but is still marked online.
+ */
+ cpumask_or(&rdtgroup_default.cpu_mask,
+ &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
+
+ kernfs_remove(rdtgrp->kn);
+ list_del(&rdtgrp->rdtgroup_list);
+ kfree(rdtgrp);
+ }
+ /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
+ get_online_cpus();
+ rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);
+ put_online_cpus();
+
+ kernfs_remove(kn_info);
+}
+
+static void rdt_kill_sb(struct super_block *sb)
+{
+ struct rdt_resource *r;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ /* Put everything back to default values. */
+ for_each_enabled_rdt_resource(r)
+ reset_all_cbms(r);
+ cdp_disable();
+ rmdir_all_sub();
+ static_branch_disable(&rdt_enable_key);
+ kernfs_kill_sb(sb);
+ mutex_unlock(&rdtgroup_mutex);
+}
+
+static struct file_system_type rdt_fs_type = {
+ .name = "resctrl",
+ .mount = rdt_mount,
+ .kill_sb = rdt_kill_sb,
+};
+
+static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
+ umode_t mode)
+{
+ struct rdtgroup *parent, *rdtgrp;
+ struct kernfs_node *kn;
+ int ret, closid;
+
+ /* Only allow mkdir in the root directory */
+ if (parent_kn != rdtgroup_default.kn)
+ return -EPERM;
+
+ /* Do not accept '\n', which would make the name unparsable. */
+ if (strchr(name, '\n'))
+ return -EINVAL;
+
+ parent = rdtgroup_kn_lock_live(parent_kn);
+ if (!parent) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = closid_alloc();
+ if (ret < 0)
+ goto out_unlock;
+ closid = ret;
+
+ /* allocate the rdtgroup. */
+ rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
+ if (!rdtgrp) {
+ ret = -ENOSPC;
+ goto out_closid_free;
+ }
+ rdtgrp->closid = closid;
+ list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
+
+ /* kernfs creates the directory for rdtgrp */
+ kn = kernfs_create_dir(parent->kn, name, mode, rdtgrp);
+ if (IS_ERR(kn)) {
+ ret = PTR_ERR(kn);
+ goto out_cancel_ref;
+ }
+ rdtgrp->kn = kn;
+
+ /*
+ * kernfs_remove() will drop the reference count on "kn" which
+ * will free it. But we still need it to stick around for the
+ * rdtgroup_kn_unlock(kn) call below. Take one extra reference
+ * here, which will be dropped inside rdtgroup_kn_unlock().
+ */
+ kernfs_get(kn);
+
+ ret = rdtgroup_kn_set_ugid(kn);
+ if (ret)
+ goto out_destroy;
+
+ ret = rdtgroup_add_files(kn, rdtgroup_base_files,
+ ARRAY_SIZE(rdtgroup_base_files));
+ if (ret)
+ goto out_destroy;
+
+ kernfs_activate(kn);
+
+ ret = 0;
+ goto out_unlock;
+
+out_destroy:
+ kernfs_remove(rdtgrp->kn);
+out_cancel_ref:
+ list_del(&rdtgrp->rdtgroup_list);
+ kfree(rdtgrp);
+out_closid_free:
+ closid_free(closid);
+out_unlock:
+ rdtgroup_kn_unlock(parent_kn);
+ return ret;
+}
+
+static int rdtgroup_rmdir(struct kernfs_node *kn)
+{
+ int ret, cpu, closid = rdtgroup_default.closid;
+ struct rdtgroup *rdtgrp;
+ cpumask_var_t tmpmask;
+
+ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;
+
+ rdtgrp = rdtgroup_kn_lock_live(kn);
+ if (!rdtgrp) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ /* Give any tasks back to the default group */
+ rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
+
+ /* Give any CPUs back to the default group */
+ cpumask_or(&rdtgroup_default.cpu_mask,
+ &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
+
+ /* Update per cpu closid of the moved CPUs first */
+ for_each_cpu(cpu, &rdtgrp->cpu_mask)
+ per_cpu(cpu_closid, cpu) = closid;
+ /*
+ * Update the MSR on the moved CPUs and on any CPUs which have a
+ * moved task running on them.
+ */
+ cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
+ rdt_update_closid(tmpmask, NULL);
+
+ rdtgrp->flags = RDT_DELETED;
+ closid_free(rdtgrp->closid);
+ list_del(&rdtgrp->rdtgroup_list);
+
+ /*
+ * Take one extra hold on kn here; it is dropped when we kfree(rdtgrp)
+ * in rdtgroup_kn_unlock().
+ */
+ kernfs_get(kn);
+ kernfs_remove(rdtgrp->kn);
+ ret = 0;
+out:
+ rdtgroup_kn_unlock(kn);
+ free_cpumask_var(tmpmask);
+ return ret;
+}
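+
+/*
+ * Example (illustrative): "rmdir /sys/fs/resctrl/p0" ends up in
+ * rdtgroup_rmdir() above; any tasks and CPUs still assigned to the group
+ * are handed back to the default group before its CLOSID and directory
+ * are released.
+ */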
+
+static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
+{
+ if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled)
+ seq_puts(seq, ",cdp");
+ return 0;
+}
+
+static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
+ .mkdir = rdtgroup_mkdir,
+ .rmdir = rdtgroup_rmdir,
+ .show_options = rdtgroup_show_options,
+};
+
+static int __init rdtgroup_setup_root(void)
+{
+ int ret;
+
+ rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
+ KERNFS_ROOT_CREATE_DEACTIVATED,
+ &rdtgroup_default);
+ if (IS_ERR(rdt_root))
+ return PTR_ERR(rdt_root);
+
+ mutex_lock(&rdtgroup_mutex);
+
+ rdtgroup_default.closid = 0;
+ list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
+
+ ret = rdtgroup_add_files(rdt_root->kn, rdtgroup_base_files,
+ ARRAY_SIZE(rdtgroup_base_files));
+ if (ret) {
+ kernfs_destroy_root(rdt_root);
+ goto out;
+ }
+
+ rdtgroup_default.kn = rdt_root->kn;
+ kernfs_activate(rdtgroup_default.kn);
+
+out:
+ mutex_unlock(&rdtgroup_mutex);
+
+ return ret;
+}
+
+/*
+ * rdtgroup_init - rdtgroup initialization
+ *
+ * Setup resctrl file system including set up root, create mount point,
+ * register rdtgroup filesystem, and initialize files under root directory.
+ *
+ * Return: 0 on success or -errno
+ */
+int __init rdtgroup_init(void)
+{
+ int ret = 0;
+
+ ret = rdtgroup_setup_root();
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_mount_point(fs_kobj, "resctrl");
+ if (ret)
+ goto cleanup_root;
+
+ ret = register_filesystem(&rdt_fs_type);
+ if (ret)
+ goto cleanup_mountpoint;
+
+ return 0;
+
+cleanup_mountpoint:
+ sysfs_remove_mount_point(fs_kobj, "resctrl");
+cleanup_root:
+ kernfs_destroy_root(rdt_root);
+
+ return ret;
+}
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
new file mode 100644
index 000000000000..f369cb8db0d5
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -0,0 +1,245 @@
+/*
+ * Resource Director Technology(RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Tony Luck <tony.luck@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <asm/intel_rdt.h>
+
+/*
+ * Check whether a cache bit mask is valid. The SDM says:
+ * Please note that all (and only) contiguous '1' combinations
+ * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
+ * Additionally Haswell requires at least two bits set.
+ */
+static bool cbm_validate(unsigned long var, struct rdt_resource *r)
+{
+ unsigned long first_bit, zero_bit;
+
+ if (var == 0 || var > r->max_cbm)
+ return false;
+
+ first_bit = find_first_bit(&var, r->cbm_len);
+ zero_bit = find_next_zero_bit(&var, r->cbm_len, first_bit);
+
+ if (find_next_bit(&var, r->cbm_len, zero_bit) < r->cbm_len)
+ return false;
+
+ if ((zero_bit - first_bit) < r->min_cbm_bits)
+ return false;
+ return true;
+}
+
+/*
+ * Read one cache bit mask (hex). Check that it is valid for the current
+ * resource type.
+ */
+static int parse_cbm(char *buf, struct rdt_resource *r)
+{
+ unsigned long data;
+ int ret;
+
+ ret = kstrtoul(buf, 16, &data);
+ if (ret)
+ return ret;
+ if (!cbm_validate(data, r))
+ return -EINVAL;
+ r->tmp_cbms[r->num_tmp_cbms++] = data;
+
+ return 0;
+}
+
+/*
+ * For each domain in this resource we expect to find a series of:
+ * id=mask
+ * separated by ";". The "id" is in decimal, and must appear in the
+ * separated by ";". The "id" is in decimal, and the entries must appear
+ * in the same order as the domains on the resource's domain list.
+static int parse_line(char *line, struct rdt_resource *r)
+{
+ char *dom = NULL, *id;
+ struct rdt_domain *d;
+ unsigned long dom_id;
+
+ list_for_each_entry(d, &r->domains, list) {
+ dom = strsep(&line, ";");
+ if (!dom)
+ return -EINVAL;
+ id = strsep(&dom, "=");
+ if (kstrtoul(id, 10, &dom_id) || dom_id != d->id)
+ return -EINVAL;
+ if (parse_cbm(dom, r))
+ return -EINVAL;
+ }
+
+ /* Any garbage at the end of the line? */
+ if (line && line[0])
+ return -EINVAL;
+ return 0;
+}
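+
+/*
+ * Illustrative input (mask values made up): for an L3 resource with two
+ * cache domains 0 and 1, a schemata line written as "L3:0=fffff;1=003c"
+ * reaches parse_line() as "0=fffff;1=003c", after rdtgroup_schemata_write()
+ * has stripped the leading "L3:" resource name.
+ */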
+
+static int update_domains(struct rdt_resource *r, int closid)
+{
+ struct msr_param msr_param;
+ cpumask_var_t cpu_mask;
+ struct rdt_domain *d;
+ int cpu, idx = 0;
+
+ if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ msr_param.low = closid;
+ msr_param.high = msr_param.low + 1;
+ msr_param.res = r;
+
+ list_for_each_entry(d, &r->domains, list) {
+ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+ d->cbm[msr_param.low] = r->tmp_cbms[idx++];
+ }
+ cpu = get_cpu();
+ /* Update CBM on this cpu if it's in cpu_mask. */
+ if (cpumask_test_cpu(cpu, cpu_mask))
+ rdt_cbm_update(&msr_param);
+ /* Update CBM on other cpus. */
+ smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+ put_cpu();
+
+ free_cpumask_var(cpu_mask);
+
+ return 0;
+}
+
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
+ char *tok, *resname;
+ int closid, ret = 0;
+ u32 *l3_cbms = NULL;
+
+ /* Valid input requires a trailing newline */
+ if (nbytes == 0 || buf[nbytes - 1] != '\n')
+ return -EINVAL;
+ buf[nbytes - 1] = '\0';
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ closid = rdtgrp->closid;
+
+ /* get scratch space to save all the masks while we validate input */
+ for_each_enabled_rdt_resource(r) {
+ r->tmp_cbms = kcalloc(r->num_domains, sizeof(*l3_cbms),
+ GFP_KERNEL);
+ if (!r->tmp_cbms) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ r->num_tmp_cbms = 0;
+ }
+
+ while ((tok = strsep(&buf, "\n")) != NULL) {
+ resname = strsep(&tok, ":");
+ if (!tok) {
+ ret = -EINVAL;
+ goto out;
+ }
+ for_each_enabled_rdt_resource(r) {
+ if (!strcmp(resname, r->name) &&
+ closid < r->num_closid) {
+ ret = parse_line(tok, r);
+ if (ret)
+ goto out;
+ break;
+ }
+ }
+ if (!r->name) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* Did the parser find all the masks we need? */
+ for_each_enabled_rdt_resource(r) {
+ if (r->num_tmp_cbms != r->num_domains) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ for_each_enabled_rdt_resource(r) {
+ ret = update_domains(r, closid);
+ if (ret)
+ goto out;
+ }
+
+out:
+ rdtgroup_kn_unlock(of->kn);
+ for_each_enabled_rdt_resource(r) {
+ kfree(r->tmp_cbms);
+ r->tmp_cbms = NULL;
+ }
+ return ret ?: nbytes;
+}
+
+static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
+{
+ struct rdt_domain *dom;
+ bool sep = false;
+
+ seq_printf(s, "%s:", r->name);
+ list_for_each_entry(dom, &r->domains, list) {
+ if (sep)
+ seq_puts(s, ";");
+ seq_printf(s, "%d=%x", dom->id, dom->cbm[closid]);
+ sep = true;
+ }
+ seq_puts(s, "\n");
+}
+
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
+ int closid, ret = 0;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (rdtgrp) {
+ closid = rdtgrp->closid;
+ for_each_enabled_rdt_resource(r) {
+ if (closid < r->num_closid)
+ show_doms(s, r, closid);
+ }
+ } else {
+ ret = -ENOENT;
+ }
+ rdtgroup_kn_unlock(of->kn);
+ return ret;
+}
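+
+/*
+ * Example session (illustrative values and group name):
+ *
+ *   # cat /sys/fs/resctrl/p0/schemata
+ *   L3:0=fffff;1=fffff
+ *   # echo "L3:0=0003f;1=fffff" > /sys/fs/resctrl/p0/schemata
+ *
+ * The show path prints one "name:id=mask;..." line per enabled resource;
+ * the write path expects lines in the same format.
+ */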
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d1316f9c8329..d9794060fe22 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -20,12 +20,15 @@ struct cpuid_bit {
/* Please keep the leaf sorted by cpuid_bit.level for faster search. */
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
- { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
- { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 },
+ { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
+ { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 },
{ X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
{ X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
- { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
- { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
+ { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
+ { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
+ { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
+ { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
+ { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ 0, 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index d0d744108594..a0ac3e81518a 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -53,6 +53,7 @@
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
+#include <asm/intel_rdt.h>
void __show_regs(struct pt_regs *regs, int all)
{
@@ -296,5 +297,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
this_cpu_write(current_task, next_p);
+ /* Load the Intel cache allocation PQR MSR. */
+ intel_rdt_sched_in();
+
return prev_p;
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index a76b65e3e615..a61e141b6891 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -49,6 +49,7 @@
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
+#include <asm/intel_rdt.h>
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
@@ -476,6 +477,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
loadsegment(ss, __KERNEL_DS);
}
+ /* Load the Intel cache allocation PQR MSR. */
+ intel_rdt_sched_in();
+
return prev_p;
}
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index f61058617ada..f4126cf997a4 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -15,6 +15,7 @@ config XTENSA
select GENERIC_SCHED_CLOCK
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
+ select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if !MMU
diff --git a/arch/xtensa/boot/dts/kc705.dts b/arch/xtensa/boot/dts/kc705.dts
index b1f4ee8c9a22..6106bdc097ad 100644
--- a/arch/xtensa/boot/dts/kc705.dts
+++ b/arch/xtensa/boot/dts/kc705.dts
@@ -11,4 +11,20 @@
device_type = "memory";
reg = <0x00000000 0x38000000>;
};
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* global autoconfigured region for contiguous allocations */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x04000000>;
+ alignment = <0x2000>;
+ alloc-ranges = <0x00000000 0x20000000>;
+ linux,cma-default;
+ };
+ };
};
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 28cf4c5d65ef..b7fbaa56b51a 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -3,6 +3,7 @@ generic-y += bug.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += div64.h
+generic-y += dma-contiguous.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index c31f5d5afc7d..264fb89c444e 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
obj-$(CONFIG_SMP) += smp.o mxhead.o
obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
AFLAGS_head.o += -mtext-section-literals
AFLAGS_mxhead.o += -mtext-section-literals
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 6a16decf278f..70e362e6038e 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -15,6 +15,7 @@
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
*/
+#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
@@ -146,6 +147,8 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
{
unsigned long ret;
unsigned long uncached = 0;
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page *page = NULL;
/* ignore region speicifiers */
@@ -153,11 +156,18 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
flag |= GFP_DMA;
- ret = (unsigned long)__get_free_pages(flag, get_order(size));
- if (ret == 0)
+ if (gfpflags_allow_blocking(flag))
+ page = dma_alloc_from_contiguous(dev, count, get_order(size));
+
+ if (!page)
+ page = alloc_pages(flag, get_order(size));
+
+ if (!page)
return NULL;
+ ret = (unsigned long)page_address(page);
+
/* We currently don't support coherent memory outside KSEG */
BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
@@ -170,16 +180,19 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
return (void *)uncached;
}
-static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
+static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long)vaddr +
XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+ struct page *page = virt_to_page(addr);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
- free_pages(addr, get_order(size));
+ if (!dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(size));
}
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
diff --git a/arch/xtensa/kernel/s32c1i_selftest.c b/arch/xtensa/kernel/s32c1i_selftest.c
new file mode 100644
index 000000000000..07e56e3a9a8b
--- /dev/null
+++ b/arch/xtensa/kernel/s32c1i_selftest.c
@@ -0,0 +1,128 @@
+/*
+ * S32C1I selftest.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <asm/traps.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
+
+/*
+ * Basic atomic compare-and-swap, that records PC of S32C1I for probing.
+ *
+ * If *v == cmp, set *v = set. Return previous *v.
+ */
+static inline int probed_compare_swap(int *v, int cmp, int set)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ " movi %1, 1f\n"
+ " s32i %1, %4, 0\n"
+ " wsr %2, scompare1\n"
+ "1: s32c1i %0, %3, 0\n"
+ : "=a" (set), "=&a" (tmp)
+ : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
+ : "memory"
+ );
+ return set;
+}
+
+/* Handle probed exception */
+
+static void __init do_probed_exception(struct pt_regs *regs,
+ unsigned long exccause)
+{
+ if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
+ regs->pc += 3; /* skip the s32c1i instruction */
+ rcw_exc = exccause;
+ } else {
+ do_unhandled(regs, exccause);
+ }
+}
+
+/* Simple test of S32C1I (soc bringup assist) */
+
+static int __init check_s32c1i(void)
+{
+ int n, cause1, cause2;
+ void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
+
+ rcw_probe_pc = 0;
+ handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
+ do_probed_exception);
+ handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
+ do_probed_exception);
+ handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
+ do_probed_exception);
+
+ /* First try an S32C1I that does not store: */
+ rcw_exc = 0;
+ rcw_word = 1;
+ n = probed_compare_swap(&rcw_word, 0, 2);
+ cause1 = rcw_exc;
+
+ /* took exception? */
+ if (cause1 != 0) {
+ /* unclean exception? */
+ if (n != 2 || rcw_word != 1)
+ panic("S32C1I exception error");
+ } else if (rcw_word != 1 || n != 1) {
+ panic("S32C1I compare error");
+ }
+
+ /* Then an S32C1I that stores: */
+ rcw_exc = 0;
+ rcw_word = 0x1234567;
+ n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
+ cause2 = rcw_exc;
+
+ if (cause2 != 0) {
+ /* unclean exception? */
+ if (n != 0xabcde || rcw_word != 0x1234567)
+ panic("S32C1I exception error (b)");
+ } else if (rcw_word != 0xabcde || n != 0x1234567) {
+ panic("S32C1I store error");
+ }
+
+ /* Verify consistency of exceptions: */
+ if (cause1 || cause2) {
+ pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
+ /* If emulation of S32C1I upon bus error gets implemented,
+ * we can get rid of this panic for single core (not SMP)
+ */
+ panic("S32C1I exceptions not currently supported");
+ }
+ if (cause1 != cause2)
+ panic("inconsistent S32C1I exceptions");
+
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
+ return 0;
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+/* This condition should not occur with a commercially deployed processor.
+ * Display a reminder for early engineering test or demo chips / FPGA bitstreams
+ */
+static int __init check_s32c1i(void)
+{
+ pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
+ return 0;
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+early_initcall(check_s32c1i);
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 88a044af7504..848e8568fb3c 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -31,10 +31,6 @@
# include <linux/console.h>
#endif
-#ifdef CONFIG_RTC
-# include <linux/timex.h>
-#endif
-
#ifdef CONFIG_PROC_FS
# include <linux/seq_file.h>
#endif
@@ -48,24 +44,22 @@
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/param.h>
-#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/sysmem.h>
#include <platform/hardware.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
-struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
-#endif
-
-#ifdef CONFIG_BLK_DEV_FD
-extern struct fd_ops no_fd_ops;
-struct fd_ops *fd_ops;
+struct screen_info screen_info = {
+ .orig_x = 0,
+ .orig_y = 24,
+ .orig_video_cols = 80,
+ .orig_video_lines = 24,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 16,
+};
#endif
-extern struct rtc_ops no_rtc_ops;
-struct rtc_ops *rtc_ops;
-
#ifdef CONFIG_BLK_DEV_INITRD
extern unsigned long initrd_start;
extern unsigned long initrd_end;
@@ -77,7 +71,6 @@ extern int initrd_below_start_ok;
void *dtb_start = __dtb_start;
#endif
-unsigned char aux_device_present;
extern unsigned long loops_per_jiffy;
/* Command line specified as configuration option. */
@@ -317,120 +310,6 @@ extern char _SecondaryResetVector_text_start;
extern char _SecondaryResetVector_text_end;
#endif
-
-#ifdef CONFIG_S32C1I_SELFTEST
-#if XCHAL_HAVE_S32C1I
-
-static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
-
-/*
- * Basic atomic compare-and-swap, that records PC of S32C1I for probing.
- *
- * If *v == cmp, set *v = set. Return previous *v.
- */
-static inline int probed_compare_swap(int *v, int cmp, int set)
-{
- int tmp;
-
- __asm__ __volatile__(
- " movi %1, 1f\n"
- " s32i %1, %4, 0\n"
- " wsr %2, scompare1\n"
- "1: s32c1i %0, %3, 0\n"
- : "=a" (set), "=&a" (tmp)
- : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
- : "memory"
- );
- return set;
-}
-
-/* Handle probed exception */
-
-static void __init do_probed_exception(struct pt_regs *regs,
- unsigned long exccause)
-{
- if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
- regs->pc += 3; /* skip the s32c1i instruction */
- rcw_exc = exccause;
- } else {
- do_unhandled(regs, exccause);
- }
-}
-
-/* Simple test of S32C1I (soc bringup assist) */
-
-static int __init check_s32c1i(void)
-{
- int n, cause1, cause2;
- void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
-
- rcw_probe_pc = 0;
- handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
- do_probed_exception);
- handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
- do_probed_exception);
- handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
- do_probed_exception);
-
- /* First try an S32C1I that does not store: */
- rcw_exc = 0;
- rcw_word = 1;
- n = probed_compare_swap(&rcw_word, 0, 2);
- cause1 = rcw_exc;
-
- /* took exception? */
- if (cause1 != 0) {
- /* unclean exception? */
- if (n != 2 || rcw_word != 1)
- panic("S32C1I exception error");
- } else if (rcw_word != 1 || n != 1) {
- panic("S32C1I compare error");
- }
-
- /* Then an S32C1I that stores: */
- rcw_exc = 0;
- rcw_word = 0x1234567;
- n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
- cause2 = rcw_exc;
-
- if (cause2 != 0) {
- /* unclean exception? */
- if (n != 0xabcde || rcw_word != 0x1234567)
- panic("S32C1I exception error (b)");
- } else if (rcw_word != 0xabcde || n != 0x1234567) {
- panic("S32C1I store error");
- }
-
- /* Verify consistency of exceptions: */
- if (cause1 || cause2) {
- pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
- /* If emulation of S32C1I upon bus error gets implemented,
- we can get rid of this panic for single core (not SMP) */
- panic("S32C1I exceptions not currently supported");
- }
- if (cause1 != cause2)
- panic("inconsistent S32C1I exceptions");
-
- trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
- trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
- trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
- return 0;
-}
-
-#else /* XCHAL_HAVE_S32C1I */
-
-/* This condition should not occur with a commercially deployed processor.
- Display reminder for early engr test or demo chips / FPGA bitstreams */
-static int __init check_s32c1i(void)
-{
- pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
- return 0;
-}
-
-#endif /* XCHAL_HAVE_S32C1I */
-early_initcall(check_s32c1i);
-#endif /* CONFIG_S32C1I_SELFTEST */
-
static inline int mem_reserve(unsigned long start, unsigned long end)
{
return memblock_reserve(start, end - start);
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 80e4cfb2471a..720fe4e8b497 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -26,6 +26,7 @@
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
+#include <linux/dma-contiguous.h>
#include <asm/bootparam.h>
#include <asm/page.h>
@@ -60,6 +61,7 @@ void __init bootmem_init(void)
max_low_pfn = min(max_pfn, MAX_LOW_PFN);
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+ dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
memblock_dump_all();
}
diff --git a/block/ioctl.c b/block/ioctl.c
index f856963204f4..656c8c6ed206 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -45,6 +45,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
|| pstart < 0 || plength < 0 || partno > 65535)
return -EINVAL;
}
+ /* check if partition is aligned to blocksize */
+ if (p.start & (bdev_logical_block_size(bdev) - 1))
+ return -EINVAL;
mutex_lock(&bdev->bd_mutex);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 0774799942e0..c6fee7437be4 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(WRITE_16, filter->write_ok);
__set_bit(WRITE_LONG, filter->write_ok);
__set_bit(WRITE_LONG_2, filter->write_ok);
+ __set_bit(WRITE_SAME, filter->write_ok);
+ __set_bit(WRITE_SAME_16, filter->write_ok);
+ __set_bit(WRITE_SAME_32, filter->write_ok);
__set_bit(ERASE, filter->write_ok);
__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
__set_bit(MODE_SELECT, filter->write_ok);
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 7dd527f8ca1d..94be8a8e6c08 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -166,6 +166,12 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
+acpi_status
+acpi_tb_get_table(struct acpi_table_desc *table_desc,
+ struct acpi_table_header **out_table);
+
+void acpi_tb_put_table(struct acpi_table_desc *table_desc);
+
/*
* tbxfload
*/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 5fb838e592dc..81473a4880ce 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -311,6 +311,8 @@ void acpi_tb_parse_fadt(void)
{
u32 length;
struct acpi_table_header *table;
+ struct acpi_table_desc *fadt_desc;
+ acpi_status status;
/*
* The FADT has multiple versions with different lengths,
@@ -319,14 +321,12 @@ void acpi_tb_parse_fadt(void)
* Get a local copy of the FADT and convert it to a common format
* Map entire FADT, assumed to be smaller than one page.
*/
- length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
-
- table =
- acpi_os_map_memory(acpi_gbl_root_table_list.
- tables[acpi_gbl_fadt_index].address, length);
- if (!table) {
+ fadt_desc = &acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index];
+ status = acpi_tb_get_table(fadt_desc, &table);
+ if (ACPI_FAILURE(status)) {
return;
}
+ length = fadt_desc->length;
/*
* Validate the FADT checksum before we copy the table. Ignore
@@ -340,7 +340,7 @@ void acpi_tb_parse_fadt(void)
/* All done with the real FADT, unmap it */
- acpi_os_unmap_memory(table, length);
+ acpi_tb_put_table(fadt_desc);
/* Obtain the DSDT and FACS tables via their addresses within the FADT */
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 51eb07cf9898..86854e846800 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -381,3 +381,88 @@ next_table:
acpi_os_unmap_memory(table, length);
return_ACPI_STATUS(AE_OK);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_get_table
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ * out_table - Where the pointer to the table is returned
+ *
+ * RETURN: Status and pointer to the requested table
+ *
+ * DESCRIPTION: Increase a reference to a table descriptor and return the
+ * validated table pointer.
+ * If the table descriptor is an entry of the root table list,
+ * this API must be invoked with ACPI_MTX_TABLES acquired.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_get_table(struct acpi_table_desc *table_desc,
+ struct acpi_table_header **out_table)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_tb_get_table);
+
+ if (table_desc->validation_count == 0) {
+
+ /* Table needs to be "VALIDATED" */
+
+ status = acpi_tb_validate_table(table_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ table_desc->validation_count++;
+ if (table_desc->validation_count == 0) {
+ ACPI_ERROR((AE_INFO,
+ "Table %p, Validation count is zero after increment\n",
+ table_desc));
+ table_desc->validation_count--;
+ return_ACPI_STATUS(AE_LIMIT);
+ }
+
+ *out_table = table_desc->pointer;
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_put_table
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Decrease a reference to a table descriptor and release the
+ * validated table pointer if there are no more references.
+ * If the table descriptor is an entry of the root table list,
+ * this API must be invoked with ACPI_MTX_TABLES acquired.
+ *
+ ******************************************************************************/
+
+void acpi_tb_put_table(struct acpi_table_desc *table_desc)
+{
+
+ ACPI_FUNCTION_TRACE(acpi_tb_put_table);
+
+ if (table_desc->validation_count == 0) {
+ ACPI_WARNING((AE_INFO,
+ "Table %p, Validation count is zero before decrement\n",
+ table_desc));
+ return_VOID;
+ }
+ table_desc->validation_count--;
+
+ if (table_desc->validation_count == 0) {
+
+ /* Table needs to be "INVALIDATED" */
+
+ acpi_tb_invalidate_table(table_desc);
+ }
+
+ return_VOID;
+}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index d5adb7ac4684..7684707b254b 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -282,7 +282,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
/*******************************************************************************
*
- * FUNCTION: acpi_get_table_with_size
+ * FUNCTION: acpi_get_table
*
* PARAMETERS: signature - ACPI signature of needed table
* instance - Which instance (for SSDTs)
@@ -292,16 +292,21 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
*
* DESCRIPTION: Finds and verifies an ACPI table. Table must be in the
* RSDT/XSDT.
+ * Note that an early stage acpi_get_table() call must be paired
+ * with an early stage acpi_put_table() call; otherwise the table
+ * pointer mapped by the early stage mapping implementation may be
+ * erroneously unmapped by the late stage unmapping implementation
+ * in an acpi_put_table() invoked during the late stage.
*
******************************************************************************/
acpi_status
-acpi_get_table_with_size(char *signature,
- u32 instance, struct acpi_table_header **out_table,
- acpi_size *tbl_size)
+acpi_get_table(char *signature,
+ u32 instance, struct acpi_table_header ** out_table)
{
u32 i;
u32 j;
- acpi_status status;
+ acpi_status status = AE_NOT_FOUND;
+ struct acpi_table_desc *table_desc;
/* Parameter validation */
@@ -309,13 +314,22 @@ acpi_get_table_with_size(char *signature,
return (AE_BAD_PARAMETER);
}
+ /*
+ * Note that the following line is required by some OSPMs: they only
+ * check whether the returned table is NULL, instead of the returned
+ * status, to determine whether this function succeeded.
+ */
+ *out_table = NULL;
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
/* Walk the root table list */
for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
i++) {
- if (!ACPI_COMPARE_NAME
- (&(acpi_gbl_root_table_list.tables[i].signature),
- signature)) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+
+ if (!ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
continue;
}
@@ -323,43 +337,65 @@ acpi_get_table_with_size(char *signature,
continue;
}
- status =
- acpi_tb_validate_table(&acpi_gbl_root_table_list.tables[i]);
- if (ACPI_SUCCESS(status)) {
- *out_table = acpi_gbl_root_table_list.tables[i].pointer;
- *tbl_size = acpi_gbl_root_table_list.tables[i].length;
- }
-
- if (!acpi_gbl_permanent_mmap) {
- acpi_gbl_root_table_list.tables[i].pointer = NULL;
- }
-
- return (status);
+ status = acpi_tb_get_table(table_desc, out_table);
+ break;
}
- return (AE_NOT_FOUND);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return (status);
}
-ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
+ACPI_EXPORT_SYMBOL(acpi_get_table)
-acpi_status
-acpi_get_table(char *signature,
- u32 instance, struct acpi_table_header **out_table)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_put_table
+ *
+ * PARAMETERS: table - The pointer to the table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Release a table returned by acpi_get_table() and its clones.
+ * Note that it is not safe to invoke this function after the
+ * original table descriptor has been uninstalled. Currently no
+ * OSPM requires such situations to be handled.
+ *
+ ******************************************************************************/
+void acpi_put_table(struct acpi_table_header *table)
{
- acpi_size tbl_size;
+ u32 i;
+ struct acpi_table_desc *table_desc;
+
+ ACPI_FUNCTION_TRACE(acpi_put_table);
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+ /* Walk the root table list */
+
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
- return acpi_get_table_with_size(signature,
- instance, out_table, &tbl_size);
+ if (table_desc->pointer != table) {
+ continue;
+ }
+
+ acpi_tb_put_table(table_desc);
+ break;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_VOID;
}
-ACPI_EXPORT_SYMBOL(acpi_get_table)
+ACPI_EXPORT_SYMBOL(acpi_put_table)
/*******************************************************************************
*
* FUNCTION: acpi_get_table_by_index
*
* PARAMETERS: table_index - Table index
- * table - Where the pointer to the table is returned
+ * out_table - Where the pointer to the table is returned
*
* RETURN: Status and pointer to the requested table
*
@@ -368,7 +404,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
*
******************************************************************************/
acpi_status
-acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
+acpi_get_table_by_index(u32 table_index, struct acpi_table_header **out_table)
{
acpi_status status;
@@ -376,35 +412,33 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
/* Parameter validation */
- if (!table) {
+ if (!out_table) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ /*
+ * Note that the following line is required by some OSPMs: they only
+ * check whether the returned table is NULL, instead of the returned
+ * status, to determine whether this function succeeded.
+ */
+ *out_table = NULL;
+
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/* Validate index */
if (table_index >= acpi_gbl_root_table_list.current_table_count) {
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
}
- if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
-
- /* Table is not mapped, map it */
+ status =
+ acpi_tb_get_table(&acpi_gbl_root_table_list.tables[table_index],
+ out_table);
- status =
- acpi_tb_validate_table(&acpi_gbl_root_table_list.
- tables[table_index]);
- if (ACPI_FAILURE(status)) {
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(status);
- }
- }
-
- *table = acpi_gbl_root_table_list.tables[table_index].pointer;
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 5cbefd7621f0..95855cb9d6fb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -974,7 +974,7 @@ void __init acpi_early_init(void)
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
- acpi_gbl_permanent_mmap = 1;
+ acpi_permanent_mmap = true;
/*
* If the machine falls into the DMI check table,
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 312c4b4dc363..2f82b8eba360 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2806,12 +2806,13 @@ static int acpi_nfit_add(struct acpi_device *adev)
acpi_size sz;
int rc = 0;
- status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
+ status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
if (ACPI_FAILURE(status)) {
/* This is ok, we could have an nvdimm hotplugged later */
dev_dbg(dev, "failed to find NFIT at startup\n");
return 0;
}
+ sz = tbl->length;
acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
if (!acpi_desc)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 9a4c6abee63e..a404ff4d7151 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -76,6 +76,7 @@ static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
+bool acpi_permanent_mmap = false;
/*
* This list of permanent mappings is for memory that may be accessed from
@@ -306,7 +307,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
* virtual address). If not found, map it, add it to that list and return a
* pointer to it.
*
- * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
+ * During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_map_table() to get the job done.
*/
void __iomem *__ref
@@ -322,7 +323,7 @@ acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
return NULL;
}
- if (!acpi_gbl_permanent_mmap)
+ if (!acpi_permanent_mmap)
return __acpi_map_table((unsigned long)phys, size);
mutex_lock(&acpi_ioremap_lock);
@@ -392,7 +393,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
* mappings, drop a reference to it and unmap it if there are no more active
* references to it.
*
- * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
+ * During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_unmap_table() to get the job done. Since
* __acpi_unmap_table() is an __init function, the __ref annotation is needed
* here.
@@ -401,7 +402,7 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
- if (!acpi_gbl_permanent_mmap) {
+ if (!acpi_permanent_mmap) {
__acpi_unmap_table(virt, size);
return;
}
@@ -426,12 +427,6 @@ void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
-void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
-{
- if (!acpi_gbl_permanent_mmap)
- __acpi_unmap_table(virt, size);
-}
-
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
u64 addr;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 5c78ee1860b0..611a5585a902 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -154,18 +154,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
{
struct acpi_table_madt *madt = NULL;
- acpi_size tbl_size;
phys_cpuid_t rv;
- acpi_get_table_with_size(ACPI_SIG_MADT, 0,
- (struct acpi_table_header **)&madt,
- &tbl_size);
+ acpi_get_table(ACPI_SIG_MADT, 0,
+ (struct acpi_table_header **)&madt);
if (!madt)
return PHYS_CPUID_INVALID;
rv = map_madt_entry(madt, 1, acpi_id, true);
- early_acpi_os_unmap_memory(madt, tbl_size);
+ acpi_put_table((struct acpi_table_header *)madt);
return rv;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 93b00cf4eb39..45dec874ea55 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1120,9 +1120,6 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
"support\n"));
*cap |= ACPI_VIDEO_BACKLIGHT;
- if (!acpi_has_method(handle, "_BQC"))
- printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
- "cannot determine initial brightness\n");
/* We have backlight support, no need to scan further */
return AE_CTRL_TERMINATE;
}
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index e8d7bc7d4da8..b8019c4c1d38 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -33,7 +33,6 @@ int __init parse_spcr(bool earlycon)
{
static char opts[64];
struct acpi_table_spcr *table;
- acpi_size table_size;
acpi_status status;
char *uart;
char *iotype;
@@ -43,9 +42,8 @@ int __init parse_spcr(bool earlycon)
if (acpi_disabled)
return -ENODEV;
- status = acpi_get_table_with_size(ACPI_SIG_SPCR, 0,
- (struct acpi_table_header **)&table,
- &table_size);
+ status = acpi_get_table(ACPI_SIG_SPCR, 0,
+ (struct acpi_table_header **)&table);
if (ACPI_FAILURE(status))
return -ENOENT;
@@ -106,6 +104,6 @@ int __init parse_spcr(bool earlycon)
err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);
done:
- early_acpi_os_unmap_memory((void __iomem *)table, table_size);
+ acpi_put_table((struct acpi_table_header *)table);
return err;
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index cdd56c4657e0..2604189d6cd1 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -333,7 +333,6 @@ acpi_table_parse_entries_array(char *id,
unsigned int max_entries)
{
struct acpi_table_header *table_header = NULL;
- acpi_size tbl_size;
int count;
u32 instance = 0;
@@ -346,7 +345,7 @@ acpi_table_parse_entries_array(char *id,
if (!strncmp(id, ACPI_SIG_MADT, 4))
instance = acpi_apic_instance;
- acpi_get_table_with_size(id, instance, &table_header, &tbl_size);
+ acpi_get_table(id, instance, &table_header);
if (!table_header) {
pr_warn("%4.4s not present\n", id);
return -ENODEV;
@@ -355,7 +354,7 @@ acpi_table_parse_entries_array(char *id,
count = acpi_parse_entries_array(id, table_size, table_header,
proc, proc_num, max_entries);
- early_acpi_os_unmap_memory((char *)table_header, tbl_size);
+ acpi_put_table(table_header);
return count;
}
@@ -397,7 +396,6 @@ acpi_table_parse_madt(enum acpi_madt_type id,
int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
{
struct acpi_table_header *table = NULL;
- acpi_size tbl_size;
if (acpi_disabled)
return -ENODEV;
@@ -406,13 +404,13 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
return -EINVAL;
if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
- acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size);
+ acpi_get_table(id, acpi_apic_instance, &table);
else
- acpi_get_table_with_size(id, 0, &table, &tbl_size);
+ acpi_get_table(id, 0, &table);
if (table) {
handler(table);
- early_acpi_os_unmap_memory(table, tbl_size);
+ acpi_put_table(table);
return 0;
} else
return -ENODEV;
@@ -426,16 +424,15 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
static void __init check_multiple_madt(void)
{
struct acpi_table_header *table = NULL;
- acpi_size tbl_size;
- acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
+ acpi_get_table(ACPI_SIG_MADT, 2, &table);
if (table) {
pr_warn("BIOS bug: multiple APIC/MADT found, using %d\n",
acpi_apic_instance);
pr_warn("If \"acpi_apic_instance=%d\" works better, "
"notify linux-acpi@vger.kernel.org\n",
acpi_apic_instance ? 0 : 2);
- early_acpi_os_unmap_memory(table, tbl_size);
+ acpi_put_table(table);
} else
acpi_apic_instance = 0;
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 1e3903d0d994..eb3af2739537 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -363,6 +363,7 @@ static ssize_t file_name##_show(struct device *dev, \
return sprintf(buf, "%u\n", this_leaf->object); \
}
+show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
@@ -444,6 +445,7 @@ static ssize_t write_policy_show(struct device *dev,
return n;
}
+static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
@@ -457,6 +459,7 @@ static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
+ &dev_attr_id.attr,
&dev_attr_type.attr,
&dev_attr_level.attr,
&dev_attr_shared_cpu_map.attr,
@@ -480,6 +483,8 @@ cache_default_attrs_is_visible(struct kobject *kobj,
const struct cpumask *mask = &this_leaf->shared_cpu_map;
umode_t mode = attr->mode;
+ if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
+ return mode;
if ((attr == &dev_attr_type.attr) && this_leaf->type)
return mode;
if ((attr == &dev_attr_level.attr) && this_leaf->level)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 3a98702b7445..3a2ca0f79daf 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -930,7 +930,7 @@ static void __init acpi_cpufreq_boost_init(void)
static void acpi_cpufreq_boost_exit(void)
{
- if (acpi_cpufreq_online >= 0)
+ if (acpi_cpufreq_online > 0)
cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 176e84cc3991..0cb9040eca49 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -107,7 +107,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
}
#ifdef CONFIG_REGULATOR
-static void __init s3c64xx_cpufreq_config_regulator(void)
+static void s3c64xx_cpufreq_config_regulator(void)
{
int count, v, i, found;
struct cpufreq_frequency_table *freq;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 4f973a9c7b87..8ec1967a850b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -305,8 +305,9 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
GOP_VBIOS_CONTENT *vbios;
VFCT_IMAGE_HEADER *vhdr;
- if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+ if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
+ tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
goto out_unmap;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index e564442b6393..b4e4ec630e8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1944,9 +1944,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_lock_cursor(crtc, true);
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height ||
- hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -1955,8 +1953,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 6ce7fb42dbef..584abe834a3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2438,8 +2438,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v8_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 558640aee15a..b323f5ef64d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -411,244 +411,587 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
break;
}
- if (adev->asic_type == CHIP_VERDE ||
- adev->asic_type == CHIP_OLAND ||
- adev->asic_type == CHIP_HAINAN) {
+ if (adev->asic_type == CHIP_VERDE) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
break;
case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
break;
case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
break;
case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
break;
case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 15:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
+ break;
+ case 19:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 20:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
case 21:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 22:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 23:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 24:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 25:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 26:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 27:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 28:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 29:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 30:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ default:
+ continue;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ } else if (adev->asic_type == CHIP_OLAND ||
+ adev->asic_type == CHIP_HAINAN) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 1:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 2:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 3:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 4:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 5:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 6:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 7:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
+ break;
+ case 9:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 10:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 11:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 12:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 13:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 14:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 15:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 16:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 17:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 19:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 20:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 21:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 22:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 23:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 24:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 25:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
- NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 26:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 27:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 28:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 29:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 30:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
default:
- gb_tile_moden = 0;
- break;
+ continue;
}
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
@@ -656,239 +999,291 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
} else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
- case 0: /* non-AA compressed depth or any compressed stencil */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 0:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 1: /* 2xAA/4xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 1:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 2: /* 8xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 2:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 3:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 4:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
break;
- case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 5:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 6:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ case 7:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 8: /* 1D and 1D Array Surfaces */
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
break;
- case 9: /* Displayable maps. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 9:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
break;
- case 10: /* Display 8bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 10:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 11: /* Display 16bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 11:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 12: /* Display 32bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 12:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 13: /* Thin. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 13:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
break;
- case 14: /* Thin 8 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 14:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 15: /* Thin 16 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 15:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 16: /* Thin 32 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 16:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 17: /* Thin 64 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 17:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
+ break;
+ case 19:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 21: /* 8 bpp PRT. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 20:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 22: /* 16 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 21:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 22:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
- case 23: /* 32 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 23:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 24: /* 64 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 24:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 25: /* 128 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ case 25:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
- NUM_BANKS(ADDR_SURF_8_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- default:
- gb_tile_moden = 0;
+ case 26:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 27:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 28:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
+ case 29:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 30:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ default:
+ continue;
}
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6324f67bdb1f..d0ec00986f38 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3949,8 +3949,12 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
data = mmRLC_SRM_INDEX_CNTL_DATA_0;
for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
- amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
- amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
+ if (unique_indices[i] != 0) {
+ amdgpu_mm_wreg(adev, temp + i,
+ unique_indices[i] & 0x3FFFF, false);
+ amdgpu_mm_wreg(adev, data + i,
+ unique_indices[i] >> 20, false);
+ }
}
kfree(register_list_format);
@@ -3966,20 +3970,17 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
uint32_t data;
- if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG)) {
- WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
+ WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
- data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
- data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
- data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
- data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
- WREG32(mmRLC_PG_DELAY, data);
+ data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
+ WREG32(mmRLC_PG_DELAY, data);
+
+ WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+ WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
- WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
- WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
- }
}
static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
@@ -3996,41 +3997,37 @@ static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
{
- WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
+ WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
}
static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
{
- if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG |
- AMD_PG_SUPPORT_CP |
- AMD_PG_SUPPORT_GDS |
- AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
gfx_v8_0_init_csb(adev);
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
-
- if ((adev->asic_type == CHIP_CARRIZO) ||
- (adev->asic_type == CHIP_STONEY)) {
- WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
- gfx_v8_0_init_power_gating(adev);
- WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
- if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
- cz_enable_sck_slow_down_on_power_up(adev, true);
- cz_enable_sck_slow_down_on_power_down(adev, true);
- } else {
- cz_enable_sck_slow_down_on_power_up(adev, false);
- cz_enable_sck_slow_down_on_power_down(adev, false);
- }
- if (adev->pg_flags & AMD_PG_SUPPORT_CP)
- cz_enable_cp_power_gating(adev, true);
- else
- cz_enable_cp_power_gating(adev, false);
- } else if (adev->asic_type == CHIP_POLARIS11) {
- gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+ if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+ cz_enable_sck_slow_down_on_power_up(adev, true);
+ cz_enable_sck_slow_down_on_power_down(adev, true);
+ } else {
+ cz_enable_sck_slow_down_on_power_up(adev, false);
+ cz_enable_sck_slow_down_on_power_down(adev, false);
}
+ if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+ cz_enable_cp_power_gating(adev, true);
+ else
+ cz_enable_cp_power_gating(adev, false);
+ } else if (adev->asic_type == CHIP_POLARIS11) {
+ gfx_v8_0_init_csb(adev);
+ gfx_v8_0_init_save_restore_list(adev);
+ gfx_v8_0_enable_save_restore_machine(adev);
+ gfx_v8_0_init_power_gating(adev);
}
+
}
static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
@@ -5339,14 +5336,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
- return 0;
-
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
- if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
- cz_update_gfx_cg_power_gating(adev, enable);
+
+ cz_update_gfx_cg_power_gating(adev, enable);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
@@ -5791,25 +5785,49 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
- uint32_t msg_id, pp_state;
+ uint32_t msg_id, pp_state = 0;
+ uint32_t pp_support_state = 0;
void *pp_handle = adev->powerplay.pp_handle;
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_MG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
return 0;
}
@@ -5817,43 +5835,98 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
- uint32_t msg_id, pp_state;
+
+ uint32_t msg_id, pp_state = 0;
+ uint32_t pp_support_state = 0;
void *pp_handle = adev->powerplay.pp_handle;
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_3D,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_3D,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_MG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_RLC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_RLC,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 3ed8ad8725b9..c46b0159007d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -43,13 +43,14 @@
static const u32 tahiti_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
0x031e, 0x00000080, 0x00000000,
- 0x340c, 0x000300c0, 0x00800040,
- 0x360c, 0x000300c0, 0x00800040,
+ 0x340c, 0x000000c0, 0x00800040,
+ 0x360c, 0x000000c0, 0x00800040,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0x00200000, 0x50100000,
0x1c0c, 0x31000311, 0x00000011,
@@ -60,7 +61,7 @@ static const u32 tahiti_golden_registers[] =
0x22c4, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x2a00126a,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
0x23a2, 0x01ff1f3f, 0x00000000,
@@ -73,7 +74,11 @@ static const u32 tahiti_golden_registers[] =
0x2234, 0xffffffff, 0x000fff40,
0x2235, 0x0000001f, 0x00000010,
0x0504, 0x20000000, 0x20fffed8,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 tahiti_golden_registers2[] =
@@ -83,16 +88,18 @@ static const u32 tahiti_golden_registers2[] =
static const u32 tahiti_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x12011003,
0x3109, 0xffffffff, 0x00601005,
0x311f, 0xffffffff, 0x10104040,
0x3122, 0xffffffff, 0x0100000a,
0x30c5, 0xffffffff, 0x00000800,
0x30c3, 0xffffffff, 0x800000f4,
- 0x3d2a, 0xffffffff, 0x00000000
+ 0x3d2a, 0x00000008, 0x00000000
};
static const u32 pitcairn_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
@@ -110,7 +117,7 @@ static const u32 pitcairn_golden_registers[] =
0x22c4, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x2a00126a,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
0x2418, 0x0000007f, 0x00000020,
@@ -119,11 +126,16 @@ static const u32 pitcairn_golden_registers[] =
0x2b04, 0xffffffff, 0x00000000,
0x2b03, 0xffffffff, 0x32761054,
0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 pitcairn_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x12011003,
0x3109, 0xffffffff, 0x00601004,
0x311f, 0xffffffff, 0x10102020,
0x3122, 0xffffffff, 0x01000020,
@@ -133,133 +145,134 @@ static const u32 pitcairn_golden_rlc_registers[] =
static const u32 verde_pg_init[] =
{
- 0xd4f, 0xffffffff, 0x40000,
- 0xd4e, 0xffffffff, 0x200010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x7007,
- 0xd4e, 0xffffffff, 0x300010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x400000,
- 0xd4e, 0xffffffff, 0x100010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x120200,
- 0xd4e, 0xffffffff, 0x500010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x1e1e16,
- 0xd4e, 0xffffffff, 0x600010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x171f1e,
- 0xd4e, 0xffffffff, 0x700010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4e, 0xffffffff, 0x9ff,
- 0xd40, 0xffffffff, 0x0,
- 0xd41, 0xffffffff, 0x10000800,
- 0xd41, 0xffffffff, 0xf,
- 0xd41, 0xffffffff, 0xf,
- 0xd40, 0xffffffff, 0x4,
- 0xd41, 0xffffffff, 0x1000051e,
- 0xd41, 0xffffffff, 0xffff,
- 0xd41, 0xffffffff, 0xffff,
- 0xd40, 0xffffffff, 0x8,
- 0xd41, 0xffffffff, 0x80500,
- 0xd40, 0xffffffff, 0x12,
- 0xd41, 0xffffffff, 0x9050c,
- 0xd40, 0xffffffff, 0x1d,
- 0xd41, 0xffffffff, 0xb052c,
- 0xd40, 0xffffffff, 0x2a,
- 0xd41, 0xffffffff, 0x1053e,
- 0xd40, 0xffffffff, 0x2d,
- 0xd41, 0xffffffff, 0x10546,
- 0xd40, 0xffffffff, 0x30,
- 0xd41, 0xffffffff, 0xa054e,
- 0xd40, 0xffffffff, 0x3c,
- 0xd41, 0xffffffff, 0x1055f,
- 0xd40, 0xffffffff, 0x3f,
- 0xd41, 0xffffffff, 0x10567,
- 0xd40, 0xffffffff, 0x42,
- 0xd41, 0xffffffff, 0x1056f,
- 0xd40, 0xffffffff, 0x45,
- 0xd41, 0xffffffff, 0x10572,
- 0xd40, 0xffffffff, 0x48,
- 0xd41, 0xffffffff, 0x20575,
- 0xd40, 0xffffffff, 0x4c,
- 0xd41, 0xffffffff, 0x190801,
- 0xd40, 0xffffffff, 0x67,
- 0xd41, 0xffffffff, 0x1082a,
- 0xd40, 0xffffffff, 0x6a,
- 0xd41, 0xffffffff, 0x1b082d,
- 0xd40, 0xffffffff, 0x87,
- 0xd41, 0xffffffff, 0x310851,
- 0xd40, 0xffffffff, 0xba,
- 0xd41, 0xffffffff, 0x891,
- 0xd40, 0xffffffff, 0xbc,
- 0xd41, 0xffffffff, 0x893,
- 0xd40, 0xffffffff, 0xbe,
- 0xd41, 0xffffffff, 0x20895,
- 0xd40, 0xffffffff, 0xc2,
- 0xd41, 0xffffffff, 0x20899,
- 0xd40, 0xffffffff, 0xc6,
- 0xd41, 0xffffffff, 0x2089d,
- 0xd40, 0xffffffff, 0xca,
- 0xd41, 0xffffffff, 0x8a1,
- 0xd40, 0xffffffff, 0xcc,
- 0xd41, 0xffffffff, 0x8a3,
- 0xd40, 0xffffffff, 0xce,
- 0xd41, 0xffffffff, 0x308a5,
- 0xd40, 0xffffffff, 0xd3,
- 0xd41, 0xffffffff, 0x6d08cd,
- 0xd40, 0xffffffff, 0x142,
- 0xd41, 0xffffffff, 0x2000095a,
- 0xd41, 0xffffffff, 0x1,
- 0xd40, 0xffffffff, 0x144,
- 0xd41, 0xffffffff, 0x301f095b,
- 0xd40, 0xffffffff, 0x165,
- 0xd41, 0xffffffff, 0xc094d,
- 0xd40, 0xffffffff, 0x173,
- 0xd41, 0xffffffff, 0xf096d,
- 0xd40, 0xffffffff, 0x184,
- 0xd41, 0xffffffff, 0x15097f,
- 0xd40, 0xffffffff, 0x19b,
- 0xd41, 0xffffffff, 0xc0998,
- 0xd40, 0xffffffff, 0x1a9,
- 0xd41, 0xffffffff, 0x409a7,
- 0xd40, 0xffffffff, 0x1af,
- 0xd41, 0xffffffff, 0xcdc,
- 0xd40, 0xffffffff, 0x1b1,
- 0xd41, 0xffffffff, 0x800,
- 0xd42, 0xffffffff, 0x6c9b2000,
- 0xd44, 0xfc00, 0x2000,
- 0xd51, 0xffffffff, 0xfc0,
- 0xa35, 0x00000100, 0x100
+ 0x0d4f, 0xffffffff, 0x40000,
+ 0x0d4e, 0xffffffff, 0x200010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x7007,
+ 0x0d4e, 0xffffffff, 0x300010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x400000,
+ 0x0d4e, 0xffffffff, 0x100010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x120200,
+ 0x0d4e, 0xffffffff, 0x500010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x1e1e16,
+ 0x0d4e, 0xffffffff, 0x600010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x171f1e,
+ 0x0d4e, 0xffffffff, 0x700010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4e, 0xffffffff, 0x9ff,
+ 0x0d40, 0xffffffff, 0x0,
+ 0x0d41, 0xffffffff, 0x10000800,
+ 0x0d41, 0xffffffff, 0xf,
+ 0x0d41, 0xffffffff, 0xf,
+ 0x0d40, 0xffffffff, 0x4,
+ 0x0d41, 0xffffffff, 0x1000051e,
+ 0x0d41, 0xffffffff, 0xffff,
+ 0x0d41, 0xffffffff, 0xffff,
+ 0x0d40, 0xffffffff, 0x8,
+ 0x0d41, 0xffffffff, 0x80500,
+ 0x0d40, 0xffffffff, 0x12,
+ 0x0d41, 0xffffffff, 0x9050c,
+ 0x0d40, 0xffffffff, 0x1d,
+ 0x0d41, 0xffffffff, 0xb052c,
+ 0x0d40, 0xffffffff, 0x2a,
+ 0x0d41, 0xffffffff, 0x1053e,
+ 0x0d40, 0xffffffff, 0x2d,
+ 0x0d41, 0xffffffff, 0x10546,
+ 0x0d40, 0xffffffff, 0x30,
+ 0x0d41, 0xffffffff, 0xa054e,
+ 0x0d40, 0xffffffff, 0x3c,
+ 0x0d41, 0xffffffff, 0x1055f,
+ 0x0d40, 0xffffffff, 0x3f,
+ 0x0d41, 0xffffffff, 0x10567,
+ 0x0d40, 0xffffffff, 0x42,
+ 0x0d41, 0xffffffff, 0x1056f,
+ 0x0d40, 0xffffffff, 0x45,
+ 0x0d41, 0xffffffff, 0x10572,
+ 0x0d40, 0xffffffff, 0x48,
+ 0x0d41, 0xffffffff, 0x20575,
+ 0x0d40, 0xffffffff, 0x4c,
+ 0x0d41, 0xffffffff, 0x190801,
+ 0x0d40, 0xffffffff, 0x67,
+ 0x0d41, 0xffffffff, 0x1082a,
+ 0x0d40, 0xffffffff, 0x6a,
+ 0x0d41, 0xffffffff, 0x1b082d,
+ 0x0d40, 0xffffffff, 0x87,
+ 0x0d41, 0xffffffff, 0x310851,
+ 0x0d40, 0xffffffff, 0xba,
+ 0x0d41, 0xffffffff, 0x891,
+ 0x0d40, 0xffffffff, 0xbc,
+ 0x0d41, 0xffffffff, 0x893,
+ 0x0d40, 0xffffffff, 0xbe,
+ 0x0d41, 0xffffffff, 0x20895,
+ 0x0d40, 0xffffffff, 0xc2,
+ 0x0d41, 0xffffffff, 0x20899,
+ 0x0d40, 0xffffffff, 0xc6,
+ 0x0d41, 0xffffffff, 0x2089d,
+ 0x0d40, 0xffffffff, 0xca,
+ 0x0d41, 0xffffffff, 0x8a1,
+ 0x0d40, 0xffffffff, 0xcc,
+ 0x0d41, 0xffffffff, 0x8a3,
+ 0x0d40, 0xffffffff, 0xce,
+ 0x0d41, 0xffffffff, 0x308a5,
+ 0x0d40, 0xffffffff, 0xd3,
+ 0x0d41, 0xffffffff, 0x6d08cd,
+ 0x0d40, 0xffffffff, 0x142,
+ 0x0d41, 0xffffffff, 0x2000095a,
+ 0x0d41, 0xffffffff, 0x1,
+ 0x0d40, 0xffffffff, 0x144,
+ 0x0d41, 0xffffffff, 0x301f095b,
+ 0x0d40, 0xffffffff, 0x165,
+ 0x0d41, 0xffffffff, 0xc094d,
+ 0x0d40, 0xffffffff, 0x173,
+ 0x0d41, 0xffffffff, 0xf096d,
+ 0x0d40, 0xffffffff, 0x184,
+ 0x0d41, 0xffffffff, 0x15097f,
+ 0x0d40, 0xffffffff, 0x19b,
+ 0x0d41, 0xffffffff, 0xc0998,
+ 0x0d40, 0xffffffff, 0x1a9,
+ 0x0d41, 0xffffffff, 0x409a7,
+ 0x0d40, 0xffffffff, 0x1af,
+ 0x0d41, 0xffffffff, 0xcdc,
+ 0x0d40, 0xffffffff, 0x1b1,
+ 0x0d41, 0xffffffff, 0x800,
+ 0x0d42, 0xffffffff, 0x6c9b2000,
+ 0x0d44, 0xfc00, 0x2000,
+ 0x0d51, 0xffffffff, 0xfc0,
+ 0x0a35, 0x00000100, 0x100
};
static const u32 verde_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x02010002,
0x3109, 0xffffffff, 0x033f1005,
0x311f, 0xffffffff, 0x10808020,
0x3122, 0xffffffff, 0x00800008,
@@ -269,65 +282,45 @@ static const u32 verde_golden_rlc_registers[] =
static const u32 verde_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
0x031e, 0x00000080, 0x00000000,
0x340c, 0x000300c0, 0x00800040,
- 0x340c, 0x000300c0, 0x00800040,
- 0x360c, 0x000300c0, 0x00800040,
0x360c, 0x000300c0, 0x00800040,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0x00200000, 0x50100000,
-
0x1c0c, 0x31000311, 0x00000011,
0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0903, 0x000007ff, 0x00000000,
- 0x0903, 0x000007ff, 0x00000000,
0x0903, 0x000007ff, 0x00000000,
0x2285, 0xf000001f, 0x00000007,
- 0x2285, 0xf000001f, 0x00000007,
- 0x2285, 0xf000001f, 0x00000007,
- 0x2285, 0xffffffff, 0x00ffffff,
+ 0x22c9, 0xffffffff, 0x00ffffff,
0x22c4, 0x0000ff0f, 0x00000000,
-
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x0000124a,
- 0xa0d4, 0x3f3f3fff, 0x0000124a,
- 0xa0d4, 0x3f3f3fff, 0x0000124a,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
- 0x2440, 0x07ffffff, 0x03000000,
- 0x23a2, 0x01ff1f3f, 0x00000000,
- 0x23a3, 0x01ff1f3f, 0x00000000,
0x23a2, 0x01ff1f3f, 0x00000000,
0x23a1, 0x01ff1f3f, 0x00000000,
- 0x23a1, 0x01ff1f3f, 0x00000000,
-
- 0x23a1, 0x01ff1f3f, 0x00000000,
0x2418, 0x0000007f, 0x00000020,
0x2542, 0x00010000, 0x00010000,
- 0x2b01, 0x000003ff, 0x00000003,
- 0x2b05, 0x000003ff, 0x00000003,
0x2b05, 0x000003ff, 0x00000003,
0x2b04, 0xffffffff, 0x00000000,
- 0x2b04, 0xffffffff, 0x00000000,
- 0x2b04, 0xffffffff, 0x00000000,
- 0x2b03, 0xffffffff, 0x00001032,
- 0x2b03, 0xffffffff, 0x00001032,
0x2b03, 0xffffffff, 0x00001032,
0x2235, 0x0000001f, 0x00000010,
- 0x2235, 0x0000001f, 0x00000010,
- 0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 oland_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
@@ -336,7 +329,7 @@ static const u32 oland_golden_registers[] =
0x340c, 0x000300c0, 0x00800040,
0x360c, 0x000300c0, 0x00800040,
0x16ec, 0x000000f0, 0x00000070,
- 0x16f9, 0x00200000, 0x50100000,
+ 0x16f0, 0x00200000, 0x50100000,
0x1c0c, 0x31000311, 0x00000011,
0x0ab9, 0x00073ffe, 0x000022a2,
0x0903, 0x000007ff, 0x00000000,
@@ -345,7 +338,7 @@ static const u32 oland_golden_registers[] =
0x22c4, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x00000082,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
0x2418, 0x0000007f, 0x00000020,
@@ -354,11 +347,16 @@ static const u32 oland_golden_registers[] =
0x2b04, 0xffffffff, 0x00000000,
0x2b03, 0xffffffff, 0x00003210,
0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 oland_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x02010002,
0x3109, 0xffffffff, 0x00601005,
0x311f, 0xffffffff, 0x10104040,
0x3122, 0xffffffff, 0x0100000a,
@@ -368,22 +366,27 @@ static const u32 oland_golden_rlc_registers[] =
static const u32 hainan_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
- 0x4595, 0xff000fff, 0x00000100,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x3430, 0xff000fff, 0x00000100,
0x340c, 0x000300c0, 0x00800040,
0x3630, 0xff000fff, 0x00000100,
0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0x00200000, 0x50100000,
+ 0x1c0c, 0x31000311, 0x00000011,
0x0ab9, 0x00073ffe, 0x000022a2,
0x0903, 0x000007ff, 0x00000000,
0x2285, 0xf000001f, 0x00000007,
0x22c9, 0xffffffff, 0x00ffffff,
0x22c4, 0x0000ff0f, 0x00000000,
- 0xa393, 0x07ffffff, 0x4e000000,
+ 0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x00000000,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x03e00000, 0x03600000,
0x2418, 0x0000007f, 0x00000020,
@@ -392,12 +395,16 @@ static const u32 hainan_golden_registers[] =
0x2b04, 0xffffffff, 0x00000000,
0x2b03, 0xffffffff, 0x00003210,
0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 hainan_golden_registers2[] =
{
- 0x263e, 0xffffffff, 0x02010001
+ 0x263e, 0xffffffff, 0x2011003
};
static const u32 tahiti_mgcg_cgcg_init[] =
@@ -513,18 +520,18 @@ static const u32 tahiti_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -612,16 +619,16 @@ static const u32 pitcairn_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -709,18 +716,18 @@ static const u32 verde_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -788,18 +795,18 @@ static const u32 oland_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -867,15 +874,15 @@ static const u32 hainan_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -1179,6 +1186,8 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = (adev->rev_id == 0) ? 1 :
+ (adev->rev_id == 1) ? 5 : 6;
break;
case CHIP_PITCAIRN:
adev->cg_flags =
@@ -1198,6 +1207,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 20;
break;
case CHIP_VERDE:
@@ -1219,7 +1229,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
//???
- adev->external_rev_id = adev->rev_id + 0x14;
+ adev->external_rev_id = adev->rev_id + 40;
break;
case CHIP_OLAND:
adev->cg_flags =
@@ -1238,6 +1248,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = 60;
break;
case CHIP_HAINAN:
adev->cg_flags =
@@ -1255,6 +1266,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = 70;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 9f771f4ffcb7..bf088d6d9bf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -932,18 +932,64 @@ static int vi_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case CHIP_TONGA:
- adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
- adev->pg_flags = AMD_PG_SUPPORT_UVD;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_DRM_LS |
+ AMD_CG_SUPPORT_UVD_MGCG;
+ adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_POLARIS11:
- adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_DRM_LS |
+ AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_VCE_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x5A;
break;
case CHIP_POLARIS10:
- adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_DRM_LS |
+ AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_VCE_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
@@ -971,6 +1017,7 @@ static int vi_common_early_init(void *handle)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD |
AMD_PG_SUPPORT_VCE;
}
@@ -996,6 +1043,7 @@ static int vi_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD |
AMD_PG_SUPPORT_VCE;
adev->external_rev_id = adev->rev_id + 0x61;
@@ -1155,57 +1203,118 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
static int vi_common_set_clockgating_state_by_smu(void *handle,
enum amd_clockgating_state state)
{
- uint32_t msg_id, pp_state;
+ uint32_t msg_id, pp_state = 0;
+ uint32_t pp_support_state = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
void *pp_handle = adev->powerplay.pp_handle;
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_MC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_SDMA,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_HDP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_CG,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_DRM,
- PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_ROM,
- PP_STATE_SUPPORT_CG,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
+ pp_support_state = AMD_CG_SUPPORT_MC_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
+ pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_MC,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
+ pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
+ pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_SDMA,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ pp_support_state = AMD_CG_SUPPORT_HDP_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
+ pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_HDP,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_DRM,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_ROM,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
return 0;
}
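The rework above follows one pattern per IP block: derive a support mask and a requested state from adev->cg_flags, force the state to zero when ungating, and send a single message per block to the SMU. A minimal standalone sketch of that shape, with hypothetical flag values and a pp_message() placeholder standing in for amd_set_clockgating_by_smu():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's flag bits and block states. */
#define CG_SUPPORT_MC_LS    (1u << 0)
#define CG_SUPPORT_MC_MGCG  (1u << 1)
#define STATE_LS            (1u << 0)
#define STATE_CG            (1u << 1)

enum cg_state { CG_STATE_GATE, CG_STATE_UNGATE };

/* Placeholder for the SMU message call: just print what would be sent. */
static void pp_message(const char *block, uint32_t support, uint32_t state)
{
    printf("%s: support=0x%x state=0x%x\n", block, (unsigned)support, (unsigned)state);
}

static void set_mc_clockgating(uint32_t cg_flags, enum cg_state state)
{
    uint32_t support = 0, req = 0;

    if (!(cg_flags & (CG_SUPPORT_MC_LS | CG_SUPPORT_MC_MGCG)))
        return;                 /* block not supported: send nothing */

    if (cg_flags & CG_SUPPORT_MC_LS) {
        support |= CG_SUPPORT_MC_LS;
        req |= STATE_LS;
    }
    if (cg_flags & CG_SUPPORT_MC_MGCG) {
        support |= CG_SUPPORT_MC_MGCG;
        req |= STATE_CG;
    }
    if (state == CG_STATE_UNGATE)
        req = 0;                /* ungating clears the requested state */

    pp_message("MC", support, req);
}

int main(void)
{
    set_mc_clockgating(CG_SUPPORT_MC_LS | CG_SUPPORT_MC_MGCG, CG_STATE_GATE);
    set_mc_clockgating(CG_SUPPORT_MC_LS, CG_STATE_UNGATE);
    return 0;
}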
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index d1986276dbbd..c02469ada9f1 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -126,6 +126,10 @@ enum amd_vce_level {
#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
#define AMD_CG_SUPPORT_ROM_MGCG (1 << 17)
+#define AMD_CG_SUPPORT_DRM_LS (1 << 18)
+#define AMD_CG_SUPPORT_BIF_MGCG (1 << 19)
+#define AMD_CG_SUPPORT_GFX_3D_CGCG (1 << 20)
+#define AMD_CG_SUPPORT_GFX_3D_CGLS (1 << 21)
/* PG flags */
#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 904beaa932d0..f75c6421db62 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev)
ast_write32(ast, 0x10000, 0xfc600309);
do {
- ;
+ if (pci_channel_offline(dev->pdev))
+ return -EIO;
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
@@ -428,7 +429,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
ast_detect_chip(dev, &need_post);
if (ast->chip != AST1180) {
- ast_get_dram_info(dev);
+ ret = ast_get_dram_info(dev);
+ if (ret)
+ goto out_free;
ast->vram_size = ast_get_vram_info(dev);
DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 56002a52936d..243224aeabf8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3509,6 +3509,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 412f3513f269..4a31b7a891ec 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -174,21 +174,35 @@ static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
- char *vaddr = obj->phys_handle->vaddr;
+ drm_dma_handle_t *phys;
struct sg_table *st;
struct scatterlist *sg;
+ char *vaddr;
int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return ERR_PTR(-EINVAL);
+ /* Always aligning to the object size, allows a single allocation
+ * to handle all possible callers, and given typical object sizes,
+ * the alignment of the buddy allocation will naturally match.
+ */
+ phys = drm_pci_alloc(obj->base.dev,
+ obj->base.size,
+ roundup_pow_of_two(obj->base.size));
+ if (!phys)
+ return ERR_PTR(-ENOMEM);
+
+ vaddr = phys->vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
char *src;
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
- return ERR_CAST(page);
+ if (IS_ERR(page)) {
+ st = ERR_CAST(page);
+ goto err_phys;
+ }
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
@@ -202,21 +216,29 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL)
- return ERR_PTR(-ENOMEM);
+ if (!st) {
+ st = ERR_PTR(-ENOMEM);
+ goto err_phys;
+ }
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return ERR_PTR(-ENOMEM);
+ st = ERR_PTR(-ENOMEM);
+ goto err_phys;
}
sg = st->sgl;
sg->offset = 0;
sg->length = obj->base.size;
- sg_dma_address(sg) = obj->phys_handle->busaddr;
+ sg_dma_address(sg) = phys->busaddr;
sg_dma_len(sg) = obj->base.size;
+ obj->phys_handle = phys;
+ return st;
+
+err_phys:
+ drm_pci_free(obj->base.dev, phys);
return st;
}
@@ -272,12 +294,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
sg_free_table(pages);
kfree(pages);
+
+ drm_pci_free(obj->base.dev, obj->phys_handle);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
- drm_pci_free(obj->base.dev, obj->phys_handle);
i915_gem_object_unpin_pages(obj);
}
@@ -538,15 +561,13 @@ int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
- drm_dma_handle_t *phys;
int ret;
- if (obj->phys_handle) {
- if ((unsigned long)obj->phys_handle->vaddr & (align -1))
- return -EBUSY;
+ if (align > obj->base.size)
+ return -EINVAL;
+ if (obj->ops == &i915_gem_phys_ops)
return 0;
- }
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
@@ -562,12 +583,6 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->mm.pages)
return -EBUSY;
- /* create a new object */
- phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
- if (!phys)
- return -ENOMEM;
-
- obj->phys_handle = phys;
obj->ops = &i915_gem_phys_ops;
return i915_gem_object_pin_pages(obj);
@@ -2326,7 +2341,8 @@ static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- int page_count, i;
+ const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ unsigned long i;
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
@@ -2352,7 +2368,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (st == NULL)
return ERR_PTR(-ENOMEM);
- page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
return ERR_PTR(-ENOMEM);
@@ -2411,8 +2427,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
i915_sg_trim(st);
ret = i915_gem_gtt_prepare_pages(obj, st);
- if (ret)
- goto err_pages;
+ if (ret) {
+ /* DMA remapping failed? One possible cause is that
+ * it could not reserve enough large entries, asking
+ * for PAGE_SIZE chunks instead may be helpful.
+ */
+ if (max_segment > PAGE_SIZE) {
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
+ sg_free_table(st);
+
+ max_segment = PAGE_SIZE;
+ goto rebuild_st;
+ } else {
+ dev_warn(&dev_priv->drm.pdev->dev,
+ "Failed to DMA remap %lu pages\n",
+ page_count);
+ goto err_pages;
+ }
+ }
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
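The rebuild_st hunk above retries the scatter-gather build with PAGE_SIZE segments when DMA remapping of larger segments fails, as the added comment explains. A standalone sketch of that retry shape, with dma_remap() as a hypothetical stand-in for i915_gem_gtt_prepare_pages():

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Hypothetical stand-in: pretend remapping only succeeds for small segments. */
static bool dma_remap(unsigned int max_segment)
{
    return max_segment <= PAGE_SIZE;
}

/* Build with large segments first; on failure, tear down and retry with
 * PAGE_SIZE segments before giving up, mirroring the rebuild_st label above. */
static int build_pages(unsigned int max_segment)
{
rebuild:
    printf("building table with max_segment=%u\n", max_segment);
    if (!dma_remap(max_segment)) {
        if (max_segment > PAGE_SIZE) {
            /* a real driver frees the oversized table here, then retries */
            max_segment = PAGE_SIZE;
            goto rebuild;
        }
        fprintf(stderr, "DMA remap failed even with PAGE_SIZE chunks\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    return build_pages(64 * PAGE_SIZE) ? 1 : 0;
}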
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ebaa941c83af..abc78bbfc1dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,10 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
return -ENODEV;
/* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+ * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
*/
- if (start < 4096 && (IS_GEN8(dev_priv) ||
- IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
+ if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 47590ab08d7e..3df8d3dd31cd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -460,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 8405b5a367d7..7e3545f65257 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -46,14 +46,20 @@ struct edp_power_seq {
u16 t11_t12;
} __packed;
-/* MIPI Sequence Block definitions */
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming, we use the proper names here to avoid confusion when
+ * reading the code.
+ */
enum mipi_seq {
MIPI_SEQ_END = 0,
- MIPI_SEQ_ASSERT_RESET,
+ MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
- MIPI_SEQ_DEASSERT_RESET,
+ MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index cf5cff7b03b8..6daad8613760 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6244,35 +6244,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv)
dev_priv->cdclk_pll.vco = 0;
}
-static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
-{
- int ret;
- u32 val;
-
- /* inform PCU we want to change CDCLK */
- val = SKL_CDCLK_PREPARE_FOR_CHANGE;
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
-}
-
-static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
-{
- return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
-}
-
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
u32 freq_select, pcu_ack;
+ int ret;
WARN_ON((cdclk == 24000) != (vco == 0));
DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
- if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
- DRM_ERROR("failed to inform PCU about cdclk change\n");
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+ ret);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 90283edcafba..d9bc19be855e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4014,8 +4014,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
/* FIXME: we need to synchronize this sort of stuff with hardware
- * readout */
- if (WARN_ON_ONCE(!intel_dp->lane_count))
+ * readout. Currently fast link training doesn't work on boot-up. */
+ if (!intel_dp->lane_count)
return;
/* if link training is requested we should perform it always */
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 0d8ff0034b88..47cd1b20fb3e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -300,7 +300,8 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->sb_lock);
vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
vlv_iosf_sb_write(dev_priv, port, cfg0,
- CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+ CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
+ CHV_GPIO_GPIOTXSTATE(value));
mutex_unlock(&dev_priv->sb_lock);
}
@@ -376,11 +377,11 @@ static const fn_mipi_elem_exec exec_elem[] = {
*/
static const char * const seq_name[] = {
- [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+ [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
[MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
- [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+ [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
[MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
[MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
[MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0a09024d6ca3..d4961fa20c73 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1968,12 +1968,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- ret = logical_ring_init(engine);
- if (ret) {
- lrc_destroy_wa_ctx_obj(engine);
- }
-
- return ret;
+ return logical_ring_init(engine);
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d67974eb127a..ae2c0bb4b2e8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2964,24 +2964,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
return 0;
}
-static int
-intel_do_sagv_disable(struct drm_i915_private *dev_priv)
-{
- int ret;
- uint32_t temp = GEN9_SAGV_DISABLE;
-
- ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
- &temp);
- if (ret)
- return ret;
- else
- return temp & GEN9_SAGV_IS_DISABLED;
-}
-
int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
- int ret, result;
+ int ret;
if (!intel_has_sagv(dev_priv))
return 0;
@@ -2993,25 +2979,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
/* bspec says to keep retrying for at least 1 ms */
- ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
+ ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+ 1);
mutex_unlock(&dev_priv->rps.hw_lock);
- if (ret == -ETIMEDOUT) {
- DRM_ERROR("Request to disable SAGV timed out\n");
- return -ETIMEDOUT;
- }
-
/*
* Some skl systems, pre-release machines in particular,
* don't actually have an SAGV.
*/
- if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
+ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
- } else if (result < 0) {
- DRM_ERROR("Failed to disable the SAGV\n");
- return result;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+ return ret;
}
dev_priv->sagv_status = I915_SAGV_DISABLED;
@@ -7890,6 +7874,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
return 0;
}
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status)
+{
+ u32 val = request;
+
+ *status = sandybridge_pcode_read(dev_priv, mbox, &val);
+
+ return *status || ((val & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and if this times out for another 10 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ u32 status;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+ &status)
+
+ /*
+ * Prime the PCODE by doing a request first. Normally it guarantees
+ * that a subsequent request, at most @timeout_base_ms later, succeeds.
+ * _wait_for() doesn't guarantee when its passed condition is evaluated
+ * first, so send the first request explicitly.
+ */
+ if (COND) {
+ ret = 0;
+ goto out;
+ }
+ ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+ if (!ret)
+ goto out;
+
+ /*
+ * The above can time out if the number of requests was low (2 in the
+ * worst case) _and_ PCODE was busy for some reason even after a
+ * (queued) request and @timeout_base_ms delay. As a workaround retry
+ * the poll with preemption disabled to maximize the number of
+ * requests. Increase the timeout from @timeout_base_ms to 10ms to
+ * account for interrupts that could reduce the number of these
+ * requests.
+ */
+ DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+ WARN_ON_ONCE(timeout_base_ms > 3);
+ preempt_disable();
+ ret = wait_for_atomic(COND, 10);
+ preempt_enable();
+
+out:
+ return ret ? ret : status;
+#undef COND
+}
+
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
/*
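skl_pcode_request() above keeps resending the request until the masked reply matches, PCODE reports an error, or the time budget runs out; the preemption-disabled retry is a second, shorter pass of the same loop. A simplified standalone sketch of the resend-until-acknowledged part only, with mbox_read() as a hypothetical mailbox:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mailbox: returns 0 and fills *reply, or a negative error.
 * Here it "acknowledges" after a few polls just to exercise the loop. */
static int mbox_read(uint32_t request, uint32_t *reply)
{
    static int calls;
    *reply = (++calls >= 3) ? request : 0;
    return 0;
}

/* Resend @request until (*reply & mask) == want, the mailbox errors out,
 * or @budget attempts are used up -- the same shape as skl_pcode_request(). */
static int pcode_request(uint32_t request, uint32_t mask, uint32_t want,
                         int budget)
{
    uint32_t reply;
    int status;

    while (budget-- > 0) {
        status = mbox_read(request, &reply);
        if (status)
            return status;          /* mailbox reported an error */
        if ((reply & mask) == want)
            return 0;               /* acknowledged */
        /* a real driver sleeps or relaxes between polls */
    }
    return -ETIMEDOUT;
}

int main(void)
{
    int ret = pcode_request(0x1, 0x1, 0x1, 10);

    printf("pcode_request: %d\n", ret);
    return ret ? 1 : 0;
}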
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 7b488e2793d9..c6be70686b4a 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -825,13 +825,9 @@ void intel_psr_init(struct drm_device *dev)
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
- /* Per platform default */
- if (i915.enable_psr == -1) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- i915.enable_psr = 1;
- else
- i915.enable_psr = 0;
- }
+ /* Per platform default: all disabled. */
+ if (i915.enable_psr == -1)
+ i915.enable_psr = 0;
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 356c662ad453..87b4af092d54 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1039,7 +1039,18 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+ u32 val;
+
+ /*
+ * On driver load, a pipe may be active and driving a DSI display.
+ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+ * (and never recovering) in this case. intel_dsi_post_disable() will
+ * clear it when we turn off the display.
+ */
+ val = I915_READ(DSPCLK_GATE_D);
+ val &= DPOUNIT_CLOCK_GATE_DISABLE;
+ val |= VRHUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
/*
* Disable trickle feed and enable pnd deadline calculation
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 21b6732425c5..c829cfb02fc4 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -603,8 +603,9 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
GOP_VBIOS_CONTENT *vbios;
VFCT_IMAGE_HEADER *vhdr;
- if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+ if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
+ tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
goto out_unmap;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 08153ea4d848..6ce431323125 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -150,6 +150,29 @@ static int i2c_dw_plat_prepare_clk(struct dw_i2c_dev *i_dev, bool prepare)
return 0;
}
+static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id)
+{
+ u32 param, tx_fifo_depth, rx_fifo_depth;
+
+ /*
+ * Try to detect the FIFO depth if not set by interface driver,
+ * the depth could be from 2 to 256 from HW spec.
+ */
+ param = i2c_dw_read_comp_param(dev);
+ tx_fifo_depth = ((param >> 16) & 0xff) + 1;
+ rx_fifo_depth = ((param >> 8) & 0xff) + 1;
+ if (!dev->tx_fifo_depth) {
+ dev->tx_fifo_depth = tx_fifo_depth;
+ dev->rx_fifo_depth = rx_fifo_depth;
+ dev->adapter.nr = id;
+ } else if (tx_fifo_depth >= 2) {
+ dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
+ tx_fifo_depth);
+ dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth,
+ rx_fifo_depth);
+ }
+}
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -245,13 +268,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
1000000);
}
- if (!dev->tx_fifo_depth) {
- u32 param1 = i2c_dw_read_comp_param(dev);
-
- dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
- dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
- dev->adapter.nr = pdev->id;
- }
+ dw_i2c_set_fifo_size(dev, pdev->id);
adap = &dev->adapter;
adap->owner = THIS_MODULE;
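dw_i2c_set_fifo_size() above reads the component parameter register, decodes the TX/RX FIFO depths from bits 23:16 and 15:8 (stored as depth minus one), and clamps any platform-provided depths to the detected ones. A standalone sketch of the same arithmetic; the register value in main() is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Decode the hardware FIFO depths from the component parameter word and
 * clamp configured depths to what the hardware reports. */
static void set_fifo_size(uint32_t param, uint32_t *tx, uint32_t *rx)
{
    uint32_t hw_tx = ((param >> 16) & 0xff) + 1;
    uint32_t hw_rx = ((param >> 8) & 0xff) + 1;

    if (*tx == 0) {
        /* nothing configured: trust the detected values */
        *tx = hw_tx;
        *rx = hw_rx;
    } else if (hw_tx >= 2) {
        /* configured values may not exceed the detected ones */
        if (hw_tx < *tx)
            *tx = hw_tx;
        if (hw_rx < *rx)
            *rx = hw_rx;
    }
}

int main(void)
{
    uint32_t tx = 64, rx = 64;

    set_fifo_size(0x003f1f00, &tx, &rx);    /* decodes to 64/32 entries */
    printf("tx=%u rx=%u\n", (unsigned)tx, (unsigned)rx);
    return 0;
}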
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 3d10f1a802be..1d8775799056 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -342,7 +342,9 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
- data[i] = octeon_i2c_data_read(i2c);
+ data[i] = octeon_i2c_data_read(i2c, &result);
+ if (result)
+ return result;
if (recv_len && i == 0) {
if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
return -EPROTO;
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index 87151ea74acd..e160f838c254 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -141,11 +141,14 @@ static inline void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
*/
static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
{
+ int tries = 1000;
u64 tmp;
__raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
do {
tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ if (--tries < 0)
+ return;
} while ((tmp & SW_TWSI_V) != 0);
}
@@ -163,24 +166,32 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8
*
* The I2C core registers are accessed indirectly via the SW_TWSI CSR.
*/
-static inline u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
+static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
+ int *error)
{
+ int tries = 1000;
u64 tmp;
__raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
do {
tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ if (--tries < 0) {
+ /* signal that the returned data is invalid */
+ if (error)
+ *error = -EIO;
+ return 0;
+ }
} while ((tmp & SW_TWSI_V) != 0);
return tmp & 0xFF;
}
#define octeon_i2c_ctl_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
-#define octeon_i2c_data_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL)
+#define octeon_i2c_data_read(i2c, error) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error)
#define octeon_i2c_stat_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL)
/**
* octeon_i2c_read_int - read the TWSI_INT register
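The octeon changes above bound the previously endless register polling loops and report failure through an optional out-parameter, so data reads can return -EIO while control/status reads keep passing NULL. A standalone sketch of that bounded busy-wait, with read_reg() as a hypothetical always-busy register:

#include <stdint.h>
#include <stdio.h>

#define BUSY_BIT (1u << 31)

/* Hypothetical register read; stays "busy" forever to show the bail-out. */
static uint32_t read_reg(void)
{
    return BUSY_BIT;
}

/* Poll until the busy bit clears, but give up after a fixed number of tries
 * and report the failure through the optional out-parameter, so callers that
 * care can propagate an error while others may ignore it. */
static uint32_t reg_read(int *error)
{
    int tries = 1000;
    uint32_t val;

    do {
        val = read_reg();
        if (--tries < 0) {
            if (error)
                *error = -5;    /* -EIO: returned data is invalid */
            return 0;
        }
    } while (val & BUSY_BIT);

    return val & 0xff;
}

int main(void)
{
    int err = 0;
    uint32_t data = reg_read(&err);

    printf("data=0x%x err=%d\n", (unsigned)data, err);
    return 0;
}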
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 05cf192ef1ac..0ab1e55558bc 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -415,6 +415,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
adapter->algo = &xgene_slimpro_i2c_algorithm;
adapter->class = I2C_CLASS_HWMON;
adapter->dev.parent = &pdev->dev;
+ adapter->dev.of_node = pdev->dev.of_node;
i2c_set_adapdata(adapter, ctx);
rc = i2c_add_adapter(adapter);
if (rc) {
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
index 3ab654bbfab5..b7ca249ec9c3 100644
--- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -95,6 +95,7 @@ static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
struct i2c_client *client, u8 val)
{
struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
+ int ret = -ENODEV;
if (adap->algo->master_xfer) {
struct i2c_msg msg;
@@ -104,17 +105,21 @@ static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
msg.flags = 0;
msg.len = 2;
msg.buf = msgbuf;
- return __i2c_transfer(adap, &msg, 1);
+ ret = __i2c_transfer(adap, &msg, 1);
+
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
} else if (adap->algo->smbus_xfer) {
union i2c_smbus_data data;
data.byte = val;
- return adap->algo->smbus_xfer(adap, client->addr,
- client->flags, I2C_SMBUS_WRITE,
- pdata->sel_reg_addr,
- I2C_SMBUS_BYTE_DATA, &data);
- } else
- return -ENODEV;
+ ret = adap->algo->smbus_xfer(adap, client->addr,
+ client->flags, I2C_SMBUS_WRITE,
+ pdata->sel_reg_addr,
+ I2C_SMBUS_BYTE_DATA, &data);
+ }
+
+ return ret;
}
static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
@@ -127,10 +132,7 @@ static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
/* Only select the channel if its different from the last channel */
if (data->last_chan != regval) {
err = mlxcpld_mux_reg_write(muxc->parent, client, regval);
- if (err)
- data->last_chan = 0;
- else
- data->last_chan = regval;
+ data->last_chan = err < 0 ? 0 : regval;
}
return err;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 9a348ee4dc14..dd18b9ccb1f4 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -167,6 +167,9 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
buf[0] = val;
msg.buf = buf;
ret = __i2c_transfer(adap, &msg, 1);
+
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
} else {
union i2c_smbus_data data;
ret = adap->algo->smbus_xfer(adap, client->addr,
@@ -195,7 +198,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
/* Only select the channel if its different from the last channel */
if (data->last_chan != regval) {
ret = pca954x_reg_write(muxc->parent, client, regval);
- data->last_chan = ret ? 0 : regval;
+ data->last_chan = ret < 0 ? 0 : regval;
}
return ret;
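Both mux drivers above normalize the __i2c_transfer() return value: it reports the number of messages transferred, so for a single-message select write anything other than exactly 1 becomes -EREMOTEIO, and only a result below zero invalidates the cached channel. A minimal sketch of that normalization, with do_transfer() standing in for __i2c_transfer():

#include <stdio.h>

#define EREMOTEIO 121

/* Hypothetical transfer: pretend none of the 1 requested messages went out. */
static int do_transfer(int num_msgs)
{
    (void)num_msgs;
    return 0;
}

/* Fold the "partial success" case (non-negative but not 1 message) into a
 * remote-I/O error, as the mux register writes above do. */
static int mux_reg_write(void)
{
    int ret = do_transfer(1);

    if (ret >= 0 && ret != 1)
        ret = -EREMOTEIO;

    return ret;
}

int main(void)
{
    printf("mux_reg_write: %d\n", mux_reg_write());
    return 0;
}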
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b7ac97b27c88..cda5542e13a2 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -321,7 +321,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_DCAEN_V(0) |
FW_RI_RES_WR_DCACPU_V(0) |
FW_RI_RES_WR_FBMIN_V(2) |
- FW_RI_RES_WR_FBMAX_V(2) |
+ (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
+ FW_RI_RES_WR_FBMAX_V(3)) |
FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
FW_RI_RES_WR_CIDXFTHRESH_V(0) |
FW_RI_RES_WR_EQSIZE_V(eqsize));
@@ -345,7 +346,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_DCAEN_V(0) |
FW_RI_RES_WR_DCACPU_V(0) |
FW_RI_RES_WR_FBMIN_V(2) |
- FW_RI_RES_WR_FBMAX_V(2) |
+ FW_RI_RES_WR_FBMAX_V(3) |
FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
FW_RI_RES_WR_CIDXFTHRESH_V(0) |
FW_RI_RES_WR_EQSIZE_V(eqsize));
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 392f78384a60..98923a8cf86d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -358,13 +358,16 @@ void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
* @dev: sc device struct
* @pd: sc pd ptr
* @pd_id: pd_id for allocated pd
+ * @abi_ver: ABI version from user context, -1 if not valid
*/
static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
struct i40iw_sc_pd *pd,
- u16 pd_id)
+ u16 pd_id,
+ int abi_ver)
{
pd->size = sizeof(*pd);
pd->pd_id = pd_id;
+ pd->abi_ver = abi_ver;
pd->dev = dev;
}
@@ -2252,6 +2255,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
offset);
info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
+ info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
if (ret_code)
return ret_code;
@@ -2270,10 +2274,21 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
false);
i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
__func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
- ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
- &wqe_size);
- if (ret_code)
- return ret_code;
+
+ switch (qp->pd->abi_ver) {
+ case 4:
+ ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+ &wqe_size);
+ if (ret_code)
+ return ret_code;
+ break;
+ case 5: /* fallthrough until next ABI version */
+ default:
+ if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ wqe_size = I40IW_MAX_WQE_SIZE_RQ;
+ break;
+ }
qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
(wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 449ba8c81ce7..db41ab40da9c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -930,7 +930,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
INIT_LIST_HEAD(&rsrc->txpend);
rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
- dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
+ dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
rsrc->qp_id = info->qp_id;
rsrc->cq_id = info->cq_id;
rsrc->sq_size = info->sq_size;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index f3f8e9cc3c05..7b76259752b0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -280,6 +280,7 @@ struct i40iw_sc_pd {
u32 size;
struct i40iw_sc_dev *dev;
u16 pd_id;
+ int abi_ver;
};
struct i40iw_cqp_quanta {
@@ -852,6 +853,7 @@ struct i40iw_qp_init_info {
u64 host_ctx_pa;
u64 q2_pa;
u64 shadow_area_pa;
+ int abi_ver;
u8 sq_tph_val;
u8 rq_tph_val;
u8 type;
@@ -1051,7 +1053,7 @@ struct i40iw_aeq_ops {
};
struct i40iw_pd_ops {
- void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16);
+ void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16, int);
};
struct i40iw_priv_qp_ops {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
index 12acd688def4..57d3f1d11ff1 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
@@ -39,8 +39,8 @@
#include <linux/types.h>
-#define I40IW_ABI_USERSPACE_VER 4
-#define I40IW_ABI_KERNEL_VER 4
+#define I40IW_ABI_VER 5
+
struct i40iw_alloc_ucontext_req {
__u32 reserved32;
__u8 userspace_ver;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 4376cd628774..2800f796271c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -966,10 +966,6 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
if (ret_code)
return ret_code;
- ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
- if (ret_code)
- return ret_code;
-
qp->sq_base = info->sq;
qp->rq_base = info->rq;
qp->shadow_area = info->shadow_area;
@@ -998,8 +994,19 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
if (!qp->use_srq) {
qp->rq_size = info->rq_size;
qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
- qp->rq_wqe_size = rqshift;
I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
+ switch (info->abi_ver) {
+ case 4:
+ ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
+ if (ret_code)
+ return ret_code;
+ break;
+ case 5: /* fallthrough until next ABI version */
+ default:
+ rqshift = I40IW_MAX_RQ_WQE_SHIFT;
+ break;
+ }
+ qp->rq_wqe_size = rqshift;
qp->rq_wqe_size_multiplier = 4 << rqshift;
}
qp->ops = iw_qp_uk_ops;
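The switch above gates receive-queue sizing on the negotiated ABI version: version 4 keeps the old fragment-count based shift, while version 5 (and the kernel's internal -1) use the fixed I40IW_MAX_RQ_WQE_SHIFT. A standalone sketch of that version-gated selection; compute_legacy_shift() is a hypothetical stand-in for i40iw_get_wqe_shift():

#include <stdio.h>

#define MAX_RQ_WQE_SHIFT 2

/* Made-up legacy rule, standing in for the fragment-count computation. */
static int compute_legacy_shift(int frag_cnt)
{
    return frag_cnt > 2 ? 2 : 1;
}

/* Pick the RQ WQE shift by ABI version: old behavior for version 4,
 * the fixed maximum for version 5 and anything newer (or the kernel's -1). */
static int rq_wqe_shift(int abi_ver, int frag_cnt)
{
    switch (abi_ver) {
    case 4:
        return compute_legacy_shift(frag_cnt);
    case 5:     /* fall through until the next ABI version */
    default:
        return MAX_RQ_WQE_SHIFT;
    }
}

int main(void)
{
    printf("v4: %d, v5: %d, kernel(-1): %d\n",
           rq_wqe_shift(4, 3), rq_wqe_shift(5, 3), rq_wqe_shift(-1, 3));
    return 0;
}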
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index 80d9f464f65e..84be6f13b9c5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -76,6 +76,7 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
I40IW_Q2_BUFFER_SIZE = (248 + 100),
+ I40IW_MAX_WQE_SIZE_RQ = 128,
I40IW_QP_CTX_SIZE = 248,
I40IW_MAX_PDS = 32768
};
@@ -97,6 +98,7 @@ enum i40iw_device_capabilities_const {
#define i40iw_address_list u64 *
#define I40IW_MAX_MR_SIZE 0x10000000000L
+#define I40IW_MAX_RQ_WQE_SHIFT 2
struct i40iw_qp_uk;
struct i40iw_cq_uk;
@@ -405,7 +407,7 @@ struct i40iw_qp_uk_init_info {
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
-
+ int abi_ver;
};
struct i40iw_cq_uk_init_info {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 7368a50bbdaa..29e97df9e1a7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -145,9 +145,8 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
if (ib_copy_from_udata(&req, udata, sizeof(req)))
return ERR_PTR(-EINVAL);
- if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
- i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
- req.userspace_ver, I40IW_ABI_USERSPACE_VER);
+ if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
+ i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
return ERR_PTR(-EINVAL);
}
@@ -155,13 +154,14 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
uresp.max_qps = iwdev->max_qp;
uresp.max_pds = iwdev->max_pd;
uresp.wq_size = iwdev->max_qp_wr * 2;
- uresp.kernel_ver = I40IW_ABI_KERNEL_VER;
+ uresp.kernel_ver = req.userspace_ver;
ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
if (!ucontext)
return ERR_PTR(-ENOMEM);
ucontext->iwdev = iwdev;
+ ucontext->abi_ver = req.userspace_ver;
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
kfree(ucontext);
@@ -333,6 +333,7 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_alloc_pd_resp uresp;
struct i40iw_sc_pd *sc_pd;
+ struct i40iw_ucontext *ucontext;
u32 pd_id = 0;
int err;
@@ -353,15 +354,18 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
}
sc_pd = &iwpd->sc_pd;
- dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);
if (context) {
+ ucontext = to_ucontext(context);
+ dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
memset(&uresp, 0, sizeof(uresp));
uresp.pd_id = pd_id;
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
err = -EFAULT;
goto error;
}
+ } else {
+ dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
}
i40iw_add_pdusecount(iwpd);
@@ -518,7 +522,7 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
u32 sqdepth, rqdepth;
u32 sq_size, rq_size;
- u8 sqshift, rqshift;
+ u8 sqshift;
u32 size;
enum i40iw_status_code status;
struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
@@ -527,14 +531,11 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
- if (!status)
- status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
-
if (status)
return -ENOMEM;
sqdepth = sq_size << sqshift;
- rqdepth = rq_size << rqshift;
+ rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;
size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 6549c939500f..07c3fec77de6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -42,6 +42,7 @@ struct i40iw_ucontext {
spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
+ int abi_ver;
};
struct i40iw_pd {
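Taken together, the i40iw hunks implement a backward-compatible ABI negotiation: the kernel accepts any provider library version in the supported range, echoes the negotiated version back in the allocate-ucontext response, and records it in the ucontext so later PD and QP setup can pick the matching layout. A condensed, hedged sketch of that flow (allocation and error unwinding trimmed):

	/* In i40iw_alloc_ucontext(), roughly: */
	if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER)
		return ERR_PTR(-EINVAL);		/* outside the supported range */

	uresp.kernel_ver = req.userspace_ver;		/* echo the negotiated version */
	ucontext->abi_ver = req.userspace_ver;		/* consumed later by pd_init() */

	/* In i40iw_alloc_pd(), kernel consumers pass -1 instead of a ucontext version: */
	dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, context ? ucontext->abi_ver : -1);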
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 302fb05e6e6f..57c8de208077 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -890,6 +890,8 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
pbl_ptr = cq->q.pbl_tbl->pa;
page_cnt = cq->q.pbl_info.num_pbes;
+
+ cq->ibcq.cqe = chain_entries;
} else {
cq->cq_type = QEDR_CQ_TYPE_KERNEL;
@@ -905,6 +907,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
page_cnt = qed_chain_get_page_cnt(&cq->pbl);
pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+ cq->ibcq.cqe = cq->pbl.capacity;
}
qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
@@ -982,8 +985,13 @@ int qedr_destroy_cq(struct ib_cq *ibcq)
/* GSI CQs are handled by the driver, so they don't exist in the FW */
if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+ int rc;
+
iparams.icid = cq->icid;
- dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
+ &oparams);
+ if (rc)
+ return rc;
dev->ops->common->chain_free(dev->cdev, &cq->pbl);
}
@@ -1966,7 +1974,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_STATE) {
if ((qp->qp_type != IB_QPT_GSI) && (!udata))
- qedr_update_qp_state(dev, qp, qp_params.new_state);
+ rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
qp->state = qp_params.new_state;
}
@@ -2070,8 +2078,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
qp, qp->qp_type);
- if (qp->state != (QED_ROCE_QP_STATE_RESET | QED_ROCE_QP_STATE_ERR |
- QED_ROCE_QP_STATE_INIT)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_INIT)) {
+
attr.qp_state = IB_QPS_ERR;
attr_mask |= IB_QP_STATE;
@@ -2626,7 +2636,9 @@ static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
- if (wr->send_flags & IB_SEND_INLINE) {
+ if (wr->send_flags & IB_SEND_INLINE &&
+ (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE)) {
u8 flags = 0;
SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
@@ -2977,8 +2989,9 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
- if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
- (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_SQD)) {
spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr;
DP_DEBUG(dev, QEDR_MSG_CQ,
@@ -3031,8 +3044,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
- if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
- (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ if (qp->state == QED_ROCE_QP_STATE_RESET) {
spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr;
return -EINVAL;
@@ -3174,6 +3186,7 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
/* fill WC */
wc->status = status;
+ wc->vendor_err = 0;
wc->wc_flags = 0;
wc->src_qp = qp->id;
wc->qp = &qp->ibqp;
@@ -3225,7 +3238,7 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
"Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
- IB_WC_WR_FLUSH_ERR, 0);
+ IB_WC_WR_FLUSH_ERR, 1);
break;
default:
/* process all WQEs before the consumer */
@@ -3363,6 +3376,7 @@ static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
/* fill WC */
wc->status = wc_status;
+ wc->vendor_err = 0;
wc->src_qp = qp->id;
wc->qp = &qp->ibqp;
wc->wr_id = wr_id;
@@ -3391,6 +3405,7 @@ static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
while (num_entries && qp->rq.wqe_cons != hw_cons) {
/* fill WC */
wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = 0;
wc->wc_flags = 0;
wc->src_qp = qp->id;
wc->byte_len = 0;
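The post_send()/post_recv() hunks above tighten when work requests may be queued: sends are accepted while the QP is in RTS, SQD, or ERR (presumably so that WRs posted to an errored QP can still be flushed), and receives are rejected only in RESET. A small hedged sketch of the send-side gate as rewritten:

	spin_lock_irqsave(&qp->q_lock, flags);
	if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
	    (qp->state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->state != QED_ROCE_QP_STATE_SQD)) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;			/* report the first rejected WR */
		return -EINVAL;
	}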
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index cd27cbde7652..d369f24425f9 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -224,7 +224,7 @@ static inline enum comp_state check_psn(struct rxe_qp *qp,
else
return COMPST_DONE;
} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
- return COMPST_ERROR_RETRY;
+ return COMPST_DONE;
} else {
return COMPST_CHECK_ACK;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 16967cdb45df..342e78163613 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -455,8 +455,7 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return -EAGAIN;
}
- if (pkt->qp)
- atomic_inc(&pkt->qp->skb_out);
+ atomic_inc(&pkt->qp->skb_out);
kfree_skb(skb);
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index c3e60e4bde6e..486d576e55bc 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -855,4 +855,5 @@ void rxe_qp_cleanup(void *arg)
free_rd_atomic_resources(qp);
kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 7a36ec9dbc0c..3435efff8799 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1070,12 +1070,13 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
enum resp_states rc;
+ u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
/* SEND. Ack again and cleanup. C9-105. */
if (bth_ack(pkt))
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, qp->resp.psn - 1);
+ send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
rc = RESPST_CLEANUP;
goto out;
} else if (pkt->mask & RXE_READ_MASK) {
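The duplicate_request() fix computes the previous PSN with an explicit mask so the subtraction wraps inside the 24-bit PSN space instead of borrowing into the upper bits of the u32. The pattern, as a tiny hedged example:

	/* PSNs are 24 bits wide; keep the arithmetic inside that space. */
	u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;

	/* e.g. resp.psn == 0 now acks PSN 0xffffff rather than 0xffffffff */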
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 971154cbbb03..6799cf9713f7 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2209,14 +2209,13 @@ static void __init free_dma_resources(void)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- acpi_size ivrs_size;
acpi_status status;
int i, remap_cache_sz, ret = 0;
if (!amd_iommu_detected)
return -ENODEV;
- status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+ status = acpi_get_table("IVRS", 0, &ivrs_base);
if (status == AE_NOT_FOUND)
return -ENODEV;
else if (ACPI_FAILURE(status)) {
@@ -2338,7 +2337,7 @@ static int __init early_amd_iommu_init(void)
out:
/* Don't leak any ACPI memory */
- early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ acpi_put_table(ivrs_base);
ivrs_base = NULL;
return ret;
@@ -2362,10 +2361,9 @@ out:
static bool detect_ivrs(void)
{
struct acpi_table_header *ivrs_base;
- acpi_size ivrs_size;
acpi_status status;
- status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+ status = acpi_get_table("IVRS", 0, &ivrs_base);
if (status == AE_NOT_FOUND)
return false;
else if (ACPI_FAILURE(status)) {
@@ -2374,7 +2372,7 @@ static bool detect_ivrs(void)
return false;
}
- early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ acpi_put_table(ivrs_base);
/* Make sure ACS will be enabled during PCI probe */
pci_request_acs();
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8c53748a769d..a88576d50740 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -68,7 +68,6 @@ DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
-static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
@@ -543,9 +542,7 @@ static int __init dmar_table_detect(void)
acpi_status status = AE_OK;
/* if we could find DMAR table, then there are DMAR devices */
- status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
- (struct acpi_table_header **)&dmar_tbl,
- &dmar_tbl_size);
+ status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
if (ACPI_SUCCESS(status) && !dmar_tbl) {
pr_warn("Unable to map DMAR\n");
@@ -906,7 +903,7 @@ int __init detect_intel_iommu(void)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
- early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
+ acpi_put_table(dmar_tbl);
dmar_tbl = NULL;
up_write(&dmar_global_lock);
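Several callers in this series move from acpi_get_table_with_size()/early_acpi_os_unmap_memory() to the simpler acpi_get_table()/acpi_put_table() pairing, which no longer requires the caller to carry the mapped size around. The usage pattern, sketched for the IVRS case above (the error mapping is illustrative):

	struct acpi_table_header *tbl;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &tbl);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status))
		return -EINVAL;

	/* ... consume the table ... */

	acpi_put_table(tbl);	/* release the reference; no size argument needed */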
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index c19dd820ea9b..2aeb034d5fb9 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -60,7 +60,13 @@
#define RING_ENTRY_SIZE sizeof(struct dma64dd)
/* # entries in PDC dma ring */
-#define PDC_RING_ENTRIES 128
+#define PDC_RING_ENTRIES 512
+/*
+ * Minimum number of ring descriptor entries that must be free to tell the
+ * mailbox framework that it can submit another request
+ */
+#define PDC_RING_SPACE_MIN 15
+
#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER 13
@@ -93,11 +99,9 @@
* Interrupt mask and status definitions. Enable interrupts for tx and rx on
* ring 0
*/
-#define PDC_XMTINT_0 (24 + PDC_RINGSET)
#define PDC_RCVINT_0 (16 + PDC_RINGSET)
-#define PDC_XMTINTEN_0 BIT(PDC_XMTINT_0)
#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0)
-#define PDC_INTMASK (PDC_XMTINTEN_0 | PDC_RCVINTEN_0)
+#define PDC_INTMASK (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT 1
#define PDC_LAZY_TIMEOUT 10000
#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
@@ -117,15 +121,16 @@
/*
* Sets the following bits for write to transmit control reg:
- * 0 - XmtEn - enable activity on the tx channel
* 11 - PtyChkDisable - parity check is disabled
* 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
*/
-#define PDC_TX_CTL 0x000C0801
+#define PDC_TX_CTL 0x000C0800
+
+/* Bit in tx control reg to enable tx channel */
+#define PDC_TX_ENABLE 0x1
/*
* Sets the following bits for write to receive control reg:
- * 0 - RcvEn - enable activity on the rx channel
* 7:1 - RcvOffset - size in bytes of status region at start of rx frame buf
* 9 - SepRxHdrDescEn - place start of new frames only in descriptors
* that have StartOfFrame set
@@ -135,7 +140,10 @@
* 11 - PtyChkDisable - parity check is disabled
* 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
*/
-#define PDC_RX_CTL 0x000C0E01
+#define PDC_RX_CTL 0x000C0E00
+
+/* Bit in rx control reg to enable rx channel */
+#define PDC_RX_ENABLE 0x1
#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
@@ -252,11 +260,29 @@ struct pdc_ring_alloc {
u32 size; /* ring allocation size in bytes */
};
+/*
+ * context associated with a receive descriptor.
+ * @rxp_ctx: opaque context associated with frame that starts at each
+ * rx ring index.
+ * @dst_sg: Scatterlist used to form reply frames beginning at a given ring
+ * index. Retained in order to unmap each sg after reply is processed.
+ * @rxin_numd: Number of rx descriptors associated with the message that starts
+ * at a descriptor index. Not set for every index. For example,
+ * if descriptor index i points to a scatterlist with 4 entries,
+ * then the next three descriptor indexes don't have a value set.
+ * @resp_hdr: Virtual address of buffer used to catch DMA rx status
+ * @resp_hdr_daddr: physical address of DMA rx status buffer
+ */
+struct pdc_rx_ctx {
+ void *rxp_ctx;
+ struct scatterlist *dst_sg;
+ u32 rxin_numd;
+ void *resp_hdr;
+ dma_addr_t resp_hdr_daddr;
+};
+
/* PDC state structure */
struct pdc_state {
- /* synchronize access to this PDC state structure */
- spinlock_t pdc_lock;
-
/* Index of the PDC whose state is in this structure instance */
u8 pdc_idx;
@@ -272,13 +298,8 @@ struct pdc_state {
unsigned int pdc_irq;
- /*
- * Last interrupt status read from PDC device. Saved in interrupt
- * handler so the handler can clear the interrupt in the device,
- * and the interrupt thread called later can know which interrupt
- * bits are active.
- */
- unsigned long intstatus;
+ /* tasklet for deferred processing after DMA rx interrupt */
+ struct tasklet_struct rx_tasklet;
/* Number of bytes of receive status prior to each rx frame */
u32 rx_status_len;
@@ -369,11 +390,7 @@ struct pdc_state {
/* Index of next rx descriptor to post. */
u32 rxout;
- /*
- * opaque context associated with frame that starts at each
- * rx ring index.
- */
- void *rxp_ctx[PDC_RING_ENTRIES];
+ struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];
/*
* Scatterlists used to form request and reply frames beginning at a
@@ -381,27 +398,18 @@ struct pdc_state {
* is processed
*/
struct scatterlist *src_sg[PDC_RING_ENTRIES];
- struct scatterlist *dst_sg[PDC_RING_ENTRIES];
-
- /*
- * Number of rx descriptors associated with the message that starts
- * at this descriptor index. Not set for every index. For example,
- * if descriptor index i points to a scatterlist with 4 entries, then
- * the next three descriptor indexes don't have a value set.
- */
- u32 rxin_numd[PDC_RING_ENTRIES];
-
- void *resp_hdr[PDC_RING_ENTRIES];
- dma_addr_t resp_hdr_daddr[PDC_RING_ENTRIES];
struct dentry *debugfs_stats; /* debug FS stats file for this PDC */
/* counters */
- u32 pdc_requests; /* number of request messages submitted */
- u32 pdc_replies; /* number of reply messages received */
- u32 txnobuf; /* count of tx ring full */
- u32 rxnobuf; /* count of rx ring full */
- u32 rx_oflow; /* count of rx overflows */
+ u32 pdc_requests; /* number of request messages submitted */
+ u32 pdc_replies; /* number of reply messages received */
+ u32 last_tx_not_done; /* too few tx descriptors to indicate done */
+ u32 tx_ring_full; /* unable to accept msg because tx ring full */
+ u32 rx_ring_full; /* unable to accept msg because rx ring full */
+ u32 txnobuf; /* unable to create tx descriptor */
+ u32 rxnobuf; /* unable to create rx descriptor */
+ u32 rx_oflow; /* count of rx overflows */
};
/* Global variables */
@@ -434,20 +442,33 @@ static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"SPU %u stats:\n", pdcs->pdc_idx);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "PDC requests............%u\n",
+ "PDC requests....................%u\n",
pdcs->pdc_requests);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "PDC responses...........%u\n",
+ "PDC responses...................%u\n",
pdcs->pdc_replies);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "Tx err ring full........%u\n",
+ "Tx not done.....................%u\n",
+ pdcs->last_tx_not_done);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Tx ring full....................%u\n",
+ pdcs->tx_ring_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Rx ring full....................%u\n",
+ pdcs->rx_ring_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Tx desc write fail. Ring full...%u\n",
pdcs->txnobuf);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "Rx err ring full........%u\n",
+ "Rx desc write fail. Ring full...%u\n",
pdcs->rxnobuf);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "Receive overflow........%u\n",
+ "Receive overflow................%u\n",
pdcs->rx_oflow);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Num frags in rx ring............%u\n",
+ NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
+ pdcs->nrxpost));
if (out_offset > out_count)
out_offset = out_count;
@@ -480,17 +501,16 @@ static void pdc_setup_debugfs(struct pdc_state *pdcs)
if (!debugfs_dir)
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, S_IRUSR,
+ /* S_IRUSR == 0400 */
+ pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400,
debugfs_dir, pdcs,
&pdc_debugfs_stats);
}
static void pdc_free_debugfs(void)
{
- if (debugfs_dir && simple_empty(debugfs_dir)) {
- debugfs_remove_recursive(debugfs_dir);
- debugfs_dir = NULL;
- }
+ debugfs_remove_recursive(debugfs_dir);
+ debugfs_dir = NULL;
}
/**
@@ -505,17 +525,17 @@ pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
u32 buf_len, u32 flags)
{
struct device *dev = &pdcs->pdev->dev;
+ struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];
dev_dbg(dev,
"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
pdcs->pdc_idx, pdcs->rxout, buf_len, flags);
- iowrite32(lower_32_bits(dma_addr),
- (void *)&pdcs->rxd_64[pdcs->rxout].addrlow);
- iowrite32(upper_32_bits(dma_addr),
- (void *)&pdcs->rxd_64[pdcs->rxout].addrhigh);
- iowrite32(flags, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl1);
- iowrite32(buf_len, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl2);
+ rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+ rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+ rxd->ctrl1 = cpu_to_le32(flags);
+ rxd->ctrl2 = cpu_to_le32(buf_len);
+
/* bump ring index and return */
pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}
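The descriptor-write helpers stop using iowrite32() for the ring entries themselves: the rings are allocated from a dma_pool (see pdc_ring_init() further down), so they are ordinary coherent memory rather than MMIO, and plain little-endian stores suffice; only the ring pointer registers keep iowrite32(). A minimal sketch of the changed style:

	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	/* Ring memory is coherent DMA memory, not MMIO: store LE values directly. */
	rxd->addrlow  = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1    = cpu_to_le32(flags);
	rxd->ctrl2    = cpu_to_le32(buf_len);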
@@ -533,53 +553,50 @@ pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
u32 flags)
{
struct device *dev = &pdcs->pdev->dev;
+ struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];
dev_dbg(dev,
"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
pdcs->pdc_idx, pdcs->txout, buf_len, flags);
- iowrite32(lower_32_bits(dma_addr),
- (void *)&pdcs->txd_64[pdcs->txout].addrlow);
- iowrite32(upper_32_bits(dma_addr),
- (void *)&pdcs->txd_64[pdcs->txout].addrhigh);
- iowrite32(flags, (void *)&pdcs->txd_64[pdcs->txout].ctrl1);
- iowrite32(buf_len, (void *)&pdcs->txd_64[pdcs->txout].ctrl2);
+ txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+ txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+ txd->ctrl1 = cpu_to_le32(flags);
+ txd->ctrl2 = cpu_to_le32(buf_len);
/* bump ring index and return */
pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}
/**
- * pdc_receive() - Receive a response message from a given SPU.
+ * pdc_receive_one() - Receive a response message from a given SPU.
* @pdcs: PDC state for the SPU to receive from
- * @mssg: mailbox message to be returned to client
*
* When the return code indicates success, the response message is available in
* the receive buffers provided prior to submission of the request.
*
- * Input:
- * pdcs - PDC state structure for the SPU to be polled
- * mssg - mailbox message to be returned to client. This function sets the
- * context pointer on the message to help the client associate the
- * response with a request.
- *
* Return: PDC_SUCCESS if one or more receive descriptors was processed
* -EAGAIN indicates that no response message is available
* -EIO an error occurred
*/
static int
-pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
+pdc_receive_one(struct pdc_state *pdcs)
{
struct device *dev = &pdcs->pdev->dev;
+ struct mbox_controller *mbc;
+ struct mbox_chan *chan;
+ struct brcm_message mssg;
u32 len, rx_status;
u32 num_frags;
- int i;
u8 *resp_hdr; /* virtual addr of start of resp message DMA header */
u32 frags_rdy; /* number of fragments ready to read */
u32 rx_idx; /* ring index of start of receive frame */
dma_addr_t resp_hdr_daddr;
+ struct pdc_rx_ctx *rx_ctx;
- spin_lock(&pdcs->pdc_lock);
+ mbc = &pdcs->mbc;
+ chan = &mbc->chans[0];
+ mssg.type = BRCM_MESSAGE_SPU;
/*
* return if a complete response message is not yet ready.
@@ -587,47 +604,34 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
* to read.
*/
frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
- if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
- /* See if the hw has written more fragments than we know */
- pdcs->last_rx_curr =
- (ioread32((void *)&pdcs->rxregs_64->status0) &
- CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
- frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
- pdcs->nrxpost);
- if ((frags_rdy == 0) ||
- (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
- /* No response ready */
- spin_unlock(&pdcs->pdc_lock);
- return -EAGAIN;
- }
- /* can't read descriptors/data until write index is read */
- rmb();
- }
+ if ((frags_rdy == 0) ||
+ (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
+ /* No response ready */
+ return -EAGAIN;
num_frags = pdcs->txin_numd[pdcs->txin];
+ WARN_ON(num_frags == 0);
+
dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);
- for (i = 0; i < num_frags; i++)
- pdcs->txin = NEXTTXD(pdcs->txin, pdcs->ntxpost);
+ pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;
dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
pdcs->pdc_idx, num_frags);
rx_idx = pdcs->rxin;
- num_frags = pdcs->rxin_numd[rx_idx];
+ rx_ctx = &pdcs->rx_ctx[rx_idx];
+ num_frags = rx_ctx->rxin_numd;
/* Return opaque context with result */
- mssg->ctx = pdcs->rxp_ctx[rx_idx];
- pdcs->rxp_ctx[rx_idx] = NULL;
- resp_hdr = pdcs->resp_hdr[rx_idx];
- resp_hdr_daddr = pdcs->resp_hdr_daddr[rx_idx];
- dma_unmap_sg(dev, pdcs->dst_sg[rx_idx],
- sg_nents(pdcs->dst_sg[rx_idx]), DMA_FROM_DEVICE);
-
- for (i = 0; i < num_frags; i++)
- pdcs->rxin = NEXTRXD(pdcs->rxin, pdcs->nrxpost);
+ mssg.ctx = rx_ctx->rxp_ctx;
+ rx_ctx->rxp_ctx = NULL;
+ resp_hdr = rx_ctx->resp_hdr;
+ resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
+ dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
+ DMA_FROM_DEVICE);
- spin_unlock(&pdcs->pdc_lock);
+ pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;
dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
pdcs->pdc_idx, num_frags);
@@ -659,12 +663,35 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
+ mbox_chan_received_data(chan, &mssg);
+
pdcs->pdc_replies++;
- /* if we read one or more rx descriptors, claim success */
- if (num_frags > 0)
- return PDC_SUCCESS;
- else
- return -EIO;
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_receive() - Process as many responses as are available in the rx ring.
+ * @pdcs: PDC state
+ *
+ * Called from the receive tasklet to drain the rx ring.
+ * Return: 0 once all available responses have been processed
+ */
+static int
+pdc_receive(struct pdc_state *pdcs)
+{
+ int rx_status;
+
+ /* read last_rx_curr from register once */
+ pdcs->last_rx_curr =
+ (ioread32(&pdcs->rxregs_64->status0) &
+ CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
+
+ do {
+ /* Could be many frames ready */
+ rx_status = pdc_receive_one(pdcs);
+ } while (rx_status == PDC_SUCCESS);
+
+ return 0;
}
/**
@@ -766,8 +793,8 @@ static int pdc_tx_list_final(struct pdc_state *pdcs)
* before chip starts to process new request
*/
wmb();
- iowrite32(pdcs->rxout << 4, (void *)&pdcs->rxregs_64->ptr);
- iowrite32(pdcs->txout << 4, (void *)&pdcs->txregs_64->ptr);
+ iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
+ iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
pdcs->pdc_requests++;
return PDC_SUCCESS;
@@ -796,6 +823,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
u32 rx_pkt_cnt = 1; /* Adding a single rx buffer */
dma_addr_t daddr;
void *vaddr;
+ struct pdc_rx_ctx *rx_ctx;
rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
pdcs->nrxpost);
@@ -806,7 +834,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
/* allocate a buffer for the dma rx status */
vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
- if (!vaddr)
+ if (unlikely(!vaddr))
return -ENOMEM;
/*
@@ -819,15 +847,16 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
/* This is always the first descriptor in the receive sequence */
flags = D64_CTRL1_SOF;
- pdcs->rxin_numd[pdcs->rx_msg_start] = 1;
+ pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;
if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
flags |= D64_CTRL1_EOT;
- pdcs->rxp_ctx[pdcs->rxout] = ctx;
- pdcs->dst_sg[pdcs->rxout] = dst_sg;
- pdcs->resp_hdr[pdcs->rxout] = vaddr;
- pdcs->resp_hdr_daddr[pdcs->rxout] = daddr;
+ rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
+ rx_ctx->rxp_ctx = ctx;
+ rx_ctx->dst_sg = dst_sg;
+ rx_ctx->resp_hdr = vaddr;
+ rx_ctx->resp_hdr_daddr = daddr;
pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
return PDC_SUCCESS;
}
@@ -895,7 +924,7 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
desc_w++;
sg = sg_next(sg);
}
- pdcs->rxin_numd[pdcs->rx_msg_start] += desc_w;
+ pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;
return PDC_SUCCESS;
}
@@ -903,7 +932,7 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
/**
* pdc_irq_handler() - Interrupt handler called in interrupt context.
* @irq: Interrupt number that has fired
- * @cookie: PDC state for DMA engine that generated the interrupt
+ * @data: device struct for DMA engine that generated the interrupt
*
* We have to clear the device interrupt status flags here. So cache the
* status for later use in the thread function. Other than that, just return
@@ -912,88 +941,39 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
* Return: IRQ_WAKE_THREAD if interrupt is ours
* IRQ_NONE otherwise
*/
-static irqreturn_t pdc_irq_handler(int irq, void *cookie)
+static irqreturn_t pdc_irq_handler(int irq, void *data)
{
- struct pdc_state *pdcs = cookie;
+ struct device *dev = (struct device *)data;
+ struct pdc_state *pdcs = dev_get_drvdata(dev);
u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
- if (intstatus & PDC_XMTINTEN_0)
- set_bit(PDC_XMTINT_0, &pdcs->intstatus);
- if (intstatus & PDC_RCVINTEN_0)
- set_bit(PDC_RCVINT_0, &pdcs->intstatus);
+ if (unlikely(intstatus == 0))
+ return IRQ_NONE;
+
+ /* Disable interrupts until soft handler runs */
+ iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
/* Clear interrupt flags in device */
iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
/* Wakeup IRQ thread */
- if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK))
- return IRQ_WAKE_THREAD;
-
- return IRQ_NONE;
+ tasklet_schedule(&pdcs->rx_tasklet);
+ return IRQ_HANDLED;
}
/**
- * pdc_irq_thread() - Function invoked on deferred thread when a DMA tx has
- * completed or data is available to receive.
- * @irq: Interrupt number
- * @cookie: PDC state for PDC that generated the interrupt
- *
- * On DMA tx complete, notify the mailbox client. On DMA rx complete, process
- * as many SPU response messages as are available and send each to the mailbox
- * client.
- *
- * Return: IRQ_HANDLED if we recognized and handled the interrupt
- * IRQ_NONE otherwise
+ * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
+ * a DMA receive interrupt. Reenables the receive interrupt.
+ * @data: PDC state structure
*/
-static irqreturn_t pdc_irq_thread(int irq, void *cookie)
+static void pdc_tasklet_cb(unsigned long data)
{
- struct pdc_state *pdcs = cookie;
- struct mbox_controller *mbc;
- struct mbox_chan *chan;
- bool tx_int;
- bool rx_int;
- int rx_status;
- struct brcm_message mssg;
+ struct pdc_state *pdcs = (struct pdc_state *)data;
- tx_int = test_and_clear_bit(PDC_XMTINT_0, &pdcs->intstatus);
- rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);
+ pdc_receive(pdcs);
- if (pdcs && (tx_int || rx_int)) {
- dev_dbg(&pdcs->pdev->dev,
- "%s() got irq %d with tx_int %s, rx_int %s",
- __func__, irq,
- tx_int ? "set" : "clear", rx_int ? "set" : "clear");
-
- mbc = &pdcs->mbc;
- chan = &mbc->chans[0];
-
- if (tx_int) {
- dev_dbg(&pdcs->pdev->dev, "%s(): tx done", __func__);
- /* only one frame in flight at a time */
- mbox_chan_txdone(chan, PDC_SUCCESS);
- }
- if (rx_int) {
- while (1) {
- /* Could be many frames ready */
- memset(&mssg, 0, sizeof(mssg));
- mssg.type = BRCM_MESSAGE_SPU;
- rx_status = pdc_receive(pdcs, &mssg);
- if (rx_status >= 0) {
- dev_dbg(&pdcs->pdev->dev,
- "%s(): invoking client rx cb",
- __func__);
- mbox_chan_received_data(chan, &mssg);
- } else {
- dev_dbg(&pdcs->pdev->dev,
- "%s(): no SPU response available",
- __func__);
- break;
- }
- }
- }
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
+ /* reenable interrupts */
+ iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}
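The interrupt path is reworked from a threaded IRQ into a minimal hard handler plus a tasklet: the handler masks the device interrupt, acks the status, and schedules the tasklet; the tasklet drains the rx ring and unmasks the interrupt again. A compressed, hedged sketch of that hand-off (function names invented, register offsets as used above):

	static irqreturn_t example_irq_handler(int irq, void *data)
	{
		struct pdc_state *pdcs = dev_get_drvdata((struct device *)data);
		u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

		if (!intstatus)
			return IRQ_NONE;

		iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);		   /* mask */
		iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET); /* ack */
		tasklet_schedule(&pdcs->rx_tasklet);
		return IRQ_HANDLED;
	}

	static void example_tasklet_cb(unsigned long data)
	{
		struct pdc_state *pdcs = (struct pdc_state *)data;

		pdc_receive(pdcs);						   /* drain rx */
		iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET); /* unmask */
	}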
/**
@@ -1016,14 +996,14 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
/* Allocate tx ring */
tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
- if (!tx.vbase) {
+ if (unlikely(!tx.vbase)) {
err = -ENOMEM;
goto done;
}
/* Allocate rx ring */
rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
- if (!rx.vbase) {
+ if (unlikely(!rx.vbase)) {
err = -ENOMEM;
goto fail_dealloc;
}
@@ -1033,9 +1013,6 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);
- /* lock after ring allocation to avoid scheduling while atomic */
- spin_lock(&pdcs->pdc_lock);
-
memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));
@@ -1053,40 +1030,52 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
/* Tell device the base DMA address of each ring */
dma_reg = &pdcs->regs->dmaregs[ringset];
+
+ /* But first disable DMA and set curptr to 0 for both TX & RX */
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+ iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
+ &dma_reg->dmarcv.control);
+ iowrite32(0, &dma_reg->dmaxmt.ptr);
+ iowrite32(0, &dma_reg->dmarcv.ptr);
+
+ /* Set base DMA addresses */
iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
- (void *)&dma_reg->dmaxmt.addrlow);
+ &dma_reg->dmaxmt.addrlow);
iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
- (void *)&dma_reg->dmaxmt.addrhigh);
+ &dma_reg->dmaxmt.addrhigh);
iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
- (void *)&dma_reg->dmarcv.addrlow);
+ &dma_reg->dmarcv.addrlow);
iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
- (void *)&dma_reg->dmarcv.addrhigh);
+ &dma_reg->dmarcv.addrhigh);
+
+ /* Re-enable DMA */
+ iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
+ iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
+ &dma_reg->dmarcv.control);
/* Initialize descriptors */
for (i = 0; i < PDC_RING_ENTRIES; i++) {
/* Every tx descriptor can be used for start of frame. */
if (i != pdcs->ntxpost) {
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
- (void *)&pdcs->txd_64[i].ctrl1);
+ &pdcs->txd_64[i].ctrl1);
} else {
/* Last descriptor in ringset. Set End of Table. */
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
- D64_CTRL1_EOT,
- (void *)&pdcs->txd_64[i].ctrl1);
+ D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
}
/* Every rx descriptor can be used for start of frame */
if (i != pdcs->nrxpost) {
iowrite32(D64_CTRL1_SOF,
- (void *)&pdcs->rxd_64[i].ctrl1);
+ &pdcs->rxd_64[i].ctrl1);
} else {
/* Last descriptor in ringset. Set End of Table. */
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
- (void *)&pdcs->rxd_64[i].ctrl1);
+ &pdcs->rxd_64[i].ctrl1);
}
}
- spin_unlock(&pdcs->pdc_lock);
return PDC_SUCCESS;
fail_dealloc:
@@ -1111,6 +1100,80 @@ static void pdc_ring_free(struct pdc_state *pdcs)
}
/**
+ * pdc_desc_count() - Count the number of DMA descriptors that will be required
+ * for a given scatterlist. Account for the max length of a DMA buffer.
+ * @sg: Scatterlist to be DMA'd
+ * Return: Number of descriptors required
+ */
+static u32 pdc_desc_count(struct scatterlist *sg)
+{
+ u32 cnt = 0;
+
+ while (sg) {
+ cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
+ sg = sg_next(sg);
+ }
+ return cnt;
+}
+
+/**
+ * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
+ * and the rx ring has room for rx_cnt descriptors.
+ * @pdcs: PDC state
+ * @tx_cnt: The number of descriptors required in the tx ring
+ * @rx_cnt: The number of descriptors required in the rx ring
+ *
+ * Return: true if one of the rings does not have enough space
+ * false if sufficient space is available in both rings
+ */
+static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
+{
+ u32 rx_avail;
+ u32 tx_avail;
+ bool full = false;
+
+ /* Check if the tx and rx rings are likely to have enough space */
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(rx_cnt > rx_avail)) {
+ pdcs->rx_ring_full++;
+ full = true;
+ }
+
+ if (likely(!full)) {
+ tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
+ pdcs->ntxpost);
+ if (unlikely(tx_cnt > tx_avail)) {
+ pdcs->tx_ring_full++;
+ full = true;
+ }
+ }
+ return full;
+}
+
+/**
+ * pdc_last_tx_done() - If both the tx and rx rings have at least
+ * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
+ * framework can submit another message.
+ * @chan: mailbox channel to check
+ * Return: true if PDC can accept another message on this channel
+ */
+static bool pdc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+ bool ret;
+
+ if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
+ PDC_RING_SPACE_MIN))) {
+ pdcs->last_tx_not_done++;
+ ret = false;
+ } else {
+ ret = true;
+ }
+ return ret;
+}
+
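With transmit-done interrupts dropped, completion signalling moves to the mailbox framework's polling mode: the controller advertises txdone_poll (with a 1 ms poll period) instead of txdone_irq, and the framework calls last_tx_done() to decide when another message may be submitted; the implementation above simply checks that both rings keep at least PDC_RING_SPACE_MIN free entries. A hedged sketch of the controller-side wiring (the ops struct name is invented):

	static const struct mbox_chan_ops example_chan_ops = {
		.send_data    = pdc_send_data,
		.last_tx_done = pdc_last_tx_done,	/* polled by the framework */
		.startup      = pdc_startup,
		.shutdown     = pdc_shutdown,
	};

	/* during controller registration */
	mbc->ops           = &example_chan_ops;
	mbc->txdone_irq    = false;
	mbc->txdone_poll   = true;	/* framework polls last_tx_done() */
	mbc->txpoll_period = 1;		/* in milliseconds */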
+/**
* pdc_send_data() - mailbox send_data function
* @chan: The mailbox channel on which the data is sent. The channel
* corresponds to a DMA ringset.
@@ -1141,29 +1204,43 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
int src_nent;
int dst_nent;
int nent;
+ u32 tx_desc_req;
+ u32 rx_desc_req;
- if (mssg->type != BRCM_MESSAGE_SPU)
+ if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
return -ENOTSUPP;
src_nent = sg_nents(mssg->spu.src);
- if (src_nent) {
+ if (likely(src_nent)) {
nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
- if (nent == 0)
+ if (unlikely(nent == 0))
return -EIO;
}
dst_nent = sg_nents(mssg->spu.dst);
- if (dst_nent) {
+ if (likely(dst_nent)) {
nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
DMA_FROM_DEVICE);
- if (nent == 0) {
+ if (unlikely(nent == 0)) {
dma_unmap_sg(dev, mssg->spu.src, src_nent,
DMA_TO_DEVICE);
return -EIO;
}
}
- spin_lock(&pdcs->pdc_lock);
+ /*
+ * Check if the tx and rx rings have enough space. Do this prior to
+ * writing any tx or rx descriptors. Need to ensure that we do not write
+ * a partial set of descriptors, or write only the rx descriptors when the
+ * corresponding tx descriptors do not fit. Note that we want this check
+ * and the entire sequence of descriptor writes to happen without another
+ * thread getting in. The channel spin lock in the mailbox framework
+ * ensures this.
+ */
+ tx_desc_req = pdc_desc_count(mssg->spu.src);
+ rx_desc_req = pdc_desc_count(mssg->spu.dst);
+ if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
+ return -ENOSPC;
/* Create rx descriptors to catch the SPU response */
err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
@@ -1173,9 +1250,7 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
err |= pdc_tx_list_final(pdcs); /* initiate transfer */
- spin_unlock(&pdcs->pdc_lock);
-
- if (err)
+ if (unlikely(err))
dev_err(&pdcs->pdev->dev,
"%s failed with error %d", __func__, err);
@@ -1224,26 +1299,29 @@ void pdc_hw_init(struct pdc_state *pdcs)
/* initialize data structures */
pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
pdcs->txregs_64 = (struct dma64_regs *)
- (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+ (((u8 *)pdcs->pdc_reg_vbase) +
PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
pdcs->rxregs_64 = (struct dma64_regs *)
- (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+ (((u8 *)pdcs->pdc_reg_vbase) +
PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));
pdcs->ntxd = PDC_RING_ENTRIES;
pdcs->nrxd = PDC_RING_ENTRIES;
pdcs->ntxpost = PDC_RING_ENTRIES - 1;
pdcs->nrxpost = PDC_RING_ENTRIES - 1;
- pdcs->regs->intmask = 0;
+ iowrite32(0, &pdcs->regs->intmask);
dma_reg = &pdcs->regs->dmaregs[ringset];
- iowrite32(0, (void *)&dma_reg->dmaxmt.ptr);
- iowrite32(0, (void *)&dma_reg->dmarcv.ptr);
- iowrite32(PDC_TX_CTL, (void *)&dma_reg->dmaxmt.control);
+ /* Configure DMA, but leave it disabled; it is enabled later in pdc_ring_init() */
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
- (void *)&dma_reg->dmarcv.control);
+ &dma_reg->dmarcv.control);
+
+ /* Reset current index pointers after making sure DMA is disabled */
+ iowrite32(0, &dma_reg->dmaxmt.ptr);
+ iowrite32(0, &dma_reg->dmarcv.ptr);
if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
iowrite32(PDC_CKSUM_CTRL,
@@ -1251,6 +1329,21 @@ void pdc_hw_init(struct pdc_state *pdcs)
}
/**
+ * pdc_hw_disable() - Disable the tx and rx control in the hw.
+ * @pdcs: PDC state structure
+ *
+ */
+static void pdc_hw_disable(struct pdc_state *pdcs)
+{
+ struct dma64 *dma_reg;
+
+ dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+ iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
+ &dma_reg->dmarcv.control);
+}
+
+/**
* pdc_rx_buf_pool_create() - Pool of receive buffers used to catch the metadata
* header returned with each response message.
* @pdcs: PDC state structure
@@ -1301,8 +1394,6 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
struct device_node *dn = pdev->dev.of_node;
int err;
- pdcs->intstatus = 0;
-
/* interrupt configuration */
iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET);
@@ -1311,11 +1402,11 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
dev_name(dev), pdcs->pdc_irq, pdcs);
- err = devm_request_threaded_irq(dev, pdcs->pdc_irq,
- pdc_irq_handler,
- pdc_irq_thread, 0, dev_name(dev), pdcs);
+
+ err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
+ dev_name(dev), dev);
if (err) {
- dev_err(dev, "threaded tx IRQ %u request failed with err %d\n",
+ dev_err(dev, "IRQ %u request failed with err %d\n",
pdcs->pdc_irq, err);
return err;
}
@@ -1324,6 +1415,7 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
static const struct mbox_chan_ops pdc_mbox_chan_ops = {
.send_data = pdc_send_data,
+ .last_tx_done = pdc_last_tx_done,
.startup = pdc_startup,
.shutdown = pdc_shutdown
};
@@ -1356,8 +1448,9 @@ static int pdc_mb_init(struct pdc_state *pdcs)
if (!mbc->chans)
return -ENOMEM;
- mbc->txdone_irq = true;
- mbc->txdone_poll = false;
+ mbc->txdone_irq = false;
+ mbc->txdone_poll = true;
+ mbc->txpoll_period = 1;
for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
mbc->chans[chan_index].con_priv = pdcs;
@@ -1427,7 +1520,6 @@ static int pdc_probe(struct platform_device *pdev)
goto cleanup;
}
- spin_lock_init(&pdcs->pdc_lock);
pdcs->pdev = pdev;
platform_set_drvdata(pdev, pdcs);
pdcs->pdc_idx = pdcg.num_spu;
@@ -1473,6 +1565,9 @@ static int pdc_probe(struct platform_device *pdev)
pdc_hw_init(pdcs);
+ /* Init tasklet for deferred DMA rx processing */
+ tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);
+
err = pdc_interrupts_init(pdcs);
if (err)
goto cleanup_buf_pool;
@@ -1489,6 +1584,7 @@ static int pdc_probe(struct platform_device *pdev)
return PDC_SUCCESS;
cleanup_buf_pool:
+ tasklet_kill(&pdcs->rx_tasklet);
dma_pool_destroy(pdcs->rx_buf_pool);
cleanup_ring_pool:
@@ -1504,6 +1600,10 @@ static int pdc_remove(struct platform_device *pdev)
pdc_free_debugfs();
+ tasklet_kill(&pdcs->rx_tasklet);
+
+ pdc_hw_disable(pdcs);
+
mbox_controller_unregister(&pdcs->mbc);
dma_pool_destroy(pdcs->rx_buf_pool);
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index a334db5c9f1c..41bcd339b68a 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -403,6 +403,7 @@ static const struct of_device_id sti_mailbox_match[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, sti_mailbox_match);
static int sti_mbox_probe(struct platform_device *pdev)
{
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 9ca96e9db6bf..9c79f8019d2a 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -11,12 +11,14 @@
#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -39,6 +41,8 @@ struct mbox_test_device {
char *signal;
char *message;
spinlock_t lock;
+ wait_queue_head_t waitq;
+ struct fasync_struct *async_queue;
};
static ssize_t mbox_test_signal_write(struct file *filp,
@@ -81,6 +85,13 @@ static const struct file_operations mbox_test_signal_ops = {
.llseek = generic_file_llseek,
};
+static int mbox_test_message_fasync(int fd, struct file *filp, int on)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ return fasync_helper(fd, filp, on, &tdev->async_queue);
+}
+
static ssize_t mbox_test_message_write(struct file *filp,
const char __user *userbuf,
size_t count, loff_t *ppos)
@@ -138,6 +149,20 @@ out:
return ret < 0 ? ret : count;
}
+static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
+{
+ unsigned char data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdev->lock, flags);
+ data = tdev->rx_buffer[0];
+ spin_unlock_irqrestore(&tdev->lock, flags);
+
+ if (data != '\0')
+ return true;
+ return false;
+}
+
static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
{
@@ -147,6 +172,8 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
int l = 0;
int ret;
+ DECLARE_WAITQUEUE(wait, current);
+
touser = kzalloc(MBOX_HEXDUMP_MAX_LEN + 1, GFP_KERNEL);
if (!touser)
return -ENOMEM;
@@ -155,15 +182,29 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
ret = snprintf(touser, 20, "<NO RX CAPABILITY>\n");
ret = simple_read_from_buffer(userbuf, count, ppos,
touser, ret);
- goto out;
+ goto kfree_err;
}
- if (tdev->rx_buffer[0] == '\0') {
- ret = snprintf(touser, 9, "<EMPTY>\n");
- ret = simple_read_from_buffer(userbuf, count, ppos,
- touser, ret);
- goto out;
- }
+ add_wait_queue(&tdev->waitq, &wait);
+
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (mbox_test_message_data_ready(tdev))
+ break;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto waitq_err;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto waitq_err;
+ }
+ schedule();
+
+ } while (1);
spin_lock_irqsave(&tdev->lock, flags);
@@ -185,14 +226,31 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
spin_unlock_irqrestore(&tdev->lock, flags);
ret = simple_read_from_buffer(userbuf, count, ppos, touser, MBOX_HEXDUMP_MAX_LEN);
-out:
+waitq_err:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&tdev->waitq, &wait);
+kfree_err:
kfree(touser);
return ret;
}
+static unsigned int
+mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ poll_wait(filp, &tdev->waitq, wait);
+
+ if (mbox_test_message_data_ready(tdev))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
static const struct file_operations mbox_test_message_ops = {
.write = mbox_test_message_write,
.read = mbox_test_message_read,
+ .fasync = mbox_test_message_fasync,
+ .poll = mbox_test_message_poll,
.open = simple_open,
.llseek = generic_file_llseek,
};
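The mailbox-test client gains the usual character-device readiness plumbing: a helper checks, under the spinlock, whether the first byte of the rx buffer is non-zero; read() blocks on the waitqueue until that predicate holds (or returns -EAGAIN for O_NONBLOCK); poll() reports POLLIN from the same predicate; and, as the next hunk shows, the receive callback wakes the queue and raises SIGIO for fasync users. A compact hedged sketch of the reader/notifier pairing (the open-coded wait loop above is equivalent to the wait_event form shown here):

	/* reader side, simplified */
	if (wait_event_interruptible(tdev->waitq,
				     mbox_test_message_data_ready(tdev)))
		return -ERESTARTSYS;

	/* notifier side, run from the mailbox receive callback */
	wake_up_interruptible(&tdev->waitq);
	kill_fasync(&tdev->async_queue, SIGIO, POLL_IN);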
@@ -234,6 +292,10 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
}
spin_unlock_irqrestore(&tdev->lock, flags);
+
+ wake_up_interruptible(&tdev->waitq);
+
+ kill_fasync(&tdev->async_queue, SIGIO, POLL_IN);
}
static void mbox_test_prepare_message(struct mbox_client *client, void *message)
@@ -290,6 +352,7 @@ static int mbox_test_probe(struct platform_device *pdev)
{
struct mbox_test_device *tdev;
struct resource *res;
+ resource_size_t size;
int ret;
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
@@ -298,14 +361,21 @@ static int mbox_test_probe(struct platform_device *pdev)
/* It's okay for MMIO to be NULL */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ size = resource_size(res);
tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tdev->tx_mmio))
+ if (PTR_ERR(tdev->tx_mmio) == -EBUSY)
+ /* if reserved area in SRAM, try just ioremap */
+ tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+ else if (IS_ERR(tdev->tx_mmio))
tdev->tx_mmio = NULL;
/* If specified, second reg entry is Rx MMIO */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ size = resource_size(res);
tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tdev->rx_mmio))
+ if (PTR_ERR(tdev->rx_mmio) == -EBUSY)
+ tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+ else if (IS_ERR(tdev->rx_mmio))
tdev->rx_mmio = tdev->tx_mmio;
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
@@ -334,6 +404,7 @@ static int mbox_test_probe(struct platform_device *pdev)
if (ret)
return ret;
+ init_waitqueue_head(&tdev->waitq);
dev_info(&pdev->dev, "Successfully registered\n");
return 0;
@@ -357,6 +428,7 @@ static const struct of_device_id mbox_test_match[] = {
{ .compatible = "mailbox-test" },
{},
};
+MODULE_DEVICE_TABLE(of, mbox_test_match);
static struct platform_driver mbox_test_driver = {
.driver = {
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 1f32688c312d..dd9ecd354a3e 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -447,7 +447,6 @@ static int pcc_parse_subspace_irq(int id,
*/
static int __init acpi_pcc_probe(void)
{
- acpi_size pcct_tbl_header_size;
struct acpi_table_header *pcct_tbl;
struct acpi_subtable_header *pcct_entry;
struct acpi_table_pcct *acpi_pcct_tbl;
@@ -456,9 +455,7 @@ static int __init acpi_pcc_probe(void)
acpi_status status = AE_OK;
/* Search for PCCT */
- status = acpi_get_table_with_size(ACPI_SIG_PCCT, 0,
- &pcct_tbl,
- &pcct_tbl_header_size);
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
if (ACPI_FAILURE(status) || !pcct_tbl) {
pr_warn("PCCT header not found.\n");
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 6b420a55c745..c3ea03c9a1a8 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -425,7 +425,7 @@ struct cache {
* until a gc finishes - otherwise we could pointlessly burn a ton of
* cpu
*/
- unsigned invalidate_needs_gc:1;
+ unsigned invalidate_needs_gc;
bool discard; /* Get rid of? */
@@ -593,8 +593,8 @@ struct cache_set {
/* Counts how many sectors bio_insert has added to the cache */
atomic_t sectors_to_gc;
+ wait_queue_head_t gc_wait;
- wait_queue_head_t moving_gc_wait;
struct keybuf moving_gc_keys;
/* Number of moving GC bios in flight */
struct semaphore moving_in_flight;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 6fdd8e252760..a43eedd5804d 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
bch_moving_gc(c);
}
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
{
- struct cache_set *c = arg;
struct cache *ca;
unsigned i;
- while (1) {
-again:
- bch_btree_gc(c);
+ for_each_cache(ca, c, i)
+ if (ca->invalidate_needs_gc)
+ return true;
- set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop())
- break;
+ if (atomic_read(&c->sectors_to_gc) < 0)
+ return true;
- mutex_lock(&c->bucket_lock);
+ return false;
+}
- for_each_cache(ca, c, i)
- if (ca->invalidate_needs_gc) {
- mutex_unlock(&c->bucket_lock);
- set_current_state(TASK_RUNNING);
- goto again;
- }
+static int bch_gc_thread(void *arg)
+{
+ struct cache_set *c = arg;
- mutex_unlock(&c->bucket_lock);
+ while (1) {
+ wait_event_interruptible(c->gc_wait,
+ kthread_should_stop() || gc_should_run(c));
- schedule();
+ if (kthread_should_stop())
+ break;
+
+ set_gc_sectors(c);
+ bch_btree_gc(c);
}
return 0;
@@ -1790,11 +1792,10 @@ again:
int bch_gc_thread_start(struct cache_set *c)
{
- c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+ c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
if (IS_ERR(c->gc_thread))
return PTR_ERR(c->gc_thread);
- set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
return 0;
}
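The gc kthread is simplified from an open-coded prepare-to-wait loop into wait_event_interruptible() with a predicate function, and is started with kthread_run() so no manual task-state setup is needed before the first wakeup. The resulting loop shape, sketched with an invented function name:

	static int example_gc_thread(void *arg)
	{
		struct cache_set *c = arg;

		while (1) {
			/* Sleep until asked to stop or until gc work exists. */
			wait_event_interruptible(c->gc_wait,
						 kthread_should_stop() || gc_should_run(c));
			if (kthread_should_stop())
				break;

			set_gc_sectors(c);	/* re-arm the sectors_to_gc trigger */
			bch_btree_gc(c);
		}
		return 0;
	}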
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 5c391fa01bed..9b80417cd547 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
static inline void wake_up_gc(struct cache_set *c)
{
- if (c->gc_thread)
- wake_up_process(c->gc_thread);
+ wake_up(&c->gc_wait);
}
#define MAP_DONE 0
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index f49c5417527d..76d20875503c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
- set_gc_sectors(op->c);
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
wake_up_gc(op->c);
- }
if (op->bypass)
return bch_data_invalidate(cl);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2fb5bfeb43e2..3a19cbc8b230 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -58,6 +58,7 @@ static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
+#define BCACHE_MINORS 16 /* partition support */
/* Superblock */
@@ -783,8 +784,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (minor < 0)
return minor;
+ minor *= BCACHE_MINORS;
+
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(d->disk = alloc_disk(1))) {
+ !(d->disk = alloc_disk(BCACHE_MINORS))) {
ida_simple_remove(&bcache_minor, minor);
return -ENOMEM;
}
@@ -1489,6 +1492,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->btree_cache_wait);
init_waitqueue_head(&c->bucket_wait);
+ init_waitqueue_head(&c->gc_wait);
sema_init(&c->uuid_write_mutex, 1);
spin_lock_init(&c->btree_gc_time.lock);
@@ -1548,6 +1552,7 @@ static void run_cache_set(struct cache_set *c)
for_each_cache(ca, c, i)
c->nbuckets += ca->sb.nbuckets;
+ set_gc_sectors(c);
if (CACHE_SYNC(&c->sb)) {
LIST_HEAD(journal);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 543eadd230e5..1076b9d89df3 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -496,8 +496,7 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
* Returns enum mmc_blk_status after checking errors.
*/
static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
- struct mmc_request *mrq,
- struct mmc_async_req *next_req)
+ struct mmc_request *mrq)
{
struct mmc_command *cmd;
struct mmc_context_info *context_info = &host->context_info;
@@ -507,7 +506,7 @@ static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
wait_event_interruptible(context_info->wait,
(context_info->is_done_rcv ||
context_info->is_new_req));
- context_info->is_waiting_last_req = false;
+
if (context_info->is_done_rcv) {
context_info->is_done_rcv = false;
cmd = mrq->cmd;
@@ -527,10 +526,9 @@ static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
__mmc_start_request(host, mrq);
continue; /* wait for done/new event again */
}
- } else if (context_info->is_new_req) {
- if (!next_req)
- return MMC_BLK_NEW_REQUEST;
}
+
+ return MMC_BLK_NEW_REQUEST;
}
mmc_retune_release(host);
return status;
@@ -660,7 +658,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
mmc_pre_req(host, areq->mrq);
if (host->areq) {
- status = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
+ status = mmc_wait_for_data_req_done(host, host->areq->mrq);
if (status == MMC_BLK_NEW_REQUEST) {
if (ret_stat)
*ret_stat = status;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index deb90c2ff6b4..a614f37faf27 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -223,6 +223,7 @@ static int mmc_decode_scr(struct mmc_card *card)
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
+ u32 *raw_ssr;
int i;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -231,14 +232,21 @@ static int mmc_read_ssr(struct mmc_card *card)
return 0;
}
- if (mmc_app_sd_status(card, card->raw_ssr)) {
+ raw_ssr = kmalloc(sizeof(card->raw_ssr), GFP_KERNEL);
+ if (!raw_ssr)
+ return -ENOMEM;
+
+ if (mmc_app_sd_status(card, raw_ssr)) {
pr_warn("%s: problem reading SD Status register\n",
mmc_hostname(card->host));
+ kfree(raw_ssr);
return 0;
}
for (i = 0; i < 16; i++)
- card->raw_ssr[i] = be32_to_cpu(card->raw_ssr[i]);
+ card->raw_ssr[i] = be32_to_cpu(raw_ssr[i]);
+
+ kfree(raw_ssr);
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 1501cfdac473..4b0ecb981842 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -262,6 +262,7 @@ disable_clk:
}
static const struct of_device_id sdhci_cdns_match[] = {
+ { .compatible = "socionext,uniphier-sd4hc" },
{ .compatible = "cdns,sd4hc" },
{ /* sentinel */ }
};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 111991e5b9a0..23909804ffb8 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1576,6 +1576,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned long flags;
u8 ctrl;
+ if (ios->power_mode == MMC_POWER_UNDEFINED)
+ return;
+
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD) {
@@ -2938,22 +2941,24 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
- /* Force clock and power re-program */
- host->pwr = 0;
- host->clock = 0;
- mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
- mmc->ops->set_ios(mmc, &mmc->ios);
+ if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
+ /* Force clock and power re-program */
+ host->pwr = 0;
+ host->clock = 0;
+ mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
+ mmc->ops->set_ios(mmc, &mmc->ios);
- if ((host_flags & SDHCI_PV_ENABLED) &&
- !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
- spin_lock_irqsave(&host->lock, flags);
- sdhci_enable_preset_value(host, true);
- spin_unlock_irqrestore(&host->lock, flags);
- }
+ if ((host_flags & SDHCI_PV_ENABLED) &&
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_enable_preset_value(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
- if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
- mmc->ops->hs400_enhanced_strobe)
- mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+ if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
+ mmc->ops->hs400_enhanced_strobe)
+ mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+ }
spin_lock_irqsave(&host->lock, flags);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 6e16e441f85e..e4c28fed61d5 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -166,7 +166,6 @@ source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
-source "drivers/net/ethernet/sfc/falcon/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 16e12c45904b..21f80f5744ba 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1469,6 +1469,12 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
p->agl_prt_ctl_size);
+ if (!p->mix || !p->agl || !p->agl_prt_ctl) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ result = -ENOMEM;
+ goto err;
+ }
+
spin_lock_init(&p->lock);
skb_queue_head_init(&p->tx_list);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fba3b2ad382d..a267173f5997 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -76,6 +76,7 @@ enum {
CPL_PASS_ESTABLISH = 0x41,
CPL_RX_DATA_DDP = 0x42,
CPL_PASS_ACCEPT_REQ = 0x44,
+ CPL_RX_ISCSI_CMP = 0x45,
CPL_TRACE_PKT_T5 = 0x48,
CPL_RX_ISCSI_DDP = 0x49,
@@ -934,6 +935,18 @@ struct cpl_iscsi_data {
__u8 status;
};
+struct cpl_rx_iscsi_cmp {
+ union opcode_tid ot;
+ __be16 pdu_len_ddp;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd;
+ __u8 status;
+ __be32 ulp_crc;
+ __be32 ddpvld;
+};
+
struct cpl_tx_data_iso {
__be32 op_to_scsi;
__u8 reserved1;
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 79b7c84b7869..dc0850b3b517 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -1,6 +1,6 @@
config FSL_FMAN
tristate "FMan support"
- depends on FSL_SOC || COMPILE_TEST
+ depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
select PHYLIB
default n
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index dafd9e1baba2..f60845f0c6ca 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1890,6 +1890,7 @@ static int fman_reset(struct fman *fman)
goto _return;
} else {
+#ifdef CONFIG_PPC
struct device_node *guts_node;
struct ccsr_guts __iomem *guts_regs;
u32 devdisr2, reg;
@@ -1921,6 +1922,7 @@ static int fman_reset(struct fman *fman)
/* Enable all MACs */
iowrite32be(reg, &guts_regs->devdisr2);
+#endif
/* Perform FMan reset */
iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
@@ -1932,25 +1934,31 @@ static int fman_reset(struct fman *fman)
} while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
FPM_RSTC_FM_RESET) && --count);
if (count == 0) {
+#ifdef CONFIG_PPC
iounmap(guts_regs);
of_node_put(guts_node);
+#endif
err = -EBUSY;
goto _return;
}
+#ifdef CONFIG_PPC
/* Restore devdisr2 value */
iowrite32be(devdisr2, &guts_regs->devdisr2);
iounmap(guts_regs);
of_node_put(guts_node);
+#endif
goto _return;
+#ifdef CONFIG_PPC
guts_regs:
of_node_put(guts_node);
guts_node:
dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
__func__);
+#endif
}
_return:
return err;
@@ -2868,6 +2876,13 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dev = &of_dev->dev;
+ err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: of_platform_populate() failed\n",
+ __func__);
+ goto fman_free;
+ }
+
return fman;
fman_node_put:
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 69ca42ce5dd5..0b31f8502ada 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -594,6 +594,7 @@ static const u16 phy2speed[] = {
[PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
[PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
[PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
[PHY_INTERFACE_MODE_XGMII] = SPEED_10000
};
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index ee7e9ce2f5b3..418ca1f3774a 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1316,10 +1316,11 @@ static int hix5hd2_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id hix5hd2_of_match[] = {
- { .compatible = "hisilicon,hisi-gemac-v1", .data = (void *)GEMAC_V1 },
- { .compatible = "hisilicon,hisi-gemac-v2", .data = (void *)GEMAC_V2 },
- { .compatible = "hisilicon,hix5hd2-gemac", .data = (void *)GEMAC_V1 },
- { .compatible = "hisilicon,hi3798cv200-gemac", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hisi-gmac-v1", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hisi-gmac-v2", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hix5hd2-gmac", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hi3798cv200-gmac", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hi3516a-gmac", .data = (void *)GEMAC_V2 },
{},
};
@@ -1327,7 +1328,7 @@ MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
static struct platform_driver hix5hd2_dev_driver = {
.driver = {
- .name = "hisi-gemac",
+ .name = "hisi-gmac",
.of_match_table = hix5hd2_of_match,
},
.probe = hix5hd2_dev_probe,
@@ -1338,4 +1339,4 @@ module_platform_driver(hix5hd2_dev_driver);
MODULE_DESCRIPTION("HISILICON Gigabit Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hisi-gemac");
+MODULE_ALIAS("platform:hisi-gmac");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 3b026c151cf2..7431f633de31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -75,7 +75,7 @@ static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
struct rb_node *parent = NULL;
while (*new) {
- struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
+ struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
int result = counter->id - this->id;
parent = *new;
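For context on the substitution above: rb_entry() is simply container_of() specialised for red-black tree nodes, so the change is a readability cleanup with no behavioural effect.

	/* include/linux/rbtree.h */
	#define rb_entry(ptr, type, member) container_of(ptr, type, member)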
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 46f7be85f5a3..2c032629c369 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,3 +1,20 @@
+#
+# Solarflare device configuration
+#
+
+config NET_VENDOR_SOLARFLARE
+ bool "Solarflare devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Solarflare devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_SOLARFLARE
+
config SFC
tristate "Solarflare SFC9000/SFC9100-family support"
depends on PCI
@@ -44,3 +61,7 @@ config SFC_MCDI_LOGGING
Driver-Interface) commands and responses, allowing debugging of
driver/firmware interaction. The tracing is actually enabled by
a sysfs file 'mcdi_logging' under the PCI device.
+
+source "drivers/net/ethernet/sfc/falcon/Kconfig"
+
+endif # NET_VENDOR_SOLARFLARE
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index a340fc8bd0de..8816515e1bbb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -334,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
* descriptors for the same frame has to be set before, to
* avoid race condition.
*/
- wmb();
+ dma_wmb();
p->des3 = cpu_to_le32(tdes3);
}
@@ -377,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
* descriptors for the same frame has to be set before, to
* avoid race condition.
*/
- wmb();
+ dma_wmb();
p->des3 = cpu_to_le32(tdes3);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index ce97e522566a..f0d86321dfe2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -350,7 +350,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
* descriptors for the same frame has to be set before, to
* avoid race condition.
*/
- wmb();
+ dma_wmb();
p->des0 = cpu_to_le32(tdes0);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3e405785b81c..bb40382e205d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2125,7 +2125,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
- smp_wmb();
+ dma_wmb();
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -2338,7 +2338,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
- smp_wmb();
+ dma_wmb();
}
netdev_sent_queue(dev, skb->len);
@@ -2443,14 +2443,14 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
netif_dbg(priv, rx_status, priv->dev,
"refill entry #%d\n", entry);
}
- wmb();
+ dma_wmb();
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
else
priv->hw->desc->set_rx_owner(p);
- wmb();
+ dma_wmb();
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index c7e547e4f2b1..7d9e36f66735 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -94,6 +94,7 @@
/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET 0x100
+#define IS_SS_ID_XGBE(d) ((d)->ss_version == XGBE_SS_VERSION_10)
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET 0x34
#define XGBE10_SLAVE_PORT_OFFSET 0x64
@@ -1746,6 +1747,17 @@ static void keystone_set_msglevel(struct net_device *ndev, u32 value)
netcp->msg_enable = value;
}
+static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
+{
+ struct gbe_intf *gbe_intf;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
+
+ return gbe_intf;
+}
+
static void keystone_get_stat_strings(struct net_device *ndev,
uint32_t stringset, uint8_t *data)
{
@@ -1754,7 +1766,7 @@ static void keystone_get_stat_strings(struct net_device *ndev,
struct gbe_priv *gbe_dev;
int i;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return;
gbe_dev = gbe_intf->gbe_dev;
@@ -1778,7 +1790,7 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset)
struct gbe_intf *gbe_intf;
struct gbe_priv *gbe_dev;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return -EINVAL;
gbe_dev = gbe_intf->gbe_dev;
@@ -1896,7 +1908,7 @@ static void keystone_get_ethtool_stats(struct net_device *ndev,
struct gbe_intf *gbe_intf;
struct gbe_priv *gbe_dev;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return;
@@ -1920,7 +1932,7 @@ static int keystone_get_link_ksettings(struct net_device *ndev,
if (!phy)
return -EINVAL;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return -EINVAL;
@@ -1953,7 +1965,7 @@ static int keystone_set_link_ksettings(struct net_device *ndev,
if (!phy)
return -EINVAL;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return -EINVAL;
@@ -2311,7 +2323,7 @@ static void gbe_init_host_port(struct gbe_priv *priv)
int bypass_en = 1;
/* Host Tx Pri */
- if (IS_SS_ID_NU(priv))
+ if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
writel(HOST_TX_PRI_MAP_DEFAULT,
GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9c06f8028f0c..92b08383cafa 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1187,8 +1187,8 @@ static int genphy_config_advert(struct phy_device *phydev)
*/
static int genphy_config_eee_advert(struct phy_device *phydev)
{
- u32 broken = phydev->eee_broken_modes;
- u32 old_adv, adv;
+ int broken = phydev->eee_broken_modes;
+ int old_adv, adv;
/* Nothing to disable */
if (!broken)
@@ -1665,7 +1665,7 @@ static void of_set_phy_supported(struct phy_device *phydev)
static void of_set_phy_eee_broken(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
- u32 broken;
+ u32 broken = 0;
if (!IS_ENABLED(CONFIG_OF_MDIO))
return;
@@ -1673,8 +1673,20 @@ static void of_set_phy_eee_broken(struct phy_device *phydev)
if (!node)
return;
- if (!of_property_read_u32(node, "eee-broken-modes", &broken))
- phydev->eee_broken_modes = broken;
+ if (of_property_read_bool(node, "eee-broken-100tx"))
+ broken |= MDIO_EEE_100TX;
+ if (of_property_read_bool(node, "eee-broken-1000t"))
+ broken |= MDIO_EEE_1000T;
+ if (of_property_read_bool(node, "eee-broken-10gt"))
+ broken |= MDIO_EEE_10GT;
+ if (of_property_read_bool(node, "eee-broken-1000kx"))
+ broken |= MDIO_EEE_1000KX;
+ if (of_property_read_bool(node, "eee-broken-10gkx4"))
+ broken |= MDIO_EEE_10GKX4;
+ if (of_property_read_bool(node, "eee-broken-10gkr"))
+ broken |= MDIO_EEE_10GKR;
+
+ phydev->eee_broken_modes = broken;
}
/**
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2fd7dc2e8fc4..3d21a154dce7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -50,7 +50,7 @@
#define NVME_AQ_DEPTH 256
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-
+
/*
* We handle AEN commands ourselves and don't even let the
* block layer know about them.
@@ -1349,7 +1349,7 @@ static ssize_t nvme_cmb_show(struct device *dev,
{
struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
- return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+ return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
ndev->cmbloc, ndev->cmbsz);
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 581001989937..d5bf36ec8a75 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
/**
- * zfcp_dbf_rec_run - trace event related to running recovery
+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+ * @level: trace level to be used for event
* @tag: identifier for event
* @erp: erp_action running
*/
-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
struct zfcp_dbf *dbf = erp->adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
@@ -319,11 +320,21 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
else
rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
- debug_event(dbf->rec, 1, rec, sizeof(*rec));
+ debug_event(dbf->rec, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+{
+ zfcp_dbf_rec_run_lvl(1, tag, erp);
+}
+
+/**
* zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
* @tag: identifier for event
* @wka_port: well known address port
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 36d07584271d..db186d44cfaf 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2015
+ * Copyright IBM Corp. 2008, 2016
*/
#ifndef ZFCP_DBF_H
@@ -283,6 +283,30 @@ struct zfcp_dbf {
struct zfcp_dbf_scsi scsi_buf;
};
+/**
+ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
+ * @req: request that has been completed
+ *
+ * Returns true if the FCP response carries only a benign residual under count.

+ */
+static inline
+bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
+{
+ struct fsf_qtcb *qtcb = req->qtcb;
+ u32 fsf_stat = qtcb->header.fsf_status;
+ struct fcp_resp *fcp_rsp;
+ u8 rsp_flags, fr_status;
+
+ if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
+ return false; /* not an FCP response */
+ fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
+ rsp_flags = fcp_rsp->fr_flags;
+ fr_status = fcp_rsp->fr_status;
+ return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
+ (rsp_flags == FCP_RESID_UNDER) &&
+ (fr_status == SAM_STAT_GOOD);
+}
+
static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
} else if (qtcb->header.fsf_status != FSF_GOOD) {
- zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
+ zfcp_dbf_hba_fsf_resp("fs_ferr",
+ zfcp_dbf_hba_fsf_resp_suppress(req)
+ ? 5 : 1, req);
} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
(req->fsf_command == FSF_QTCB_OPEN_LUN)) {
@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
}
+/**
+ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
+ * @scmnd: SCSI command that was NULLified.
+ * @fsf_req: request that owned @scmnd.
+ */
+static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
+ struct zfcp_fsf_req *fsf_req)
+{
+ _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
+}
+
#endif /* ZFCP_DBF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index a59d678125bd..7ccfce559034 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
@@ -1204,6 +1204,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
}
}
+/**
+ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
+ * @port: zfcp_port whose fc_rport we should try to unblock
+ */
+static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
+{
+ unsigned long flags;
+ struct zfcp_adapter *adapter = port->adapter;
+ int port_status;
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct scsi_device *sdev;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ port_status = atomic_read(&port->status);
+ if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+ (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
+ ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
+ /* new ERP of severity >= port triggered elsewhere meanwhile or
+ * local link down (adapter erp_failed but not clear unblock)
+ */
+ zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ return;
+ }
+ spin_lock(shost->host_lock);
+ __shost_for_each_device(sdev, shost) {
+ struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+ int lun_status;
+
+ if (zsdev->port != port)
+ continue;
+ /* LUN under port of interest */
+ lun_status = atomic_read(&zsdev->status);
+ if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+ continue; /* unblock rport despite failed LUNs */
+ /* LUN recovery not given up yet [maybe follow-up pending] */
+ if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+ (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
+ /* LUN blocked:
+ * not yet unblocked [LUN recovery pending]
+ * or meanwhile blocked [new LUN recovery triggered]
+ */
+ zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
+ spin_unlock(shost->host_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ return;
+ }
+ }
+ /* now port has no child or all children have completed recovery,
+ * and no ERP of severity >= port was meanwhile triggered elsewhere
+ */
+ zfcp_scsi_schedule_rport_register(port);
+ spin_unlock(shost->host_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
{
struct zfcp_adapter *adapter = act->adapter;
@@ -1214,6 +1270,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
case ZFCP_ERP_ACTION_REOPEN_LUN:
if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
scsi_device_put(sdev);
+ zfcp_erp_try_rport_unblock(port);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -1224,7 +1281,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
*/
if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
if (result == ZFCP_ERP_SUCCEEDED)
- zfcp_scsi_schedule_rport_register(port);
+ zfcp_erp_try_rport_unblock(port);
/* fall through */
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 968a0ab4b398..9afdbc32b23f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#ifndef ZFCP_EXT_H
@@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+ struct zfcp_erp_action *erp);
extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index be1c04b334c5..ea3c76ac0de1 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#ifndef FSF_H
@@ -78,6 +78,7 @@
#define FSF_APP_TAG_CHECK_FAILURE 0x00000082
#define FSF_REF_TAG_CHECK_FAILURE 0x00000083
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
+#define FSF_FCP_RSP_AVAILABLE 0x000000AF
#define FSF_UNKNOWN_COMMAND 0x000000E2
#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 7c2c6194dfca..703fce59befe 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -4,7 +4,7 @@
* Data structure and helper functions for tracking pending FSF
* requests.
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2016
*/
#ifndef ZFCP_REQLIST_H
@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
spin_unlock_irqrestore(&rl->lock, flags);
}
+/**
+ * zfcp_reqlist_apply_for_all() - apply a function to every request.
+ * @rl: the requestlist that contains the target requests.
+ * @f: the function to apply to each request; the first parameter of the
+ * function will be the target-request; the second parameter is the same
+ * pointer as given with the argument @data.
+ * @data: freely chosen argument; passed through to @f as second parameter.
+ *
+ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash-
+ * table (not a 'safe' variant, so don't modify the list).
+ *
+ * Holds @rl->lock over the entire request-iteration.
+ */
+static inline void
+zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
+ void (*f)(struct zfcp_fsf_req *, void *), void *data)
+{
+ struct zfcp_fsf_req *req;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ list_for_each_entry(req, &rl->buckets[i], list)
+ f(req, data);
+ spin_unlock_irqrestore(&rl->lock, flags);
+}
+
#endif /* ZFCP_REQLIST_H */
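To make the callback contract of the new iterator concrete, here is a hypothetical caller; the counting helper is an invented example, not part of the patch (the real user is zfcp_scsi_forget_cmnds() further down).

/* Invented example: count all pending FSF requests in the list. */
static void zfcp_count_one(struct zfcp_fsf_req *req, void *data)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int zfcp_count_pending(struct zfcp_reqlist *rl)
{
	unsigned int count = 0;

	zfcp_reqlist_apply_for_all(rl, zfcp_count_one, &count);
	return count;
}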
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9069f98a1817..07ffdbb5107f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
@@ -88,9 +88,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
}
if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
- /* This could be either
- * open LUN pending: this is temporary, will result in
- * open LUN or ERP_FAILED, so retry command
+ /* This could be
* call to rport_delete pending: mimic retry from
* fc_remote_port_chkready until rport is BLOCKED
*/
@@ -209,6 +207,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
return retval;
}
+struct zfcp_scsi_req_filter {
+ u8 tmf_scope;
+ u32 lun_handle;
+ u32 port_handle;
+};
+
+static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
+{
+ struct zfcp_scsi_req_filter *filter =
+ (struct zfcp_scsi_req_filter *)data;
+
+ /* already aborted - prevent side-effects - or not a SCSI command */
+ if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+ return;
+
+ /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
+ if (old_req->qtcb->header.port_handle != filter->port_handle)
+ return;
+
+ if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
+ old_req->qtcb->header.lun_handle != filter->lun_handle)
+ return;
+
+ zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
+ old_req->data = NULL;
+}
+
+static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
+{
+ struct zfcp_adapter *adapter = zsdev->port->adapter;
+ struct zfcp_scsi_req_filter filter = {
+ .tmf_scope = FCP_TMF_TGT_RESET,
+ .port_handle = zsdev->port->handle,
+ };
+ unsigned long flags;
+
+ if (tm_flags == FCP_TMF_LUN_RESET) {
+ filter.tmf_scope = FCP_TMF_LUN_RESET;
+ filter.lun_handle = zsdev->lun_handle;
+ }
+
+ /*
+ * abort_lock secures against other processings - in the abort-function
+ * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data
+ */
+ write_lock_irqsave(&adapter->abort_lock, flags);
+ zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
+ &filter);
+ write_unlock_irqrestore(&adapter->abort_lock, flags);
+}
+
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
@@ -241,8 +290,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
retval = FAILED;
- } else
+ } else {
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+ zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
+ }
zfcp_fsf_req_free(fsf_req);
return retval;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a56a7b243e91..316f87fe3299 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1,8 +1,8 @@
/*
3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
- Modifications By: Tom Couch <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
+ Modifications By: Tom Couch
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
Copyright (C) 2010 LSI Corporation.
@@ -41,10 +41,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
Note: This version of the driver does not contain a bundled firmware
image.
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 0fdc83cfa0e1..b6c208cc474f 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -1,8 +1,8 @@
/*
3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
- Modifications By: Tom Couch <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
+ Modifications By: Tom Couch
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
Copyright (C) 2010 LSI Corporation.
@@ -41,10 +41,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
*/
#ifndef _3W_9XXX_H
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index f8374850f714..970d8fa6bd53 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1,7 +1,7 @@
/*
3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Copyright (C) 2009 LSI Corporation.
@@ -43,10 +43,7 @@
LSI 3ware 9750 6Gb/s SAS/SATA-RAID
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
History
-------
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index fec6449c7595..05e77d84c16d 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -1,7 +1,7 @@
/*
3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Copyright (C) 2009 LSI Corporation.
@@ -39,10 +39,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
*/
#ifndef _3W_SAS_H
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 25aba1613e21..aa412ab02765 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,7 +1,7 @@
/*
3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
@@ -47,10 +47,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
+
History
-------
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 6f65e663d393..69e80c1ed1ca 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,7 +1,7 @@
/*
3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
@@ -45,7 +45,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
+
+ aradford@gmail.com
For more information, goto:
http://www.lsi.com
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index dfa93347c752..a4f6b0d95515 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1233,6 +1233,7 @@ config SCSI_QLOGICPTI
source "drivers/scsi/qla2xxx/Kconfig"
source "drivers/scsi/qla4xxx/Kconfig"
+source "drivers/scsi/qedi/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a2d03957cbe2..736b77414a4b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -131,6 +131,7 @@ obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+obj-$(CONFIG_QEDI) += libiscsi.o qedi/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d849ffa378b1..4f5ca794bb71 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -97,9 +97,6 @@
* and macros and include this file in your driver.
*
* These macros control options :
- * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
- * defined.
- *
* AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
* for commands that return with a CHECK CONDITION status.
*
@@ -127,9 +124,7 @@
* NCR5380_dma_residual - residual byte count
*
* The generic driver is initialized by calling NCR5380_init(instance),
- * after setting the appropriate host specific fields and ID. If the
- * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
- * possible) function may be used.
+ * after setting the appropriate host specific fields and ID.
*/
#ifndef NCR5380_io_delay
@@ -351,76 +346,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
}
#endif
-
-static int probe_irq;
-
-/**
- * probe_intr - helper for IRQ autoprobe
- * @irq: interrupt number
- * @dev_id: unused
- * @regs: unused
- *
- * Set a flag to indicate the IRQ in question was received. This is
- * used by the IRQ probe code.
- */
-
-static irqreturn_t probe_intr(int irq, void *dev_id)
-{
- probe_irq = irq;
- return IRQ_HANDLED;
-}
-
-/**
- * NCR5380_probe_irq - find the IRQ of an NCR5380
- * @instance: NCR5380 controller
- * @possible: bitmask of ISA IRQ lines
- *
- * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ
- * and then looking to see what interrupt actually turned up.
- */
-
-static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
- int possible)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned long timeout;
- int trying_irqs, i, mask;
-
- for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1)
- if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
- trying_irqs |= mask;
-
- timeout = jiffies + msecs_to_jiffies(250);
- probe_irq = NO_IRQ;
-
- /*
- * A interrupt is triggered whenever BSY = false, SEL = true
- * and a bit set in the SELECT_ENABLE_REG is asserted on the
- * SCSI bus.
- *
- * Note that the bus is only driven when the phase control signals
- * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
- * to zero.
- */
-
- NCR5380_write(TARGET_COMMAND_REG, 0);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
-
- while (probe_irq == NO_IRQ && time_before(jiffies, timeout))
- schedule_timeout_uninterruptible(1);
-
- NCR5380_write(SELECT_ENABLE_REG, 0);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
- for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
- if (trying_irqs & mask)
- free_irq(i, NULL);
-
- return probe_irq;
-}
-
/**
* NCR58380_info - report driver and host information
* @instance: relevant scsi host instance
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 3c6ce5434449..51a3567a6fb2 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -199,16 +199,6 @@
#define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
-/*
- * These are "special" values for the irq and dma_channel fields of the
- * Scsi_Host structure
- */
-
-#define DMA_NONE 255
-#define IRQ_AUTO 254
-#define DMA_AUTO 254
-#define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */
-
#ifndef NO_IRQ
#define NO_IRQ 0
#endif
@@ -290,7 +280,6 @@ static void NCR5380_print(struct Scsi_Host *instance);
#define NCR5380_dprint_phase(flg, arg) do {} while (0)
#endif
-static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
static int NCR5380_init(struct Scsi_Host *instance, int flags);
static int NCR5380_maybe_reset_bus(struct Scsi_Host *);
static void NCR5380_exit(struct Scsi_Host *instance);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e4f3e22fcbd9..3ecbf20ca29f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -160,7 +160,6 @@ static const struct pci_device_id aac_pci_tbl[] = {
{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
- { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -239,7 +238,6 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
};
/**
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 9e6f647ff1c1..9a2fdc305cf2 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -189,7 +189,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
- int t4 = is_t4(lldi->adapter_type);
int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
unsigned long long opt0;
unsigned int opt2;
@@ -232,7 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
csk, &req->local_ip, ntohs(req->local_port),
&req->peer_ip, ntohs(req->peer_port),
csk->atid, csk->rss_qid);
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -260,12 +259,45 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
csk, &req->local_ip, ntohs(req->local_port),
&req->peer_ip, ntohs(req->peer_port),
csk->atid, csk->rss_qid);
+ } else {
+ struct cpl_t6_act_open_req *req =
+ (struct cpl_t6_act_open_req *)skb->head;
+ u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_atid));
+ req->local_port = csk->saddr.sin_port;
+ req->peer_port = csk->daddr.sin_port;
+ req->local_ip = csk->saddr.sin_addr.s_addr;
+ req->peer_ip = csk->daddr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = cpu_to_be64(FILTER_TUPLE_V(
+ cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
+ req->rsvd = cpu_to_be32(isn);
+
+ opt2 |= T5_ISS_VALID;
+ opt2 |= RX_FC_DISABLE_F;
+ opt2 |= T5_OPT_2_VALID_F;
+
+ req->opt2 = cpu_to_be32(opt2);
+ req->rsvd2 = cpu_to_be32(0);
+ req->opt3 = cpu_to_be32(0);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+ csk, &req->local_ip, ntohs(req->local_port),
+ &req->peer_ip, ntohs(req->peer_port),
+ csk->atid, csk->rss_qid);
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
- (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+ (&csk->saddr), (&csk->daddr),
+ CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
csk->state, csk->flags, csk->atid, csk->rss_qid);
cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
@@ -276,7 +308,6 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
- int t4 = is_t4(lldi->adapter_type);
int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
unsigned long long opt0;
unsigned int opt2;
@@ -294,10 +325,9 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F |
- RX_FC_DISABLE_F |
RSS_QUEUE_V(csk->rss_qid);
- if (t4) {
+ if (is_t4(lldi->adapter_type)) {
struct cpl_act_open_req6 *req =
(struct cpl_act_open_req6 *)skb->head;
@@ -322,7 +352,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
req->params = cpu_to_be32(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t));
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req6 *req =
(struct cpl_t5_act_open_req6 *)skb->head;
@@ -345,12 +375,41 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t)));
+ } else {
+ struct cpl_t6_act_open_req6 *req =
+ (struct cpl_t6_act_open_req6 *)skb->head;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_atid));
+ req->local_port = csk->saddr6.sin6_port;
+ req->peer_port = csk->daddr6.sin6_port;
+ req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+ req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+ 8);
+ req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+ req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+ 8);
+ req->opt0 = cpu_to_be64(opt0);
+
+ opt2 |= RX_FC_DISABLE_F;
+ opt2 |= T5_OPT_2_VALID_F;
+
+ req->opt2 = cpu_to_be32(opt2);
+
+ req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
+
+ req->rsvd2 = cpu_to_be32(0);
+ req->opt3 = cpu_to_be32(0);
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
- t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+ CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
+ csk->flags, csk->atid,
&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
csk->rss_qid);
@@ -742,7 +801,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
(&csk->saddr), (&csk->daddr),
atid, tid, csk, csk->state, csk->flags, rcv_isn);
- module_put(THIS_MODULE);
+ module_put(cdev->owner);
cxgbi_sock_get(csk);
csk->tid = tid;
@@ -891,7 +950,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
if (is_neg_adv(status))
goto rel_skb;
- module_put(THIS_MODULE);
+ module_put(cdev->owner);
if (status && status != CPL_ERR_TCAM_FULL &&
status != CPL_ERR_CONN_EXIST &&
@@ -1173,6 +1232,101 @@ rel_skb:
__kfree_skb(skb);
}
+static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ struct sk_buff *lskb;
+ u32 tid = GET_TID(cpl);
+ u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
+ csk, csk->state, csk->flags, csk->tid, skb,
+ skb->len, pdu_len_ddp);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
+ cxgbi_skcb_flags(skb) = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*cpl));
+ __pskb_trim(skb, ntohs(cpl->len));
+
+ if (!csk->skb_ulp_lhdr)
+ csk->skb_ulp_lhdr = skb;
+
+ lskb = csk->skb_ulp_lhdr;
+ cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
+ csk, csk->state, csk->flags, skb, lskb);
+
+ __skb_queue_tail(&csk->receive_queue, skb);
+ spin_unlock_bh(&csk->lock);
+ return;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
+ struct sk_buff *skb, u32 ddpvld)
+{
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
+ pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
+ csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
+ }
+
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
+ pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
+ csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
+ }
+
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
+ csk, skb, ddpvld);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
+ }
+
+ if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
+ !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
+ csk, skb, ddpvld);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
+ }
+}
+
static void do_rx_data_ddp(struct cxgbi_device *cdev,
struct sk_buff *skb)
{
@@ -1182,7 +1336,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
unsigned int tid = GET_TID(rpl);
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct tid_info *t = lldi->tids;
- unsigned int status = ntohl(rpl->ddpvld);
+ u32 ddpvld = be32_to_cpu(rpl->ddpvld);
csk = lookup_tid(t, tid);
if (unlikely(!csk)) {
@@ -1192,7 +1346,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
"csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
- csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
+ csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
spin_lock_bh(&csk->lock);
@@ -1220,29 +1374,8 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
- if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
- pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
- csk, lskb, status, cxgbi_skcb_flags(lskb));
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
- }
- if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
- pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
- csk, lskb, status, cxgbi_skcb_flags(lskb));
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
- }
- if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
- log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
- csk, lskb, status);
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
- }
- if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
- !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
- log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
- csk, lskb, status);
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
- }
+ cxgb4i_process_ddpvld(csk, lskb, ddpvld);
+
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, lskb 0x%p, f 0x%lx.\n",
csk, lskb, cxgbi_skcb_flags(lskb));
@@ -1260,6 +1393,98 @@ rel_skb:
__kfree_skb(skb);
}
+static void
+do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ struct sk_buff *data_skb = NULL;
+ u32 tid = GET_TID(rpl);
+ u32 ddpvld = be32_to_cpu(rpl->ddpvld);
+ u32 seq = be32_to_cpu(rpl->seq);
+ u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
+ "pdu_len_ddp %u, status %u.\n",
+ csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
+ ntohs(rpl->len), pdu_len_ddp, rpl->status);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ cxgbi_skcb_tcp_seq(skb) = seq;
+ cxgbi_skcb_flags(skb) = 0;
+ cxgbi_skcb_rx_pdulen(skb) = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*rpl));
+ __pskb_trim(skb, be16_to_cpu(rpl->len));
+
+ csk->rcv_nxt = seq + pdu_len_ddp;
+
+ if (csk->skb_ulp_lhdr) {
+ data_skb = skb_peek(&csk->receive_queue);
+ if (!data_skb ||
+ !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
+ pr_err("Error! freelist data not found 0x%p, tid %u\n",
+ data_skb, tid);
+
+ goto abort_conn;
+ }
+ __skb_unlink(data_skb, &csk->receive_queue);
+
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
+
+ __skb_queue_tail(&csk->receive_queue, skb);
+ __skb_queue_tail(&csk->receive_queue, data_skb);
+ } else {
+ __skb_queue_tail(&csk->receive_queue, skb);
+ }
+
+ csk->skb_ulp_lhdr = NULL;
+
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
+ cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
+
+ cxgb4i_process_ddpvld(csk, skb, ddpvld);
+
+ log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
+ csk, skb, cxgbi_skcb_flags(skb));
+
+ cxgbi_conn_pdu_ready(csk);
+ spin_unlock_bh(&csk->lock);
+
+ return;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+rel_skb:
+ __kfree_skb(skb);
+}
+
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
struct cxgbi_sock *csk;
@@ -1382,7 +1607,6 @@ static int init_act_open(struct cxgbi_sock *csk)
void *daddr;
unsigned int step;
unsigned int size, size6;
- int t4 = is_t4(lldi->adapter_type);
unsigned int linkspeed;
unsigned int rcv_winf, snd_winf;
@@ -1428,12 +1652,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
- if (t4) {
+ if (is_t4(lldi->adapter_type)) {
size = sizeof(struct cpl_act_open_req);
size6 = sizeof(struct cpl_act_open_req6);
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
size = sizeof(struct cpl_t5_act_open_req);
size6 = sizeof(struct cpl_t5_act_open_req6);
+ } else {
+ size = sizeof(struct cpl_t6_act_open_req);
+ size6 = sizeof(struct cpl_t6_act_open_req6);
}
if (csk->csk_family == AF_INET)
@@ -1452,8 +1679,8 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->mtu = dst_mtu(csk->dst);
cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
csk->tx_chan = cxgb4_port_chan(ndev);
- /* SMT two entries per row */
- csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
+ csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
+ cxgb4_port_viid(ndev));
step = lldi->ntxq / lldi->nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
step = lldi->nrxq / lldi->nchan;
@@ -1486,7 +1713,11 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->mtu, csk->mss_idx, csk->smac_idx);
/* must wait for either a act_open_rpl or act_open_establish */
- try_module_get(THIS_MODULE);
+ if (!try_module_get(cdev->owner)) {
+ pr_err("%s, try_module_get failed.\n", ndev->name);
+ goto rel_resource;
+ }
+
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
if (csk->csk_family == AF_INET)
send_act_open_req(csk, skb, csk->l2t);
@@ -1521,10 +1752,11 @@ static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
[CPL_FW4_ACK] = do_fw4_ack,
[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
- [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
+ [CPL_ISCSI_DATA] = do_rx_iscsi_data,
[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
[CPL_RX_DATA_DDP] = do_rx_data_ddp,
[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+ [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
[CPL_RX_DATA] = do_rx_data,
};
@@ -1794,10 +2026,12 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
cdev->nports = lldi->nports;
cdev->mtus = lldi->mtus;
cdev->nmtus = NMTUS;
- cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
+ cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
+ CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
cdev->itp = &cxgb4i_iscsi_transport;
+ cdev->owner = THIS_MODULE;
cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
<< FW_VIID_PFN_S;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 2ffe029ff2b6..9167bcd9fffe 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -642,6 +642,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
n->dev->name, ndev->name, mtu);
}
+ if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+ pr_info("%s interface not up.\n", ndev->name);
+ err = -ENETDOWN;
+ goto rel_neigh;
+ }
+
cdev = cxgbi_device_find_by_netdev(ndev, &port);
if (!cdev) {
pr_info("dst %pI4, %s, NOT cxgbi device.\n",
@@ -736,6 +742,12 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
}
ndev = n->dev;
+ if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+ pr_info("%s interface not up.\n", ndev->name);
+ err = -ENETDOWN;
+ goto rel_rt;
+ }
+
if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
pr_info("multi-cast route %pI6 port %u, dev %s.\n",
daddr6->sin6_addr.s6_addr,
@@ -896,6 +908,7 @@ EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
+ struct module *owner = csk->cdev->owner;
log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
csk, (csk)->state, (csk)->flags, (csk)->tid);
@@ -906,6 +919,8 @@ void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
spin_unlock_bh(&csk->lock);
cxgbi_sock_put(csk);
__kfree_skb(skb);
+
+ module_put(owner);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
@@ -1574,6 +1589,25 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
return -EIO;
}
+ if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
+ cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
+ /* If completion flag is set and data is directly
+ * placed in to the host memory then update
+ * task->exp_datasn to the datasn in completion
+ * iSCSI hdr as T6 adapter generates completion only
+ * for the last pdu of a sequence.
+ */
+ itt_t itt = ((struct iscsi_data *)skb->data)->itt;
+ struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
+ u32 data_sn = be32_to_cpu(((struct iscsi_data *)
+ skb->data)->datasn);
+ if (task && task->sc) {
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ tcp_task->exp_datasn = data_sn;
+ }
+ }
+
return read_pdu_skb(conn, skb, 0, 0);
}
@@ -1627,15 +1661,15 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
csk->rcv_wup, cdev->rx_credit_thres,
csk->rcv_win);
+ if (!cdev->rx_credit_thres)
+ return;
+
if (csk->state != CTP_ESTABLISHED)
return;
credits = csk->copied_seq - csk->rcv_wup;
if (unlikely(!credits))
return;
- if (unlikely(cdev->rx_credit_thres == 0))
- return;
-
must_send = credits + 16384 >= csk->rcv_win;
if (must_send || credits >= cdev->rx_credit_thres)
csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index e7802738f5d2..95ba99044c3e 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -207,6 +207,7 @@ enum cxgbi_skcb_flags {
SKCBF_RX_HDR, /* received pdu header */
SKCBF_RX_DATA, /* received pdu payload */
SKCBF_RX_STATUS, /* received ddp status */
+ SKCBF_RX_ISCSI_COMPL, /* received iscsi completion */
SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */
SKCBF_RX_HCRC_ERR, /* header digest error */
SKCBF_RX_DCRC_ERR, /* data digest error */
@@ -467,6 +468,7 @@ struct cxgbi_device {
struct pci_dev *pdev;
struct dentry *debugfs_root;
struct iscsi_transport *itp;
+ struct module *owner;
unsigned int pfvf;
unsigned int rx_credit_thres;
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index de5147a8c959..6f9665d50d84 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -37,7 +37,7 @@
#define MAX_CARDS 8
/* old-style parameters for compatibility */
-static int ncr_irq;
+static int ncr_irq = -1;
static int ncr_addr;
static int ncr_5380;
static int ncr_53c400;
@@ -52,9 +52,9 @@ module_param(ncr_53c400a, int, 0);
module_param(dtc_3181e, int, 0);
module_param(hp_c2502, int, 0);
-static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])");
static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
module_param_array(base, int, NULL, 0);
@@ -67,6 +67,56 @@ MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC31
MODULE_ALIAS("g_NCR5380_mmio");
MODULE_LICENSE("GPL");
+static void g_NCR5380_trigger_irq(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ /*
+ * An interrupt is triggered whenever BSY = false, SEL = true
+ * and a bit set in the SELECT_ENABLE_REG is asserted on the
+ * SCSI bus.
+ *
+ * Note that the bus is only driven when the phase control signals
+ * (I/O, C/D, and MSG) match those in the TCR.
+ */
+ NCR5380_write(TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(INITIATOR_COMMAND_REG,
+ ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
+
+ msleep(1);
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+}
+
+/**
+ * g_NCR5380_probe_irq - find the IRQ of an NCR5380 or equivalent
+ * @instance: SCSI host instance
+ *
+ * Autoprobe for the IRQ line used by the card by triggering an IRQ
+ * and then looking to see what interrupt actually turned up.
+ */
+
+static int g_NCR5380_probe_irq(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ int irq_mask, irq;
+
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ irq_mask = probe_irq_on();
+ g_NCR5380_trigger_irq(instance);
+ irq = probe_irq_off(irq_mask);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+ if (irq <= 0)
+ return NO_IRQ;
+ return irq;
+}
+
/*
* Configure I/O address of 53C400A or DTC436 by writing magic numbers
* to ports 0x779 and 0x379.
@@ -81,14 +131,33 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
outb(magic[3], 0x379);
outb(magic[4], 0x379);
- /* allowed IRQs for HP C2502 */
- if (irq != 2 && irq != 3 && irq != 4 && irq != 5 && irq != 7)
- irq = 0;
+ if (irq == 9)
+ irq = 2;
+
if (idx >= 0 && idx <= 7)
cfg = 0x80 | idx | (irq << 4);
outb(cfg, 0x379);
}
+static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static int legacy_find_free_irq(int *irq_table)
+{
+ while (*irq_table != -1) {
+ if (!request_irq(*irq_table, legacy_empty_irq_handler,
+ IRQF_PROBE_SHARED, "Test IRQ",
+ (void *)irq_table)) {
+ free_irq(*irq_table, (void *) irq_table);
+ return *irq_table;
+ }
+ irq_table++;
+ }
+ return -1;
+}
+
static unsigned int ncr_53c400a_ports[] = {
0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
};
@@ -101,6 +170,9 @@ static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
static u8 hp_c2502_magic[] = { /* HP C2502 */
0x0f, 0x22, 0xf0, 0x20, 0x80
};
+static int hp_c2502_irqs[] = {
+ 9, 5, 7, 3, 4, -1
+};
static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
struct device *pdev, int base, int irq, int board)
@@ -248,6 +320,13 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
}
}
+ /* Check for vacant slot */
+ NCR5380_write(MODE_REG, 0);
+ if (NCR5380_read(MODE_REG) != 0) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+
ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
if (ret)
goto out_unregister;
@@ -262,31 +341,59 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
NCR5380_maybe_reset_bus(instance);
- if (irq != IRQ_AUTO)
- instance->irq = irq;
- else
- instance->irq = NCR5380_probe_irq(instance, 0xffff);
-
/* Compatibility with documented NCR5380 kernel parameters */
- if (instance->irq == 255)
- instance->irq = NO_IRQ;
+ if (irq == 255 || irq == 0)
+ irq = NO_IRQ;
+ else if (irq == -1)
+ irq = IRQ_AUTO;
+
+ if (board == BOARD_HP_C2502) {
+ int *irq_table = hp_c2502_irqs;
+ int board_irq = -1;
+
+ switch (irq) {
+ case NO_IRQ:
+ board_irq = 0;
+ break;
+ case IRQ_AUTO:
+ board_irq = legacy_find_free_irq(irq_table);
+ break;
+ default:
+ while (*irq_table != -1)
+ if (*irq_table++ == irq)
+ board_irq = irq;
+ }
+
+ if (board_irq <= 0) {
+ board_irq = 0;
+ irq = NO_IRQ;
+ }
+
+ magic_configure(port_idx, board_irq, magic);
+ }
+
+ if (irq == IRQ_AUTO) {
+ instance->irq = g_NCR5380_probe_irq(instance);
+ if (instance->irq == NO_IRQ)
+ shost_printk(KERN_INFO, instance, "no irq detected\n");
+ } else {
+ instance->irq = irq;
+ if (instance->irq == NO_IRQ)
+ shost_printk(KERN_INFO, instance, "no irq provided\n");
+ }
if (instance->irq != NO_IRQ) {
- /* set IRQ for HP C2502 */
- if (board == BOARD_HP_C2502)
- magic_configure(port_idx, instance->irq, magic);
if (request_irq(instance->irq, generic_NCR5380_intr,
0, "NCR5380", instance)) {
- printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
instance->irq = NO_IRQ;
+ shost_printk(KERN_INFO, instance,
+ "irq %d denied\n", instance->irq);
+ } else {
+ shost_printk(KERN_INFO, instance,
+ "irq %d acquired\n", instance->irq);
}
}
- if (instance->irq == NO_IRQ) {
- printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
- printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
- }
-
ret = scsi_add_host(instance, pdev);
if (ret)
goto out_free_irq;
@@ -597,7 +704,7 @@ static int __init generic_NCR5380_init(void)
int ret = 0;
/* compatibility with old-style parameters */
- if (irq[0] == 0 && base[0] == 0 && card[0] == -1) {
+ if (irq[0] == -1 && base[0] == 0 && card[0] == -1) {
irq[0] = ncr_irq;
base[0] = ncr_addr;
if (ncr_5380)
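g_NCR5380_probe_irq() above relies on the kernel's generic IRQ autoprobe helpers: arm probing, make the card raise an interrupt, then ask which line actually fired. A minimal sketch of that probe_irq_on()/probe_irq_off() pattern, with trigger_device_irq() standing in for hardware-specific code such as g_NCR5380_trigger_irq():

#include <linux/interrupt.h>

static void trigger_device_irq(void)
{
        /* Hypothetical: hardware-specific code that makes the card assert its
         * interrupt line, e.g. g_NCR5380_trigger_irq() in the patch above. */
}

static int autoprobe_irq(void)
{
        unsigned long mask;
        int irq;

        mask = probe_irq_on();          /* arm all currently unassigned IRQ lines */
        trigger_device_irq();           /* provoke exactly one interrupt from the card */
        irq = probe_irq_off(mask);      /* <= 0 means none or several lines fired */

        return irq > 0 ? irq : -1;
}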
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 3ce5b65ccb00..81b22d989648 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -51,4 +51,6 @@
#define BOARD_DTC3181E 3
#define BOARD_HP_C2502 4
+#define IRQ_AUTO 254
+
#endif /* GENERIC_NCR5380_H */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 691a09316952..cbc0c5fe5a60 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1557,10 +1557,9 @@ static void hpsa_monitor_offline_device(struct ctlr_info *h,
/* Device is not on the list, add it. */
device = kmalloc(sizeof(*device), GFP_KERNEL);
- if (!device) {
- dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+ if (!device)
return;
- }
+
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
spin_lock_irqsave(&h->offline_device_lock, flags);
list_add_tail(&device->offline_list, &h->offline_device_list);
@@ -2142,17 +2141,15 @@ static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
GFP_KERNEL);
- if (!h->cmd_sg_list) {
- dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
+ if (!h->cmd_sg_list)
return -ENOMEM;
- }
+
for (i = 0; i < h->nr_cmds; i++) {
h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
h->chainsize, GFP_KERNEL);
- if (!h->cmd_sg_list[i]) {
- dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
+ if (!h->cmd_sg_list[i])
goto clean;
- }
+
}
return 0;
@@ -3454,11 +3451,8 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
struct bmic_sense_subsystem_info *ssi;
ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
- if (ssi == NULL) {
- dev_warn(&h->pdev->dev,
- "%s: out of memory\n", __func__);
+ if (!ssi)
return;
- }
rc = hpsa_bmic_sense_subsystem_information(h,
scsi3addr, 0, ssi, sizeof(*ssi));
@@ -4335,8 +4329,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
if (!currentsd[i]) {
- dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
- __FILE__, __LINE__);
h->drv_req_rescan = 1;
goto out;
}
@@ -8597,14 +8589,12 @@ static int hpsa_luns_changed(struct ctlr_info *h)
*/
if (!h->lastlogicals)
- goto out;
+ return rc;
logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
- if (!logdev) {
- dev_warn(&h->pdev->dev,
- "Out of memory, can't track lun changes.\n");
- goto out;
- }
+ if (!logdev)
+ return rc;
+
if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
dev_warn(&h->pdev->dev,
"report luns failed, can't track lun changes.\n");
@@ -8998,11 +8988,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
return;
options = kzalloc(sizeof(*options), GFP_KERNEL);
- if (!options) {
- dev_err(&h->pdev->dev,
- "Error: failed to disable rld caching, during alloc.\n");
+ if (!options)
return;
- }
c = cmd_alloc(h);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9534ee6ef52..50cd01165e35 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -95,6 +95,7 @@ static int fast_fail = 1;
static int client_reserve = 1;
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
+static LIST_HEAD(ibmvscsi_head);
static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -232,6 +233,7 @@ static void ibmvscsi_task(void *data)
while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = VIOSRP_CRQ_FREE;
+ wmb();
}
vio_enable_interrupts(vdev);
@@ -240,6 +242,7 @@ static void ibmvscsi_task(void *data)
vio_disable_interrupts(vdev);
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = VIOSRP_CRQ_FREE;
+ wmb();
} else {
done = 1;
}
@@ -992,7 +995,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (unlikely(rsp->opcode != SRP_RSP)) {
if (printk_ratelimit())
dev_warn(evt_struct->hostdata->dev,
- "bad SRP RSP type %d\n", rsp->opcode);
+ "bad SRP RSP type %#02x\n", rsp->opcode);
}
if (cmnd) {
@@ -2270,6 +2273,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
dev_set_drvdata(&vdev->dev, hostdata);
+ list_add_tail(&hostdata->host_list, &ibmvscsi_head);
return 0;
add_srp_port_failed:
@@ -2291,6 +2295,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
+ list_del(&hostdata->host_list);
unmap_persist_bufs(hostdata);
release_event_pool(&hostdata->pool, hostdata);
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index e0f6c3aeb4ee..3a7875575616 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
/* all driver data associated with a host adapter */
struct ibmvscsi_host_data {
+ struct list_head host_list;
atomic_t request_limit;
int client_migrated;
int reset_crq;
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index c9fa3565c671..2583e8b50b21 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>
+#include <linux/delay.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 98b0ca79a5c5..65c6189885ab 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -26,6 +26,7 @@
#ifndef __H_IBMVSCSI_TGT
#define __H_IBMVSCSI_TGT
+#include <linux/interrupt.h>
#include "libsrp.h"
#define SYS_ID_NAME_LEN 64
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 8de0eda8cd00..394fe1338d09 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -402,6 +402,9 @@ struct MPT3SAS_DEVICE {
u8 block;
u8 tlr_snoop_check;
u8 ignore_delay_remove;
+ /* I/O priority command handling */
+ u8 ncq_prio_enable;
+
};
#define MPT3_CMD_NOT_USED 0x8000 /* free */
@@ -1458,4 +1461,7 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
u16 smid);
+/* NCQ Prio Handling Check */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+
#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 050bd788ad02..95f0f24bac05 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3325,8 +3325,6 @@ static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
/*********** diagnostic trigger suppport *** END ****************************/
-
-
/*****************************************/
struct device_attribute *mpt3sas_host_attrs[] = {
@@ -3402,9 +3400,50 @@ _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+/**
+ * _ctl_device_ncq_prio_enable_show - report whether prioritized I/O is sent to the device
+ * @dev - pointer to embedded device
+ * @attr - device attribute (unused)
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA
+ */
+static ssize_t
+_ctl_device_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ sas_device_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+_ctl_device_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+ bool ncq_prio_enable = 0;
+
+ if (kstrtobool(buf, &ncq_prio_enable))
+ return -EINVAL;
+
+ if (!scsih_ncq_prio_supp(sdev))
+ return -EINVAL;
+
+ sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
+ return strlen(buf);
+}
+static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
+ _ctl_device_ncq_prio_enable_show,
+ _ctl_device_ncq_prio_enable_store);
+
struct device_attribute *mpt3sas_dev_attrs[] = {
&dev_attr_sas_address,
&dev_attr_sas_device_handle,
+ &dev_attr_sas_ncq_prio_enable,
NULL,
};
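The sas_ncq_prio_enable attribute above follows the usual shape of a boolean sysfs knob: parse the user string with kstrtobool(), validate against device capability, then record the flag. A minimal sketch of that store-side pattern, assuming only kstrtobool() and the standard device_attribute store signature; supports_feature() is a hypothetical stand-in for scsih_ncq_prio_supp():

#include <linux/device.h>
#include <linux/kernel.h>

static bool feature_enabled;

static bool supports_feature(struct device *dev)
{
        return true;    /* hypothetical capability check */
}

static ssize_t feature_enable_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        bool val;

        if (kstrtobool(buf, &val))      /* accepts "0"/"1", "y"/"n" and similar */
                return -EINVAL;
        if (!supports_feature(dev))
                return -EINVAL;

        feature_enabled = val;
        return count;                   /* consume the whole write */
}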
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5c8f75247d73..b5c966e319d3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4053,6 +4053,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _raid_device *raid_device;
+ struct request *rq = scmd->request;
+ int class;
Mpi2SCSIIORequest_t *mpi_request;
u32 mpi_control;
u16 smid;
@@ -4115,7 +4117,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* set tags */
mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
-
+ /* NCQ priority supported, make sure control indicates high priority */
+ if (sas_device_priv_data->ncq_prio_enable) {
+ class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ if (class == IOPRIO_CLASS_RT)
+ mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
+ }
/* Make sure Device is not raid volume.
* We do not expose raid functionality to upper layer for warpdrive.
*/
@@ -9099,6 +9106,31 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
+/**
+ * scsih_ncq_prio_supp - Check for NCQ command priority support
+ * @sdev: scsi device struct
+ *
+ * This is called when a user indicates they would like to enable
+ * ncq command priorities. This works only on SATA devices.
+ */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev)
+{
+ unsigned char *buf;
+ bool ncq_prio_supp = false;
+
+ if (!scsi_device_supports_vpd(sdev))
+ return ncq_prio_supp;
+
+ buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
+ if (!buf)
+ return ncq_prio_supp;
+
+ if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
+ ncq_prio_supp = (buf[213] >> 4) & 1;
+
+ kfree(buf);
+ return ncq_prio_supp;
+}
/*
* The pci device ids are defined in mpi/mpi2_cnfg.h.
*/
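scsih_qcmd() above maps the block layer's I/O priority class onto the firmware command-priority bit only when a request is IOPRIO_CLASS_RT. A small hedged user-space sketch of how a process might mark its I/O as real-time so it benefits from the sas_ncq_prio_enable knob; the constants mirror the kernel's ioprio encoding (class in the top three bits of a 16-bit value) and are defined locally here rather than taken from a uapi header, and raising the class to RT typically needs elevated privileges:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Local copies of the kernel's ioprio encoding (see include/linux/ioprio.h). */
#define IOPRIO_CLASS_SHIFT      13
#define IOPRIO_CLASS_RT         1
#define IOPRIO_WHO_PROCESS      1
#define IOPRIO_PRIO_VALUE(cls, data)    (((cls) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
        /* Give the calling process RT I/O priority, level 0 (highest). */
        if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
                    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0)) < 0) {
                perror("ioprio_set");
                return 1;
        }
        /* I/O issued from here on carries IOPRIO_CLASS_RT. */
        return 0;
}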
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
new file mode 100644
index 000000000000..23ca8a274586
--- /dev/null
+++ b/drivers/scsi/qedi/Kconfig
@@ -0,0 +1,10 @@
+config QEDI
+ tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
+ depends on PCI && SCSI
+ depends on QED
+ select SCSI_ISCSI_ATTRS
+ select QED_LL2
+ select QED_ISCSI
+ ---help---
+ This driver supports iSCSI offload for the QLogic FastLinQ
+ 41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
new file mode 100644
index 000000000000..2b3e16b24299
--- /dev/null
+++ b/drivers/scsi/qedi/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_QEDI) := qedi.o
+qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
+ qedi_dbg.o
+
+qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
new file mode 100644
index 000000000000..5ca3e8c28a3f
--- /dev/null
+++ b/drivers/scsi/qedi/qedi.h
@@ -0,0 +1,364 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_H_
+#define _QEDI_H_
+
+#define __PREVENT_QED_HSI__
+
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_host.h>
+#include <linux/uio_driver.h>
+
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedi_dbg.h"
+#include <linux/qed/qed_iscsi_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qedi_version.h"
+
+#define QEDI_MODULE_NAME "qedi"
+
+struct qedi_endpoint;
+
+/*
+ * PCI function probe defines
+ */
+#define QEDI_MODE_NORMAL 0
+#define QEDI_MODE_RECOVERY 1
+
+#define ISCSI_WQE_SET_PTU_INVALIDATE 1
+#define QEDI_MAX_ISCSI_TASK 4096
+#define QEDI_MAX_TASK_NUM 0x0FFF
+#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024
+#define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */
+#define MAX_OUSTANDING_TASKS_PER_CON 1024
+
+#define QEDI_MAX_BD_LEN 0xffff
+#define QEDI_BD_SPLIT_SZ 0x1000
+#define QEDI_PAGE_SIZE 4096
+#define QEDI_FAST_SGE_COUNT 4
+/* MAX Length for cached SGL */
+#define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
+
+#define MAX_NUM_MSIX_PF 8
+#define MIN_NUM_CPUS_MSIX(x) min((x)->msix_count, num_online_cpus())
+
+#define QEDI_LOCAL_PORT_MIN 60000
+#define QEDI_LOCAL_PORT_MAX 61024
+#define QEDI_LOCAL_PORT_RANGE (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
+#define QEDI_LOCAL_PORT_INVALID 0xffff
+#define TX_RX_RING 16
+#define RX_RING (TX_RX_RING - 1)
+#define LL2_SINGLE_BUF_SIZE 0x400
+#define QEDI_PAGE_SIZE 4096
+#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE)
+#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))
+
+#define QEDI_PAGE_SIZE 4096
+#define QEDI_PATH_HANDLE 0xFE0000000UL
+
+struct qedi_uio_ctrl {
+ /* meta data */
+ u32 uio_hsi_version;
+
+ /* user writes */
+ u32 host_tx_prod;
+ u32 host_rx_cons;
+ u32 host_rx_bd_cons;
+ u32 host_tx_pkt_len;
+ u32 host_rx_cons_cnt;
+
+ /* driver writes */
+ u32 hw_tx_cons;
+ u32 hw_rx_prod;
+ u32 hw_rx_bd_prod;
+ u32 hw_rx_prod_cnt;
+
+ /* other */
+ u8 mac_addr[6];
+ u8 reserve[2];
+};
+
+struct qedi_rx_bd {
+ u32 rx_pkt_index;
+ u32 rx_pkt_len;
+ u16 vlan_id;
+};
+
+#define QEDI_RX_DESC_CNT (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
+#define QEDI_MAX_RX_DESC_CNT (QEDI_RX_DESC_CNT - 1)
+#define QEDI_NUM_RX_BD (QEDI_RX_DESC_CNT * 1)
+#define QEDI_MAX_RX_BD (QEDI_NUM_RX_BD - 1)
+
+#define QEDI_NEXT_RX_IDX(x) ((((x) & (QEDI_MAX_RX_DESC_CNT)) == \
+ (QEDI_MAX_RX_DESC_CNT - 1)) ? \
+ (x) + 2 : (x) + 1)
+
+struct qedi_uio_dev {
+ struct uio_info qedi_uinfo;
+ u32 uio_dev;
+ struct list_head list;
+
+ u32 ll2_ring_size;
+ void *ll2_ring;
+
+ u32 ll2_buf_size;
+ void *ll2_buf;
+
+ void *rx_pkt;
+ void *tx_pkt;
+
+ struct qedi_ctx *qedi;
+ struct pci_dev *pdev;
+ void *uctrl;
+};
+
+/* List to maintain the skb pointers */
+struct skb_work_list {
+ struct list_head list;
+ struct sk_buff *skb;
+ u16 vlan_id;
+};
+
+/* Queue sizes in number of elements */
+#define QEDI_SQ_SIZE MAX_OUSTANDING_TASKS_PER_CON
+#define QEDI_CQ_SIZE 2048
+#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK
+#define QEDI_PROTO_CQ_PROD_IDX 0
+
+struct qedi_glbl_q_params {
+ u64 hw_p_cq; /* Completion queue PBL */
+ u64 hw_p_rq; /* Request queue PBL */
+ u64 hw_p_cmdq; /* Command queue PBL */
+};
+
+struct global_queue {
+ union iscsi_cqe *cq;
+ dma_addr_t cq_dma;
+ u32 cq_mem_size;
+ u32 cq_cons_idx; /* Completion queue consumer index */
+
+ void *cq_pbl;
+ dma_addr_t cq_pbl_dma;
+ u32 cq_pbl_size;
+
+};
+
+struct qedi_fastpath {
+ struct qed_sb_info *sb_info;
+ u16 sb_id;
+#define QEDI_NAME_SIZE 16
+ char name[QEDI_NAME_SIZE];
+ struct qedi_ctx *qedi;
+};
+
+/* Used to pass fastpath information needed to process CQEs */
+struct qedi_io_work {
+ struct list_head list;
+ struct iscsi_cqe_solicited cqe;
+ u16 que_idx;
+};
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base: queue base memory
+ * @cid_que: queue memory pointer
+ * @cid_q_prod_idx: producer index
+ * @cid_q_cons_idx: consumer index
+ * @cid_q_max_idx: max index, used to detect wrap-around condition
+ * @cid_free_cnt: queue size
+ * @conn_cid_tbl: iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+ void *cid_que_base;
+ u32 *cid_que;
+ u32 cid_q_prod_idx;
+ u32 cid_q_cons_idx;
+ u32 cid_q_max_idx;
+ u32 cid_free_cnt;
+ struct qedi_conn **conn_cid_tbl;
+};
+
+struct qedi_portid_tbl {
+ spinlock_t lock; /* Port id lock */
+ u16 start;
+ u16 max;
+ u16 next;
+ unsigned long *table;
+};
+
+struct qedi_itt_map {
+ __le32 itt;
+ struct qedi_cmd *p_cmd;
+};
+
+/* I/O tracing entry */
+#define QEDI_IO_TRACE_SIZE 2048
+struct qedi_io_log {
+#define QEDI_IO_TRACE_REQ 0
+#define QEDI_IO_TRACE_RSP 1
+ u8 direction;
+ u16 task_id;
+ u32 cid;
+ u32 port_id; /* Remote port fabric ID */
+ int lun;
+ u8 op; /* SCSI CDB */
+ u8 lba[4];
+ unsigned int bufflen; /* SCSI buffer length */
+ unsigned int sg_count; /* Number of SG elements */
+ u8 fast_sgs; /* number of fast sgls */
+ u8 slow_sgs; /* number of slow sgls */
+ u8 cached_sgs; /* number of cached sgls */
+ int result; /* Result passed back to mid-layer */
+ unsigned long jiffies; /* Time stamp when I/O logged */
+ int refcount; /* Reference count for task id */
+ unsigned int blk_req_cpu; /* CPU that the task is queued on by
+ * blk layer
+ */
+ unsigned int req_cpu; /* CPU that the task is queued on */
+ unsigned int intr_cpu; /* Interrupt CPU that the task is received on */
+ unsigned int blk_rsp_cpu; /* CPU that task is actually processed and
+ * returned to blk layer
+ */
+ bool cached_sge;
+ bool slow_sge;
+ bool fast_sge;
+};
+
+/* Number of entries in BDQ */
+#define QEDI_BDQ_NUM 256
+#define QEDI_BDQ_BUF_SIZE 256
+
+/* DMA coherent buffers for BDQ */
+struct qedi_bdq_buf {
+ void *buf_addr;
+ dma_addr_t buf_dma;
+};
+
+/* Main port level struct */
+struct qedi_ctx {
+ struct qedi_dbg_ctx dbg_ctx;
+ struct Scsi_Host *shost;
+ struct pci_dev *pdev;
+ struct qed_dev *cdev;
+ struct qed_dev_iscsi_info dev_info;
+ struct qed_int_info int_info;
+ struct qedi_glbl_q_params *p_cpuq;
+ struct global_queue **global_queues;
+ /* uio declaration */
+ struct qedi_uio_dev *udev;
+ struct list_head ll2_skb_list;
+ spinlock_t ll2_lock; /* Light L2 lock */
+ spinlock_t hba_lock; /* per port lock */
+ struct task_struct *ll2_recv_thread;
+ unsigned long flags;
+#define UIO_DEV_OPENED 1
+#define QEDI_IOTHREAD_WAKE 2
+#define QEDI_IN_RECOVERY 5
+#define QEDI_IN_OFFLINE 6
+
+ u8 mac[ETH_ALEN];
+ u32 src_ip[4];
+ u8 ip_type;
+
+ /* Physical address of above array */
+ dma_addr_t hw_p_cpuq;
+
+ struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
+ void *bdq_pbl;
+ dma_addr_t bdq_pbl_dma;
+ size_t bdq_pbl_mem_size;
+ void *bdq_pbl_list;
+ dma_addr_t bdq_pbl_list_dma;
+ u8 bdq_pbl_list_num_entries;
+ void __iomem *bdq_primary_prod;
+ void __iomem *bdq_secondary_prod;
+ u16 bdq_prod_idx;
+ u16 rq_num_entries;
+
+ u32 msix_count;
+ u32 max_sqes;
+ u8 num_queues;
+ u32 max_active_conns;
+
+ struct iscsi_cid_queue cid_que;
+ struct qedi_endpoint **ep_tbl;
+ struct qedi_portid_tbl lcl_port_tbl;
+
+ /* Rx fast path intr context */
+ struct qed_sb_info *sb_array;
+ struct qedi_fastpath *fp_array;
+ struct qed_iscsi_tid tasks;
+
+#define QEDI_LINK_DOWN 0
+#define QEDI_LINK_UP 1
+ atomic_t link_state;
+
+#define QEDI_RESERVE_TASK_ID 0
+#define MAX_ISCSI_TASK_ENTRIES 4096
+#define QEDI_INVALID_TASK_ID (MAX_ISCSI_TASK_ENTRIES + 1)
+ unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
+ struct qedi_itt_map *itt_map;
+ u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
+ struct qed_pf_params pf_params;
+
+ struct workqueue_struct *tmf_thread;
+ struct workqueue_struct *offload_thread;
+
+ u16 ll2_mtu;
+
+ struct workqueue_struct *dpc_wq;
+
+ spinlock_t task_idx_lock; /* To protect gbl context */
+ s32 last_tidx_alloc;
+ s32 last_tidx_clear;
+
+ struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
+ spinlock_t io_trace_lock; /* protect trace log buf */
+ u16 io_trace_idx;
+ unsigned int intr_cpu;
+ u32 cached_sgls;
+ bool use_cached_sge;
+ u32 slow_sgls;
+ bool use_slow_sge;
+ u32 fast_sgls;
+ bool use_fast_sge;
+
+ atomic_t num_offloads;
+};
+
+struct qedi_work {
+ struct list_head list;
+ struct qedi_ctx *qedi;
+ union iscsi_cqe cqe;
+ u16 que_idx;
+ bool is_solicited;
+};
+
+struct qedi_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t p_work_lock; /* Per cpu worker lock */
+};
+
+static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid)
+{
+ return (info->blocks[tid / info->num_tids_per_block] +
+ (tid % info->num_tids_per_block) * info->size);
+}
+
+#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
+#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+
+#endif /* _QEDI_H_ */
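qedi_get_task_mem() above resolves a task ID into firmware task-context memory by splitting the ID into a block index and an offset within that block. A small, self-contained illustration of the same arithmetic with made-up block sizes (the field values are hypothetical, not hardware numbers):

#include <stdio.h>

/* Simplified mirror of struct qed_iscsi_tid, for illustration only. */
struct tid_info {
        unsigned int num_tids_per_block;
        unsigned int size;              /* bytes per task context */
        char        *blocks[4];         /* base address of each block */
};

static void *get_task_mem(struct tid_info *info, unsigned int tid)
{
        return info->blocks[tid / info->num_tids_per_block] +
               (tid % info->num_tids_per_block) * info->size;
}

int main(void)
{
        static char block0[1024], block1[1024];
        struct tid_info info = {
                .num_tids_per_block = 8,
                .size = 128,
                .blocks = { block0, block1 },
        };

        /* tid 10 -> block 1 (10 / 8), offset (10 % 8) * 128 = 256 bytes. */
        printf("tid 10 -> %p (block1 + %td)\n",
               get_task_mem(&info, 10),
               (char *)get_task_mem(&info, 10) - block1);
        return 0;
}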
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
new file mode 100644
index 000000000000..2bdedb9c39bc
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -0,0 +1,143 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi_dbg.h"
+#include <linux/vmalloc.h>
+
+void
+qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (!(qedi_dbg_log & QEDI_LOG_WARN))
+ return;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
+ return;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_notice("[%s]:[%s:%d]:%d: %pV",
+ dev_name(&qedi->pdev->dev), nfunc, line,
+ qedi->host_no, &vaf);
+ else
+ pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ u32 level, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (!(qedi_dbg_log & level))
+ return;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+int
+qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+ int ret = 0;
+
+ for (; iter->name; iter++) {
+ ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
+ iter->attr);
+ if (ret)
+ pr_err("Unable to create sysfs %s attr, err(%d).\n",
+ iter->name, ret);
+ }
+ return ret;
+}
+
+void
+qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+ for (; iter->name; iter++)
+ sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
+}
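qedi_create_sysfs_attr()/qedi_remove_sysfs_attr() above walk a NULL-terminated table of name/attribute pairs under the Scsi_Host kobject. A minimal sketch of how a caller might build such a table; the attribute name and its contents are hypothetical, only the helper signatures and struct sysfs_bin_attrs come from the code above:

#include <linux/sysfs.h>
#include "qedi_dbg.h"           /* struct sysfs_bin_attrs and the helpers above */

/* Hypothetical binary attribute exposed under the Scsi_Host kobject. */
static struct bin_attribute example_dump_attr = {
        .attr = { .name = "example_dump", .mode = 0444 },
        /* .read would point at a driver-specific dump routine */
};

static struct sysfs_bin_attrs host_bin_attrs[] = {
        { "example_dump", &example_dump_attr },
        { NULL, NULL },         /* terminator expected by the iteration above */
};

static int example_register(struct Scsi_Host *shost)
{
        return qedi_create_sysfs_attr(shost, host_bin_attrs);
}

static void example_unregister(struct Scsi_Host *shost)
{
        qedi_remove_sysfs_attr(shost, host_bin_attrs);
}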
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
new file mode 100644
index 000000000000..c55572badfb0
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -0,0 +1,144 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_DBG_H_
+#define _QEDI_DBG_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <linux/fs.h>
+
+#define __PREVENT_QED_HSI__
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_if.h>
+
+extern uint qedi_dbg_log;
+
+/* Debug print level definitions */
+#define QEDI_LOG_DEFAULT 0x1 /* Set default logging mask */
+#define QEDI_LOG_INFO 0x2 /* Informational logs,
+ * MAC address, WWPN, WWNN
+ */
+#define QEDI_LOG_DISC 0x4 /* Init, discovery, rport */
+#define QEDI_LOG_LL2 0x8 /* LL2, VLAN logs */
+#define QEDI_LOG_CONN 0x10 /* Connection setup, cleanup */
+#define QEDI_LOG_EVT 0x20 /* Events, link, mtu */
+#define QEDI_LOG_TIMER 0x40 /* Timer events */
+#define QEDI_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */
+#define QEDI_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */
+#define QEDI_LOG_UNSOL 0x200 /* unsolicited event logs */
+#define QEDI_LOG_IO 0x400 /* scsi cmd, completion */
+#define QEDI_LOG_MQ 0x800 /* Multi Queue logs */
+#define QEDI_LOG_BSG 0x1000 /* BSG logs */
+#define QEDI_LOG_DEBUGFS 0x2000 /* debugFS logs */
+#define QEDI_LOG_LPORT 0x4000 /* lport logs */
+#define QEDI_LOG_ELS 0x8000 /* ELS logs */
+#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */
+#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */
+#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */
+#define QEDI_LOG_TID 0x80000 /* FW TID context acquire,
+ * free
+ */
+#define QEDI_TRACK_TID 0x100000 /* Track TID state. To be
+ * enabled only at module load
+ * and not run-time.
+ */
+#define QEDI_TRACK_CMD_LIST 0x300000 /* Track active cmd list nodes,
+ * done with reference to TID,
+ * hence TRACK_TID also enabled.
+ */
+#define QEDI_LOG_NOTICE 0x40000000 /* Notice logs */
+#define QEDI_LOG_WARN 0x80000000 /* Warning logs */
+
+/* Debug context structure */
+struct qedi_dbg_ctx {
+ unsigned int host_no;
+ struct pci_dev *pdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *bdf_dentry;
+#endif
+};
+
+#define QEDI_ERR(pdev, fmt, ...) \
+ qedi_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_WARN(pdev, fmt, ...) \
+ qedi_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_NOTICE(pdev, fmt, ...) \
+ qedi_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_INFO(pdev, level, fmt, ...) \
+ qedi_dbg_info(pdev, __func__, __LINE__, level, fmt, \
+ ## __VA_ARGS__)
+
+void qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ u32 info, const char *fmt, ...);
+
+struct Scsi_Host;
+
+struct sysfs_bin_attrs {
+ char *name;
+ struct bin_attribute *attr;
+};
+
+int qedi_create_sysfs_attr(struct Scsi_Host *shost,
+ struct sysfs_bin_attrs *iter);
+void qedi_remove_sysfs_attr(struct Scsi_Host *shost,
+ struct sysfs_bin_attrs *iter);
+
+#ifdef CONFIG_DEBUG_FS
+/* DebugFS related code */
+struct qedi_list_of_funcs {
+ char *oper_str;
+ ssize_t (*oper_func)(struct qedi_dbg_ctx *qedi);
+};
+
+struct qedi_debugfs_ops {
+ char *name;
+ struct qedi_list_of_funcs *qedi_funcs;
+};
+
+#define qedi_dbg_fileops(drv, ops) \
+{ \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = drv##_dbg_##ops##_cmd_read, \
+ .write = drv##_dbg_##ops##_cmd_write \
+}
+
+/* Used for debugfs sequential files */
+#define qedi_dbg_fileops_seq(drv, ops) \
+{ \
+ .owner = THIS_MODULE, \
+ .open = drv##_dbg_##ops##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+ struct qedi_debugfs_ops *dops,
+ const struct file_operations *fops);
+void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi);
+void qedi_dbg_init(char *drv_name);
+void qedi_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _QEDI_DBG_H_ */
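The QEDI_ERR/QEDI_WARN/QEDI_NOTICE/QEDI_INFO macros above prepend the PCI device name, function, line and host number, and the info/notice/warn variants are gated by the module-wide qedi_dbg_log mask against the QEDI_LOG_* bits. A minimal sketch of a call site, assuming a qedi_ctx that embeds the dbg_ctx as defined in qedi.h (the message text is made up):

#include "qedi.h"       /* pulls in qedi_dbg.h, QEDI_LOG_* and the macros above */

static void example_log(struct qedi_ctx *qedi, u16 cid)
{
        /* Emitted only when the QEDI_LOG_CONN bit is set in qedi_dbg_log. */
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                  "connection setup started, cid=0x%x\n", cid);

        /* Errors are unconditional. */
        QEDI_ERR(&qedi->dbg_ctx, "example failure, cid=0x%x\n", cid);
}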
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
new file mode 100644
index 000000000000..955936274241
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -0,0 +1,244 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_dbg.h"
+
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+int do_not_recover;
+static struct dentry *qedi_dbg_root;
+
+void
+qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+ struct qedi_debugfs_ops *dops,
+ const struct file_operations *fops)
+{
+ char host_dirname[32];
+ struct dentry *file_dentry = NULL;
+
+ sprintf(host_dirname, "host%u", qedi->host_no);
+ qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root);
+ if (!qedi->bdf_dentry)
+ return;
+
+ while (dops) {
+ if (!(dops->name))
+ break;
+
+ file_dentry = debugfs_create_file(dops->name, 0600,
+ qedi->bdf_dentry, qedi,
+ fops);
+ if (!file_dentry) {
+ QEDI_INFO(qedi, QEDI_LOG_DEBUGFS,
+ "Debugfs entry %s creation failed\n",
+ dops->name);
+ debugfs_remove_recursive(qedi->bdf_dentry);
+ return;
+ }
+ dops++;
+ fops++;
+ }
+}
+
+void
+qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi)
+{
+ debugfs_remove_recursive(qedi->bdf_dentry);
+ qedi->bdf_dentry = NULL;
+}
+
+void
+qedi_dbg_init(char *drv_name)
+{
+ qedi_dbg_root = debugfs_create_dir(drv_name, NULL);
+ if (!qedi_dbg_root)
+ QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n");
+}
+
+void
+qedi_dbg_exit(void)
+{
+ debugfs_remove_recursive(qedi_dbg_root);
+ qedi_dbg_root = NULL;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
+{
+ if (!do_not_recover)
+ do_not_recover = 1;
+
+ QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+ do_not_recover);
+ return 0;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
+{
+ if (do_not_recover)
+ do_not_recover = 0;
+
+ QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+ do_not_recover);
+ return 0;
+}
+
+static struct qedi_list_of_funcs qedi_dbg_do_not_recover_ops[] = {
+ { "enable", qedi_dbg_do_not_recover_enable },
+ { "disable", qedi_dbg_do_not_recover_disable },
+ { NULL, NULL }
+};
+
+struct qedi_debugfs_ops qedi_debugfs_ops[] = {
+ { "gbl_ctx", NULL },
+ { "do_not_recover", qedi_dbg_do_not_recover_ops},
+ { "io_trace", NULL },
+ { NULL, NULL }
+};
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ size_t cnt = 0;
+ struct qedi_dbg_ctx *qedi_dbg =
+ (struct qedi_dbg_ctx *)filp->private_data;
+ struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops;
+
+ if (*ppos)
+ return 0;
+
+ while (lof) {
+ if (!(lof->oper_str))
+ break;
+
+ if (!strncmp(lof->oper_str, buffer, strlen(lof->oper_str))) {
+ cnt = lof->oper_func(qedi_dbg);
+ break;
+ }
+
+ lof++;
+ }
+ return (count - cnt);
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ size_t cnt = 0;
+
+ if (*ppos)
+ return 0;
+
+ cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
+ cnt = min_t(int, count, cnt - *ppos);
+ *ppos += cnt;
+ return cnt;
+}
+
+static int
+qedi_gbl_ctx_show(struct seq_file *s, void *unused)
+{
+ struct qedi_fastpath *fp = NULL;
+ struct qed_sb_info *sb_info = NULL;
+ struct status_block *sb = NULL;
+ struct global_queue *que = NULL;
+ int id;
+ u16 prod_idx;
+ struct qedi_ctx *qedi = s->private;
+ unsigned long flags;
+
+ seq_puts(s, " DUMP CQ CONTEXT:\n");
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id);
+ fp = &qedi->fp_array[id];
+ sb_info = fp->sb_info;
+ sb = sb_info->sb_virt;
+ prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
+ STATUS_BLOCK_PROD_INDEX_MASK);
+ seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
+ que = qedi->global_queues[fp->sb_id];
+ seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
+ seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id);
+ seq_puts(s, "=========== END ==================\n\n\n");
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+ return 0;
+}
+
+static int
+qedi_dbg_gbl_ctx_open(struct inode *inode, struct file *file)
+{
+ struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+ struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+ dbg_ctx);
+
+ return single_open(file, qedi_gbl_ctx_show, qedi);
+}
+
+static int
+qedi_io_trace_show(struct seq_file *s, void *unused)
+{
+ int id, idx = 0;
+ struct qedi_ctx *qedi = s->private;
+ struct qedi_io_log *io_log;
+ unsigned long flags;
+
+ seq_puts(s, " DUMP IO LOGS:\n");
+ spin_lock_irqsave(&qedi->io_trace_lock, flags);
+ idx = qedi->io_trace_idx;
+ for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) {
+ io_log = &qedi->io_trace_buf[idx];
+ seq_printf(s, "iodir-%d:", io_log->direction);
+ seq_printf(s, "tid-0x%x:", io_log->task_id);
+ seq_printf(s, "cid-0x%x:", io_log->cid);
+ seq_printf(s, "lun-%d:", io_log->lun);
+ seq_printf(s, "op-0x%02x:", io_log->op);
+ seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
+ io_log->lba[1], io_log->lba[2], io_log->lba[3]);
+ seq_printf(s, "buflen-%d:", io_log->bufflen);
+ seq_printf(s, "sgcnt-%d:", io_log->sg_count);
+ seq_printf(s, "res-0x%08x:", io_log->result);
+ seq_printf(s, "jif-%lu:", io_log->jiffies);
+ seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu);
+ seq_printf(s, "req_cpu-%d:", io_log->req_cpu);
+ seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu);
+ seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu);
+
+ idx++;
+ if (idx == QEDI_IO_TRACE_SIZE)
+ idx = 0;
+ }
+ spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+ return 0;
+}
+
+static int
+qedi_dbg_io_trace_open(struct inode *inode, struct file *file)
+{
+ struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+ struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+ dbg_ctx);
+
+ return single_open(file, qedi_io_trace_show, qedi);
+}
+
+const struct file_operations qedi_dbg_fops[] = {
+ qedi_dbg_fileops_seq(qedi, gbl_ctx),
+ qedi_dbg_fileops(qedi, do_not_recover),
+ qedi_dbg_fileops_seq(qedi, io_trace),
+ { NULL, NULL },
+};
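The do_not_recover debugfs file above accepts the strings "enable" and "disable" on write and reports the current value on read. A small user-space sketch of toggling it; the path is an assumption that depends on the directory name passed to qedi_dbg_init(), the SCSI host number, and debugfs being mounted at /sys/kernel/debug:

#include <stdio.h>

int main(void)
{
        /* Path assumes qedi_dbg_init("qedi") and SCSI host number 0. */
        const char *path = "/sys/kernel/debug/qedi/host0/do_not_recover";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fputs("enable", f);     /* matched against qedi_dbg_do_not_recover_ops[] */
        fclose(f);
        return 0;
}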
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
new file mode 100644
index 000000000000..b1d3904ae8fd
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -0,0 +1,2378 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/delay.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask);
+
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+ if (cmd->io_tbl.sge_valid && sc) {
+ cmd->io_tbl.sge_valid = 0;
+ scsi_dma_unmap(sc);
+ }
+}
+
+static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_logout_rsp *resp_hdr;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_logout_response_hdr *cqe_logout_response;
+ struct qedi_cmd *cmd;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+ cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
+ spin_lock(&session->back_lock);
+ resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = cqe_logout_response->opcode;
+ resp_hdr->flags = cqe_logout_response->flags;
+ resp_hdr->hlength = 0;
+
+ resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+ resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
+ resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
+
+ resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
+ resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id,
+ &cmd->io_cmd);
+ }
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_text_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_task_context *task_ctx;
+ struct iscsi_text_rsp *resp_hdr_ptr;
+ struct iscsi_text_response_hdr *cqe_text_response;
+ struct qedi_cmd *cmd;
+ int pld_len;
+ u32 *tmp;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+ task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+ cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr_ptr->opcode = cqe_text_response->opcode;
+ resp_hdr_ptr->flags = cqe_text_response->flags;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_text_response->hdr_second_dword &
+ ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->ttt = cqe_text_response->ttt;
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);
+
+ pld_len = cqe_text_response->hdr_second_dword &
+ ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+ memset(task_ctx, '\0', sizeof(*task_ctx));
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id,
+ &cmd->io_cmd);
+ }
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+ qedi_conn->gen_pdu.resp_buf,
+ (qedi_conn->gen_pdu.resp_wr_ptr -
+ qedi_conn->gen_pdu.resp_buf));
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_tmf_resp_work(struct work_struct *work)
+{
+ struct qedi_cmd *qedi_cmd =
+ container_of(work, struct qedi_cmd, tmf_work);
+ struct qedi_conn *qedi_conn = qedi_cmd->conn;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tm_rsp *resp_hdr_ptr;
+ struct iscsi_cls_session *cls_sess;
+ int rval = 0;
+
+ set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+ cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
+
+ iscsi_block_session(session->cls_session);
+ rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
+ if (rval) {
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+ iscsi_unblock_session(session->cls_session);
+ return;
+ }
+
+ iscsi_unblock_session(session->cls_session);
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+ spin_lock(&session->back_lock);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+ spin_unlock(&session->back_lock);
+ kfree(resp_hdr_ptr);
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
+static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tmf_response_hdr *cqe_tmp_response;
+ struct iscsi_tm_rsp *resp_hdr_ptr;
+ struct iscsi_tm *tmf_hdr;
+ struct qedi_cmd *qedi_cmd = NULL;
+ u32 *tmp;
+
+ cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
+
+ qedi_cmd = task->dd_data;
+ qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+ if (!qedi_cmd->tmf_resp_buf) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate resp buf, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
+
+ /* Fill up the header */
+ resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
+ resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
+ resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_tmp_response->hdr_second_dword &
+ ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
+
+ tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+
+ if (likely(qedi_cmd->io_cmd_in_list)) {
+ qedi_cmd->io_cmd_in_list = false;
+ list_del_init(&qedi_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
+ queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+ goto unblock_sess;
+ }
+
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+ kfree(resp_hdr_ptr);
+
+unblock_sess:
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_login_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_task_context *task_ctx;
+ struct iscsi_login_rsp *resp_hdr_ptr;
+ struct iscsi_login_response_hdr *cqe_login_response;
+ struct qedi_cmd *cmd;
+ int pld_len;
+ u32 *tmp;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+
+ cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
+ task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
+ resp_hdr_ptr->opcode = cqe_login_response->opcode;
+ resp_hdr_ptr->flags = cqe_login_response->flags_attr;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_login_response->hdr_second_dword &
+ ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->tsih = cqe_login_response->tsih;
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
+ resp_hdr_ptr->status_class = cqe_login_response->status_class;
+ resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
+ pld_len = cqe_login_response->hdr_second_dword &
+ ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ memset(task_ctx, '\0', sizeof(*task_ctx));
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+ qedi_conn->gen_pdu.resp_buf,
+ (qedi_conn->gen_pdu.resp_wr_ptr -
+ qedi_conn->gen_pdu.resp_buf));
+
+ spin_unlock(&session->back_lock);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+}
+
+static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ char *ptr, int len)
+{
+ u16 idx = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
+ len, qedi->bdq_prod_idx,
+ (qedi->bdq_prod_idx % qedi->rq_num_entries));
+
+ /* Obtain buffer address from rqe_opaque */
+ idx = cqe->rqe_opaque.lo;
+ if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+ idx);
+ return;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
+ cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
+ switch (cqe->unsol_cqe_type) {
+ case ISCSI_CQE_UNSOLICITED_SINGLE:
+ case ISCSI_CQE_UNSOLICITED_FIRST:
+ if (len)
+ memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
+ break;
+ case ISCSI_CQE_UNSOLICITED_MIDDLE:
+ case ISCSI_CQE_UNSOLICITED_LAST:
+ break;
+ default:
+ break;
+ }
+}
+
+static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ int count)
+{
+ u16 tmp;
+ u16 idx = 0;
+ struct scsi_bd *pbl;
+
+ /* Obtain buffer address from rqe_opaque */
+ idx = cqe->rqe_opaque.lo;
+ if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+ idx);
+ return;
+ }
+
+ pbl = (struct scsi_bd *)qedi->bdq_pbl;
+ pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
+ pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
+ pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
+ pbl, pbl->address.hi, pbl->address.lo, idx);
+ pbl->opaque.hi = 0;
+ pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+
+ /* Increment producer to let f/w know we've handled the frame */
+ qedi->bdq_prod_idx += count;
+
+ writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+ tmp = readw(qedi->bdq_primary_prod);
+
+ writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+ tmp = readw(qedi->bdq_secondary_prod);
+}
+
+static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ u32 pdu_len, u32 num_bdqs,
+ char *bdq_data)
+{
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "num_bdqs [%d]\n", num_bdqs);
+
+ qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
+ qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
+}
+
+static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn, u16 que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_nop_in_hdr *cqe_nop_in;
+ struct iscsi_nopin *hdr;
+ struct qedi_cmd *cmd;
+ int tgt_async_nop = 0;
+ u32 lun[2];
+ u32 pdu_len, num_bdqs;
+ char bdq_data[QEDI_BDQ_BUF_SIZE];
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+ cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
+
+ pdu_len = cqe_nop_in->hdr_second_dword &
+ ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+ hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = cqe_nop_in->opcode;
+ hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
+ hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
+ hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pdu_len, num_bdqs, bdq_data);
+ hdr->itt = RESERVED_ITT;
+ tgt_async_nop = 1;
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ goto done;
+ }
+
+ /* Response to one of our nop-outs */
+ if (task) {
+ cmd = task->dd_data;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ hdr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ lun[0] = 0xffffffff;
+ lun[1] = 0xffffffff;
+ memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ }
+
+done:
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);
+
+ spin_unlock_bh(&session->back_lock);
+ return tgt_async_nop;
+}
+
+static void qedi_process_async_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn,
+ u16 que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_async_msg_hdr *cqe_async_msg;
+ struct iscsi_async *resp_hdr;
+ u32 lun[2];
+ u32 pdu_len, num_bdqs;
+ char bdq_data[QEDI_BDQ_BUF_SIZE];
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+
+ cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
+ pdu_len = cqe_async_msg->hdr_second_dword &
+ ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pdu_len, num_bdqs, bdq_data);
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+
+ resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = cqe_async_msg->opcode;
+ resp_hdr->flags = 0x80;
+
+ lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
+ lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
+ memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
+ resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
+ resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
+
+ resp_hdr->async_event = cqe_async_msg->async_event;
+ resp_hdr->async_vcode = cqe_async_msg->async_vcode;
+
+ resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
+ resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
+ resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
+ pdu_len);
+
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn,
+ uint16_t que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_reject_hdr *cqe_reject;
+ struct iscsi_reject *hdr;
+ u32 pld_len, num_bdqs;
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+ cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
+ pld_len = cqe_reject->hdr_second_dword &
+ ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pld_len, num_bdqs, conn->data);
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+ hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = cqe_reject->opcode;
+ hdr->reason = cqe_reject->hdr_reason;
+ hdr->flags = cqe_reject->hdr_flags;
+ hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
+ ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
+ hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
+ hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
+ hdr->ffffffff = cpu_to_be32(0xffffffff);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ conn->data, pld_len);
+ spin_unlock_bh(&session->back_lock);
+}
+
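+/*
+ * Complete a SCSI response/data-in CQE: validate the command, rebuild the
+ * iSCSI SCSI response header (sense data, underrun fixup) and hand it to
+ * libiscsi.
+ */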
+static void qedi_scsi_completion(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct iscsi_conn *conn)
+{
+ struct scsi_cmnd *sc_cmd;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_scsi_rsp *hdr;
+ struct iscsi_data_in_hdr *cqe_data_in;
+ int datalen = 0;
+ struct qedi_conn *qedi_conn;
+ u32 iscsi_cid;
+ bool mark_cmd_node_deleted = false;
+ u8 cqe_err_bits = 0;
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+ cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
+ cqe_err_bits =
+ cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+ spin_lock_bh(&session->back_lock);
+ /* get the scsi command */
+ sc_cmd = cmd->scsi_cmd;
+
+ if (!sc_cmd) {
+ QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
+ goto error;
+ }
+
+ if (!sc_cmd->SCp.ptr) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "SCp.ptr is NULL, returned in another context.\n");
+ goto error;
+ }
+
+ if (!sc_cmd->request) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "sc_cmd->request is NULL, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ if (!sc_cmd->request->special) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "request->special is NULL so request not valid, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ if (!sc_cmd->request->q) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "request->q is NULL so request is not valid, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ qedi_iscsi_unmap_sg_list(cmd);
+
+ hdr = (struct iscsi_scsi_rsp *)task->hdr;
+ hdr->opcode = cqe_data_in->opcode;
+ hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
+ hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+ hdr->response = cqe_data_in->reserved1;
+ hdr->cmd_status = cqe_data_in->status_rsvd;
+ hdr->flags = cqe_data_in->flags;
+ hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);
+
+ if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
+ datalen = cqe_data_in->reserved2 &
+ ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
+ memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
+ }
+
+ /* If f/w reports data underrun err then set residual to IO transfer
+ * length, set Underrun flag and clear Overrun flag explicitly
+ */
+ if (unlikely(cqe_err_bits &&
+ GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
+ hdr->itt, cqe_data_in->flags, cmd->task_id,
+ qedi_conn->iscsi_conn_id, hdr->residual_count,
+ scsi_bufflen(sc_cmd));
+ hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
+ hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+ hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
+ }
+
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ mark_cmd_node_deleted = true;
+ }
+ spin_unlock(&qedi_conn->list_lock);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ if (qedi_io_tracing)
+ qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
+
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ conn->data, datalen);
+error:
+ spin_unlock_bh(&session->back_lock);
+}
+
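+/* Dispatch a solicited middle-path completion based on its iSCSI opcode. */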
+static void qedi_mtask_completion(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *conn, uint16_t que_idx)
+{
+ struct iscsi_conn *iscsi_conn;
+ u32 hdr_opcode;
+
+ hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+ iscsi_conn = conn->cls_conn->dd_data;
+
+ switch (hdr_opcode) {
+ case ISCSI_OPCODE_SCSI_RESPONSE:
+ case ISCSI_OPCODE_DATA_IN:
+ qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
+ break;
+ case ISCSI_OPCODE_LOGIN_RESPONSE:
+ qedi_process_login_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_TMF_RESPONSE:
+ qedi_process_tmf_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_TEXT_RESPONSE:
+ qedi_process_text_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_LOGOUT_RESPONSE:
+ qedi_process_logout_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_NOP_IN:
+ qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
+ break;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
+ }
+}
+
+static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
+ struct iscsi_cqe_solicited *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
+ "itid=0x%x, cmd task id=0x%x\n",
+ cqe->itid, cmd->task_id);
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+
+ spin_lock_bh(&session->back_lock);
+ __iscsi_put_task(task);
+ spin_unlock_bh(&session->back_lock);
+}
+
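+/*
+ * Handle a task-cleanup completion: match it against a pending abort TMF
+ * work entry, or against the bulk cleanup issued during recovery.
+ */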
+static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+ struct iscsi_cqe_solicited *cqe,
+ struct iscsi_task *task,
+ struct iscsi_conn *conn)
+{
+ struct qedi_work_map *work, *work_tmp;
+ u32 proto_itt = cqe->itid;
+ u32 ptmp_itt = 0;
+ itt_t protoitt = 0;
+ int found = 0;
+ struct qedi_cmd *qedi_cmd = NULL;
+ u32 rtid = 0;
+ u32 iscsi_cid;
+ struct qedi_conn *qedi_conn;
+ struct qedi_cmd *cmd_new, *dbg_cmd;
+ struct iscsi_task *mtask;
+ struct iscsi_tm *tmf_hdr = NULL;
+
+ iscsi_cid = cqe->conn_id;
+ qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+ /* Based on this itt get the corresponding qedi_cmd */
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
+ list) {
+ if (work->rtid == proto_itt) {
+ /* We found the command */
+ qedi_cmd = work->qedi_cmd;
+ if (!qedi_cmd->list_tmf_work) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
+ proto_itt, qedi_conn->iscsi_conn_id);
+ WARN_ON(1);
+ }
+ found = 1;
+ mtask = qedi_cmd->task;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ rtid = work->rtid;
+
+ list_del_init(&work->list);
+ kfree(work);
+ qedi_cmd->list_tmf_work = NULL;
+ }
+ }
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ if (found) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
+ proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ spin_lock_bh(&conn->session->back_lock);
+
+ protoitt = build_itt(get_itt(tmf_hdr->rtt),
+ conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+
+ spin_unlock_bh(&conn->session->back_lock);
+
+ if (!task) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt),
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ dbg_cmd = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(task->itt),
+ dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
+ qedi_cmd->state = CLEANUP_RECV;
+
+ qedi_clear_task_idx(qedi_conn->qedi, rtid);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_del_init(&dbg_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_cmd->state = CLEANUP_RECV;
+ wake_up_interruptible(&qedi_conn->wait_queue);
+ }
+ } else if (qedi_conn->cmd_cleanup_req > 0) {
+ spin_lock_bh(&conn->session->back_lock);
+ qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+ protoitt = build_itt(ptmp_itt, conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ spin_unlock_bh(&conn->session->back_lock);
+ if (!task) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "task is null, itid=0x%x, cid=0x%x\n",
+ cqe->itid, qedi_conn->iscsi_conn_id);
+ return;
+ }
+ qedi_conn->cmd_cleanup_cmpl++;
+ wake_up(&qedi_conn->wait_queue);
+ cmd_new = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cqe->itid, qedi_conn->iscsi_conn_id);
+ qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
+
+ } else {
+ qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+ protoitt = build_itt(ptmp_itt, conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
+ protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
+ WARN_ON(1);
+ }
+}
+
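+/*
+ * Fast-path handler: classify a CQE by type and route it to the matching
+ * completion routine.
+ */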
+void qedi_fp_process_cqes(struct qedi_work *work)
+{
+ struct qedi_ctx *qedi = work->qedi;
+ union iscsi_cqe *cqe = &work->cqe;
+ struct iscsi_task *task = NULL;
+ struct iscsi_nopout *nopout_hdr;
+ struct qedi_conn *q_conn;
+ struct iscsi_conn *conn;
+ struct qedi_cmd *qedi_cmd;
+ u32 comp_type;
+ u32 iscsi_cid;
+ u32 hdr_opcode;
+ u16 que_idx = work->que_idx;
+ u8 cqe_err_bits = 0;
+
+ comp_type = cqe->cqe_common.cqe_type;
+ hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+ cqe_err_bits =
+ cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
+ cqe->cqe_common.conn_id, comp_type, hdr_opcode);
+
+ if (comp_type >= MAX_ISCSI_CQES_TYPE) {
+ QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
+ return;
+ }
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+ if (!q_conn) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Session no longer exists for cid=0x%x!!\n",
+ iscsi_cid);
+ return;
+ }
+
+ conn = q_conn->cls_conn->dd_data;
+
+ if (unlikely(cqe_err_bits &&
+ GET_FIELD(cqe_err_bits,
+ CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
+ iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+ return;
+ }
+
+ switch (comp_type) {
+ case ISCSI_CQE_TYPE_SOLICITED:
+ case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+ qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
+ task = qedi_cmd->task;
+ if (!task) {
+ QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
+ return;
+ }
+
+ /* Process NOPIN local completion */
+ nopout_hdr = (struct iscsi_nopout *)task->hdr;
+ if ((nopout_hdr->itt == RESERVED_ITT) &&
+ (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
+ qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
+ task, q_conn);
+ } else {
+ cqe->cqe_solicited.itid =
+ qedi_get_itt(cqe->cqe_solicited);
+ /* Process other solicited responses */
+ qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
+ }
+ break;
+ case ISCSI_CQE_TYPE_UNSOLICITED:
+ switch (hdr_opcode) {
+ case ISCSI_OPCODE_NOP_IN:
+ qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ case ISCSI_OPCODE_ASYNC_MSG:
+ qedi_process_async_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ case ISCSI_OPCODE_REJECT:
+ qedi_process_reject_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ }
+ goto exit_fp_process;
+ case ISCSI_CQE_TYPE_DUMMY:
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
+ goto exit_fp_process;
+ case ISCSI_CQE_TYPE_TASK_CLEANUP:
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
+ qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
+ conn);
+ goto exit_fp_process;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
+ break;
+ }
+
+exit_fp_process:
+ return;
+}
+
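+/*
+ * Build a work-queue entry for this task on the connection's send queue
+ * and advance the SQ producer indices.
+ */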
+static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
+ u16 tid, uint16_t ptu_invalidate, int is_cleanup)
+{
+ struct iscsi_wqe *wqe;
+ struct iscsi_wqe_field *cont_field;
+ struct qedi_endpoint *ep;
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_login_req *login_hdr;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ login_hdr = (struct iscsi_login_req *)task->hdr;
+ ep = qedi_conn->ep;
+ wqe = &ep->sq[ep->sq_prod_idx];
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ ep->sq_prod_idx++;
+ ep->fw_sq_prod_idx++;
+ if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+ ep->sq_prod_idx = 0;
+
+ if (is_cleanup) {
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_TASK_CLEANUP);
+ wqe->task_id = tid;
+ return;
+ }
+
+ if (ptu_invalidate) {
+ SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
+ ISCSI_WQE_SET_PTU_INVALIDATE);
+ }
+
+ cont_field = &wqe->cont_prevtid_union.cont_field;
+
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ case ISCSI_OP_TEXT:
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_MIDDLE_PATH);
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+ 1);
+ cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
+ break;
+ case ISCSI_OP_LOGOUT:
+ case ISCSI_OP_NOOP_OUT:
+ case ISCSI_OP_SCSI_TMFUNC:
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+ break;
+ default:
+ if (!sc)
+ break;
+
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+ cont_field->contlen_cdbsize_field =
+ (sc->sc_data_direction == DMA_TO_DEVICE) ?
+ scsi_bufflen(sc) : 0;
+ if (cmd->use_slowpath)
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
+ else
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+ (sc->sc_data_direction ==
+ DMA_TO_DEVICE) ?
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid) : 0);
+ break;
+ }
+
+ wqe->task_id = tid;
+ /* Make sure SQ data is coherent */
+ wmb();
+}
+
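+/* Ring the connection doorbell so the hardware picks up new SQ entries. */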
+static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
+{
+ struct iscsi_db_data dbell = { 0 };
+
+ dbell.agg_flags = 0;
+
+ dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
+ dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
+ dbell.params |=
+ DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
+
+ dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
+ writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
+
+ /* Make sure the firmware write index is coherent. Keep both barriers as
+ * a failsafe: on some architectures they map to the same instruction,
+ * on others they are two distinct operations.
+ */
+ wmb();
+ mmiowb();
+ QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
+ "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
+ qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
+ qedi_conn->iscsi_conn_id);
+}
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_login_req *login_hdr;
+ struct iscsi_login_req_hdr *fw_login_req = NULL;
+ struct iscsi_cached_sge_ctx *cached_sge = NULL;
+ struct iscsi_sge *single_sge = NULL;
+ struct iscsi_sge *req_sge = NULL;
+ struct iscsi_sge *resp_sge = NULL;
+ struct qedi_cmd *qedi_cmd;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ login_hdr = (struct iscsi_login_req *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
+ fw_login_req->opcode = login_hdr->opcode;
+ fw_login_req->version_min = login_hdr->min_version;
+ fw_login_req->version_max = login_hdr->max_version;
+ fw_login_req->flags_attr = login_hdr->flags;
+ fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
+ fw_login_req->isid_d = *((u32 *)login_hdr->isid);
+ fw_login_req->tsih = login_hdr->tsih;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_login_req->cid = qedi_conn->iscsi_conn_id;
+ fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+ fw_login_req->exp_stat_sn = 0;
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = 0x2;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+ ntoh24(login_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+ fw_task_ctx->ustorm_st_context.task_type = 0x2;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ ntoh24(login_hdr->dlength);
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_logout_req_hdr *fw_logout_req = NULL;
+ struct iscsi_task_context *fw_task_ctx = NULL;
+ struct iscsi_logout *logout_hdr = NULL;
+ struct qedi_cmd *qedi_cmd = NULL;
+ s16 tid = 0;
+ s16 ptu_invalidate = 0;
+
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ logout_hdr = (struct iscsi_logout *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
+ fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
+ fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+ fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ fw_logout_req->cid = qedi_conn->iscsi_conn_id;
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
+
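+/*
+ * Issue firmware cleanup for every outstanding command on the connection
+ * (optionally restricted to one LUN for a LUN reset) and wait for the
+ * matching cleanup completions.
+ */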
+int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+ struct iscsi_task *task, bool in_recovery)
+{
+ int rval;
+ struct iscsi_task *ctask;
+ struct qedi_cmd *cmd, *cmd_tmp;
+ struct iscsi_tm *tmf_hdr;
+ unsigned int lun = 0;
+ bool lun_reset = false;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+
+ /* From recovery, task is NULL; from a TMF response, task is valid */
+ if (task) {
+ tmf_hdr = (struct iscsi_tm *)task->hdr;
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
+ lun_reset = true;
+ lun = scsilun_to_int(&tmf_hdr->lun);
+ }
+ }
+
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
+ qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
+ in_recovery, lun_reset);
+
+ if (lun_reset)
+ spin_lock_bh(&session->back_lock);
+
+ spin_lock(&qedi_conn->list_lock);
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+ io_cmd) {
+ ctask = cmd->task;
+ if (ctask == task)
+ continue;
+
+ if (lun_reset) {
+ if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
+ cmd->task_id, get_itt(ctask->itt),
+ cmd->scsi_cmd, cmd->scsi_cmd->device,
+ ctask->state, cmd->state,
+ qedi_conn->iscsi_conn_id);
+ if (cmd->scsi_cmd->device->lun != lun)
+ continue;
+ }
+ }
+ qedi_conn->cmd_cleanup_req++;
+ qedi_iscsi_cleanup_task(ctask, true);
+
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
+ &cmd->io_cmd, qedi_conn->iscsi_conn_id);
+ }
+
+ spin_unlock(&qedi_conn->list_lock);
+
+ if (lun_reset)
+ spin_unlock_bh(&session->back_lock);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "cmd_cleanup_req=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->iscsi_conn_id);
+
+ rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ ((qedi_conn->cmd_cleanup_req ==
+ qedi_conn->cmd_cleanup_cmpl) ||
+ qedi_conn->ep),
+ 5 * HZ);
+ if (rval) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ return 0;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_missing);
+ qedi_ops->common->drain(qedi->cdev);
+
+ /* Enable IOs for all other sessions except the current one. */
+ if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ (qedi_conn->cmd_cleanup_req ==
+ qedi_conn->cmd_cleanup_cmpl),
+ 5 * HZ)) {
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_available);
+ return -1;
+ }
+
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_available);
+
+ return 0;
+}
+
+void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_endpoint *qedi_ep;
+ int rval;
+
+ qedi_ep = qedi_conn->ep;
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ if (!qedi_ep) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Cannot proceed, ep already disconnected, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
+ qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
+
+ qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
+
+ rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
+ if (rval) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fatal error, need hard reset, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ WARN_ON(1);
+ }
+}
+
+static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ struct qedi_cmd *qedi_cmd,
+ struct qedi_work_map *list_work)
+{
+ struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
+ int wait;
+
+ wait = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ ((qedi_cmd->state ==
+ CLEANUP_RECV) ||
+ ((qedi_cmd->type == TYPEIO) &&
+ (cmd->state ==
+ RESPONSE_RECEIVED))),
+ 5 * HZ);
+ if (!wait) {
+ qedi_cmd->state = CLEANUP_WAIT_FAILED;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ return -1;
+ }
+ return 0;
+}
+
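+/*
+ * Deferred ABORT TASK handling: request firmware cleanup of the referenced
+ * command, wait for its completion, then send the TMF PDU.
+ */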
+static void qedi_tmf_work(struct work_struct *work)
+{
+ struct qedi_cmd *qedi_cmd =
+ container_of(work, struct qedi_cmd, tmf_work);
+ struct qedi_conn *qedi_conn = qedi_cmd->conn;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_cls_session *cls_sess;
+ struct qedi_work_map *list_work = NULL;
+ struct iscsi_task *mtask;
+ struct qedi_cmd *cmd;
+ struct iscsi_task *ctask;
+ struct iscsi_tm *tmf_hdr;
+ s16 rval = 0;
+ s16 tid = 0;
+
+ mtask = qedi_cmd->task;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
+ set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+
+ ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+ if (!ctask || !ctask->sc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
+ goto abort_ret;
+ }
+
+ cmd = (struct qedi_cmd *)ctask->dd_data;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
+ qedi_conn->iscsi_conn_id);
+
+ if (do_not_recover) {
+ QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
+ do_not_recover);
+ goto abort_ret;
+ }
+
+ list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
+ if (!list_work) {
+ QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n");
+ goto abort_ret;
+ }
+
+ qedi_cmd->type = TYPEIO;
+ list_work->qedi_cmd = qedi_cmd;
+ list_work->rtid = cmd->task_id;
+ list_work->state = QEDI_WORK_SCHEDULED;
+ qedi_cmd->list_tmf_work = list_work;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
+ list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
+ tmf_hdr->flags);
+
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ qedi_iscsi_cleanup_task(ctask, false);
+
+ rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
+ list_work);
+ if (rval == -1) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "FW cleanup got escalated, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ goto ldel_exit;
+ }
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ goto ldel_exit;
+ }
+
+ qedi_cmd->task_id = tid;
+ qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+abort_ret:
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ return;
+
+ldel_exit:
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ if (!qedi_cmd->list_tmf_work) {
+ list_del_init(&list_work->list);
+ qedi_cmd->list_tmf_work = NULL;
+ kfree(list_work);
+ }
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ spin_unlock(&qedi_conn->list_lock);
+
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_tmf_request_hdr *fw_tmf_request;
+ struct iscsi_sge *single_sge;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_cmd *cmd;
+ struct iscsi_task *ctask;
+ struct iscsi_tm *tmf_hdr;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ u32 lun[2];
+ s16 tid = 0, ptu_invalidate = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+
+ tid = qedi_cmd->task_id;
+ qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
+ fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
+ fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
+ memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+ fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
+ fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+ if (!ctask || !ctask->sc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not get reference task\n");
+ return 0;
+ }
+ cmd = (struct qedi_cmd *)ctask->dd_data;
+ fw_tmf_request->rtt =
+ qedi_set_itt(cmd->task_id,
+ get_itt(tmf_hdr->rtt));
+ } else {
+ fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+ }
+
+ fw_tmf_request->opcode = tmf_hdr->opcode;
+ fw_tmf_request->function = tmf_hdr->flags;
+ fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
+ fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
+
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
+ tid, mtask->itt, qedi_conn->iscsi_conn_id);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_tm *tmf_hdr;
+ struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ s16 tid = 0;
+
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ qedi_cmd->task = mtask;
+
+ /* If abort task then schedule the work and return */
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ qedi_cmd->state = CLEANUP_WAIT;
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
+ queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+
+ } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return -1;
+ }
+ qedi_cmd->task_id = tid;
+
+ qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+ } else {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_text_request_hdr *fw_text_request;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_sge *single_sge;
+ struct qedi_cmd *qedi_cmd;
+ /* For 6.5 hdr iscsi_hdr */
+ struct iscsi_text *text_hdr;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ text_hdr = (struct iscsi_text *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_text_request =
+ &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
+ fw_text_request->opcode = text_hdr->opcode;
+ fw_text_request->flags_attr = text_hdr->flags;
+
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_text_request->ttt = text_hdr->ttt;
+ fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+ fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+ fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = 0x2;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ ntoh24(text_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+ ntoh24(text_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.exp_data_sn =
+ be32_to_cpu(text_hdr->exp_statsn);
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+ fw_task_ctx->ustorm_st_context.task_type = 0x2;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ /* Add command in active command list */
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
+
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ char *datap, int data_len, int unsol)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_nop_out_hdr *fw_nop_out;
+ struct qedi_cmd *qedi_cmd;
+ /* For 6.5 hdr iscsi_hdr */
+ struct iscsi_nopout *nopout_hdr;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_sge *single_sge;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ u32 lun[2];
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ nopout_hdr = (struct iscsi_nopout *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+ return -ENOMEM;
+ }
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
+ SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+ SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+
+ memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+ fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
+ fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+
+ if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
+ fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
+ fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+ fw_task_ctx->ystorm_st_context.state.local_comp = 1;
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+ } else {
+ fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+ }
+
+ fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
+ fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+ fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
+
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
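+/*
+ * Split one DMA segment into SGEs no larger than QEDI_BD_SPLIT_SZ, breaking
+ * at the first page boundary when the address is not page aligned; returns
+ * the number of SGEs produced.
+ */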
+static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
+ int bd_index)
+{
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ int frag_size, sg_frags;
+
+ sg_frags = 0;
+
+ while (sg_len) {
+ if (addr % QEDI_PAGE_SIZE)
+ frag_size =
+ (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
+ else
+ frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
+ (sg_len % QEDI_BD_SPLIT_SZ);
+
+ if (frag_size == 0)
+ frag_size = QEDI_BD_SPLIT_SZ;
+
+ bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
+ bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
+ bd[bd_index + sg_frags].sge_len = (u16)frag_size;
+ QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
+ "split sge %d: addr=%llx, len=%x",
+ (bd_index + sg_frags), addr, frag_size);
+
+ addr += (u64)frag_size;
+ sg_frags++;
+ sg_len -= frag_size;
+ }
+ return sg_frags;
+}
+
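+/*
+ * DMA-map the command's scatter-gather list and build the driver SGE
+ * table; a single small segment becomes one cached SGE, and page-unaligned
+ * multi-segment lists force the slow path.
+ */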
+static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int bd_count = 0;
+ int sg_count;
+ int sg_len;
+ int sg_frags;
+ u64 addr, end_addr;
+ int i;
+
+ WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);
+
+ sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
+ scsi_sg_count(sc), sc->sc_data_direction);
+
+ /*
+ * New condition to send single SGE as cached-SGL.
+ * Single SGE with length less than 64K.
+ */
+ sg = scsi_sglist(sc);
+ if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+
+ bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
+ bd[bd_count].sge_addr.hi = (addr >> 32);
+ bd[bd_count].sge_len = (u16)sg_len;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "single-cashed-sgl: bd_count:%d addr=%llx, len=%x",
+ sg_count, addr, sg_len);
+
+ return ++bd_count;
+ }
+
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+ end_addr = (addr + sg_len);
+
+ /*
+ * first sg elem in the 'list',
+ * check if end addr is page-aligned.
+ */
+ if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
+ cmd->use_slowpath = true;
+
+ /*
+ * last sg elem in the 'list',
+ * check if start addr is page-aligned.
+ */
+ else if ((i == (sg_count - 1)) &&
+ (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
+ cmd->use_slowpath = true;
+
+ /*
+ * middle sg elements in list,
+ * check if start and end addr are page-aligned
+ */
+ else if ((i != 0) && (i != (sg_count - 1)) &&
+ ((addr % QEDI_PAGE_SIZE) ||
+ (end_addr % QEDI_PAGE_SIZE)))
+ cmd->use_slowpath = true;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
+ i, sg_len);
+
+ if (sg_len > QEDI_BD_SPLIT_SZ) {
+ sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
+ } else {
+ sg_frags = 1;
+ bd[bd_count].sge_addr.lo = addr & 0xffffffff;
+ bd[bd_count].sge_addr.hi = addr >> 32;
+ bd[bd_count].sge_len = sg_len;
+ }
+ byte_count += sg_len;
+ bd_count += sg_frags;
+ }
+
+ if (byte_count != scsi_bufflen(sc))
+ QEDI_ERR(&qedi->dbg_ctx,
+ "byte_count = %d != scsi_bufflen = %d\n", byte_count,
+ scsi_bufflen(sc));
+ else
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
+ byte_count);
+
+ WARN_ON(byte_count != scsi_bufflen(sc));
+
+ return bd_count;
+}
+
+static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
+{
+ int bd_count;
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+ if (scsi_sg_count(sc)) {
+ bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
+ if (bd_count == 0)
+ return;
+ } else {
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+
+ bd[0].sge_addr.lo = 0;
+ bd[0].sge_addr.hi = 0;
+ bd[0].sge_len = 0;
+ bd_count = 0;
+ }
+ cmd->io_tbl.sge_valid = bd_count;
+}
+
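+/* Copy the SCSI CDB into the firmware header as big-endian 32-bit words. */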
+static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
+{
+ u32 dword;
+ int lpcnt;
+ u8 *srcp;
+
+ lpcnt = sc->cmd_len / sizeof(dword);
+ srcp = (u8 *)sc->cmnd;
+ while (lpcnt--) {
+ memcpy(&dword, (const void *)srcp, 4);
+ *dstp = cpu_to_be32(dword);
+ srcp += 4;
+ dstp++;
+ }
+ if (sc->cmd_len & 0x3) {
+ dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
+ *dstp = cpu_to_be32(dword);
+ }
+}
+
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+ u16 tid, int8_t direction)
+{
+ struct qedi_io_log *io_log;
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct scsi_cmnd *sc_cmd = task->sc;
+ unsigned long flags;
+ u8 op;
+
+ spin_lock_irqsave(&qedi->io_trace_lock, flags);
+
+ io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
+ io_log->direction = direction;
+ io_log->task_id = tid;
+ io_log->cid = qedi_conn->iscsi_conn_id;
+ io_log->lun = sc_cmd->device->lun;
+ io_log->op = sc_cmd->cmnd[0];
+ op = sc_cmd->cmnd[0];
+ io_log->lba[0] = sc_cmd->cmnd[2];
+ io_log->lba[1] = sc_cmd->cmnd[3];
+ io_log->lba[2] = sc_cmd->cmnd[4];
+ io_log->lba[3] = sc_cmd->cmnd[5];
+ io_log->bufflen = scsi_bufflen(sc_cmd);
+ io_log->sg_count = scsi_sg_count(sc_cmd);
+ io_log->fast_sgs = qedi->fast_sgls;
+ io_log->cached_sgs = qedi->cached_sgls;
+ io_log->slow_sgs = qedi->slow_sgls;
+ io_log->cached_sge = qedi->use_cached_sge;
+ io_log->slow_sge = qedi->use_slow_sge;
+ io_log->fast_sge = qedi->use_fast_sge;
+ io_log->result = sc_cmd->result;
+ io_log->jiffies = jiffies;
+ io_log->blk_req_cpu = smp_processor_id();
+
+ if (direction == QEDI_IO_TRACE_REQ) {
+ /* For requests we only care about the submission CPU */
+ io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+ io_log->intr_cpu = 0;
+ io_log->blk_rsp_cpu = 0;
+ } else if (direction == QEDI_IO_TRACE_RSP) {
+ io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+ io_log->intr_cpu = qedi->intr_cpu;
+ io_log->blk_rsp_cpu = smp_processor_id();
+ }
+
+ qedi->io_trace_idx++;
+ if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
+ qedi->io_trace_idx = 0;
+
+ qedi->use_cached_sge = false;
+ qedi->use_slow_sge = false;
+ qedi->use_fast_sge = false;
+
+ spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+}
+
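+/*
+ * Build the firmware task context for a SCSI command: fill the
+ * Ystorm/Mstorm/Ustorm sections and choose cached, slow or fast SGL
+ * handling based on the mapped SG list.
+ */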
+int qedi_iscsi_send_ioreq(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_phys_sgl_ctx *phys_sgl;
+ struct iscsi_virt_sgl_ctx *virt_sgl;
+ struct ystorm_iscsi_task_st_ctx *yst_cxt;
+ struct mstorm_iscsi_task_st_ctx *mst_cxt;
+ struct iscsi_sgl *sgl_struct;
+ struct iscsi_sge *single_sge;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ enum iscsi_task_type task_type;
+ struct iscsi_cmd_hdr *fw_cmd;
+ u32 lun[2];
+ u32 exp_data;
+ u16 cq_idx = smp_processor_id() % qedi->num_queues;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+ u8 num_fast_sgs;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ qedi_iscsi_map_sg_list(cmd);
+
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ if (conn->session->initial_r2t_en) {
+ exp_data = min((conn->session->imm_data_en *
+ conn->max_xmit_dlength),
+ conn->session->first_burst);
+ exp_data = min(exp_data, scsi_bufflen(sc));
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ cpu_to_le32(exp_data);
+ } else {
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ min(conn->session->first_burst, scsi_bufflen(sc));
+ }
+
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
+ } else {
+ if (scsi_bufflen(sc))
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+ }
+
+ fw_cmd->lun.lo = be32_to_cpu(lun[0]);
+ fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_update_itt_map(qedi, tid, task->itt, cmd);
+ fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_cmd->expected_transfer_length = scsi_bufflen(sc);
+ fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+ fw_cmd->opcode = hdr->opcode;
+ qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
+ fw_task_ctx->mstorm_st_context.sense_db.hi =
+ (u32)((u64)cmd->sense_buffer_dma >> 32);
+ fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
+ fw_task_ctx->mstorm_st_context.task_type = task_type;
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
+ fw_task_ctx->ustorm_st_context.exp_data_sn =
+ be32_to_cpu(hdr->exp_statsn);
+ fw_task_ctx->ustorm_st_context.task_type = task_type;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+ num_fast_sgs = (cmd->io_tbl.sge_valid ?
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid) : 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
+
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
+ cmd->io_tbl.sge_valid);
+
+ yst_cxt = &fw_task_ctx->ystorm_st_context;
+ mst_cxt = &fw_task_ctx->mstorm_st_context;
+ /* Tx path */
+ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+ /* not considering superIO or FastIO */
+ if (cmd->io_tbl.sge_valid == 1) {
+ cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
+ cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
+ cached_sge->sge.sge_len = bd[0].sge_len;
+ qedi->cached_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
+ phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+ phys_sgl->sgl_base.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
+ qedi->slow_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES,
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid));
+ virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
+ virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+ virt_sgl->sgl_base.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ virt_sgl->sgl_initial_offset =
+ (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+ qedi->fast_sgls++;
+ }
+ fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+ fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+ } else {
+ /* Rx path */
+ if (cmd->io_tbl.sge_valid == 1) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ single_sge = &mst_cxt->sgl_union.single_sge;
+ single_sge->sge_addr.lo = bd[0].sge_addr.lo;
+ single_sge->sge_addr.hi = bd[0].sge_addr.hi;
+ single_sge->sge_len = bd[0].sge_len;
+ qedi->cached_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+ sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+ sgl_struct->sgl_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ sgl_struct->sgl_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ sgl_struct->updated_sge_size = 0;
+ sgl_struct->updated_sge_offset = 0;
+ qedi->slow_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+ sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+ sgl_struct->sgl_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ sgl_struct->sgl_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ sgl_struct->byte_offset =
+ (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ sgl_struct->updated_sge_size = 0;
+ sgl_struct->updated_sge_offset = 0;
+ qedi->fast_sgls++;
+ }
+ fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+ fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+ }
+
+	if (cmd->io_tbl.sge_valid == 1) {
+		/* Single-SGL */
+		qedi->use_cached_sge = true;
+	} else {
+		if (cmd->use_slowpath)
+			qedi->use_slow_sge = true;
+		else
+			qedi->use_fast_sge = true;
+	}
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+ (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
+ "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
+ "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
+ (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
+
+ /* Add command in active command list */
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
+ cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ if (qedi_io_tracing)
+ qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
+
+ return 0;
+}
+
+int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ s16 ptu_invalidate = 0;
+
+ QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
+ cmd->task_id, get_itt(task->itt), task->state,
+ cmd->state, qedi_conn->iscsi_conn_id);
+
+ qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
new file mode 100644
index 000000000000..8e488de88ece
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -0,0 +1,73 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_GBL_H_
+#define _QEDI_GBL_H_
+
+#include "qedi_iscsi.h"
+
+extern uint qedi_io_tracing;
+extern int do_not_recover;
+extern struct scsi_host_template qedi_host_template;
+extern struct iscsi_transport qedi_iscsi_transport;
+extern const struct qed_iscsi_ops *qedi_ops;
+extern struct qedi_debugfs_ops qedi_debugfs_ops;
+extern const struct file_operations qedi_dbg_fops;
+extern struct device_attribute *qedi_shost_attrs[];
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask);
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ char *datap, int data_len, int unsol);
+int qedi_iscsi_send_ioreq(struct iscsi_task *task);
+int qedi_get_task_idx(struct qedi_ctx *qedi);
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx);
+int qedi_iscsi_cleanup_task(struct iscsi_task *task,
+ bool mark_cmd_node_deleted);
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd);
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+ struct qedi_cmd *qedi_cmd);
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+ struct async_data *data);
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn);
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
+int qedi_recover_all_conns(struct qedi_ctx *qedi);
+void qedi_fp_process_cqes(struct qedi_work *work);
+int qedi_cleanup_all_io(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task, bool in_recovery);
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+ u16 tid, int8_t direction);
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
+int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_clearsq(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+
+#endif
diff --git a/drivers/scsi/qedi/qedi_hsi.h b/drivers/scsi/qedi/qedi_hsi.h
new file mode 100644
index 000000000000..8ca44c78f093
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_hsi.h
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef __QEDI_HSI__
+#define __QEDI_HSI__
+/*
+ * Add include to common target
+ */
+#include <linux/qed/common_hsi.h>
+
+/*
+ * Add include to common storage target
+ */
+#include <linux/qed/storage_common.h>
+
+/*
+ * Add include to common TCP target
+ */
+#include <linux/qed/tcp_common.h>
+
+/*
+ * Add include to common iSCSI target for both eCore and protocol driver
+ */
+#include <linux/qed/iscsi_common.h>
+
+/*
+ * iSCSI CMDQ element
+ */
+struct iscsi_cmdqe {
+ __le16 conn_id;
+ u8 invalid_command;
+ u8 cmd_hdr_type;
+ __le32 reserved1[2];
+ __le32 cmd_payload[13];
+};
+
+/*
+ * iSCSI CMD header type
+ */
+enum iscsi_cmd_hdr_type {
+ ISCSI_CMD_HDR_TYPE_BHS_ONLY /* iSCSI BHS with no expected AHS */,
+ ISCSI_CMD_HDR_TYPE_BHS_W_AHS /* iSCSI BHS with expected AHS */,
+ ISCSI_CMD_HDR_TYPE_AHS /* iSCSI AHS */,
+ MAX_ISCSI_CMD_HDR_TYPE
+};
+
+#endif /* __QEDI_HSI__ */
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
new file mode 100644
index 000000000000..d6a205433b66
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -0,0 +1,1624 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <scsi/scsi_tcq.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+int qedi_recover_all_conns(struct qedi_ctx *qedi)
+{
+ struct qedi_conn *qedi_conn;
+ int i;
+
+ for (i = 0; i < qedi->max_active_conns; i++) {
+ qedi_conn = qedi_get_conn_from_id(qedi, i);
+ if (!qedi_conn)
+ continue;
+
+ qedi_start_conn_recovery(qedi, qedi_conn);
+ }
+
+ return SUCCESS;
+}
+
+static int qedi_eh_host_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *shost = cmd->device->host;
+ struct qedi_ctx *qedi;
+
+ qedi = iscsi_host_priv(shost);
+
+ return qedi_recover_all_conns(qedi);
+}
+
+struct scsi_host_template qedi_host_template = {
+ .module = THIS_MODULE,
+ .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
+ .proc_name = QEDI_MODULE_NAME,
+ .queuecommand = iscsi_queuecommand,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler = iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
+ .eh_host_reset_handler = qedi_eh_host_reset,
+ .target_alloc = iscsi_target_alloc,
+ .change_queue_depth = scsi_change_queue_depth,
+ .can_queue = QEDI_MAX_ISCSI_TASK,
+ .this_id = -1,
+ .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
+ .max_sectors = 0xffff,
+ .cmd_per_lun = 128,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = qedi_shost_attrs,
+};
+
+static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ if (qedi_conn->gen_pdu.resp_bd_tbl) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.resp_bd_tbl,
+ qedi_conn->gen_pdu.resp_bd_dma);
+ qedi_conn->gen_pdu.resp_bd_tbl = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.req_bd_tbl) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.req_bd_tbl,
+ qedi_conn->gen_pdu.req_bd_dma);
+ qedi_conn->gen_pdu.req_bd_tbl = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.resp_buf) {
+ dma_free_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.resp_buf,
+ qedi_conn->gen_pdu.resp_dma_addr);
+ qedi_conn->gen_pdu.resp_buf = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.req_buf) {
+ dma_free_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.req_buf,
+ qedi_conn->gen_pdu.req_dma_addr);
+ qedi_conn->gen_pdu.req_buf = NULL;
+ }
+}
+
+static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ qedi_conn->gen_pdu.req_buf =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &qedi_conn->gen_pdu.req_dma_addr,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.req_buf)
+ goto login_req_buf_failure;
+
+ qedi_conn->gen_pdu.req_buf_size = 0;
+ qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf;
+
+ qedi_conn->gen_pdu.resp_buf =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &qedi_conn->gen_pdu.resp_dma_addr,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.resp_buf)
+ goto login_resp_buf_failure;
+
+ qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf;
+
+ qedi_conn->gen_pdu.req_bd_tbl =
+ dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.req_bd_tbl)
+ goto login_req_bd_tbl_failure;
+
+ qedi_conn->gen_pdu.resp_bd_tbl =
+ dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ &qedi_conn->gen_pdu.resp_bd_dma,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.resp_bd_tbl)
+ goto login_resp_bd_tbl_failure;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS,
+ "Allocation successful, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return 0;
+
+login_resp_bd_tbl_failure:
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.req_bd_tbl,
+ qedi_conn->gen_pdu.req_bd_dma);
+ qedi_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+ dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.resp_buf,
+ qedi_conn->gen_pdu.resp_dma_addr);
+ qedi_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+ dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.req_buf,
+ qedi_conn->gen_pdu.req_dma_addr);
+ qedi_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+ iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data,
+ "login resource alloc failed!!\n");
+ return -ENOMEM;
+}
+
+static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct qedi_cmd *cmd = task->dd_data;
+
+ if (cmd->io_tbl.sge_tbl)
+ dma_free_coherent(&qedi->pdev->dev,
+ QEDI_ISCSI_MAX_BDS_PER_CMD *
+ sizeof(struct iscsi_sge),
+ cmd->io_tbl.sge_tbl,
+ cmd->io_tbl.sge_tbl_dma);
+
+ if (cmd->sense_buffer)
+ dma_free_coherent(&qedi->pdev->dev,
+ SCSI_SENSE_BUFFERSIZE,
+ cmd->sense_buffer,
+ cmd->sense_buffer_dma);
+ }
+}
+
+static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
+ struct qedi_cmd *cmd)
+{
+ struct qedi_io_bdt *io = &cmd->io_tbl;
+ struct iscsi_sge *sge;
+
+ io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_ISCSI_MAX_BDS_PER_CMD *
+ sizeof(*sge),
+ &io->sge_tbl_dma, GFP_KERNEL);
+ if (!io->sge_tbl) {
+ iscsi_session_printk(KERN_ERR, session,
+ "Could not allocate BD table.\n");
+ return -ENOMEM;
+ }
+
+ io->sge_valid = 0;
+ return 0;
+}
+
+static int qedi_setup_cmd_pool(struct qedi_ctx *qedi,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct qedi_cmd *cmd = task->dd_data;
+
+ task->hdr = &cmd->hdr;
+ task->hdr_max = sizeof(struct iscsi_hdr);
+
+ if (qedi_alloc_sget(qedi, session, cmd))
+ goto free_sgets;
+
+ cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev,
+ SCSI_SENSE_BUFFERSIZE,
+ &cmd->sense_buffer_dma,
+ GFP_KERNEL);
+ if (!cmd->sense_buffer)
+ goto free_sgets;
+ }
+
+ return 0;
+
+free_sgets:
+ qedi_destroy_cmd_pool(qedi, session);
+ return -ENOMEM;
+}
+
+static struct iscsi_cls_session *
+qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max,
+ u16 qdepth, uint32_t initial_cmdsn)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_cls_session *cls_session;
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+
+ if (!ep)
+ return NULL;
+
+ qedi_ep = ep->dd_data;
+ shost = qedi_ep->qedi->shost;
+ qedi = iscsi_host_priv(shost);
+
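+	/* Clamp the requested queue depth to the adapter's SQ limits */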
+ if (cmds_max > qedi->max_sqes)
+ cmds_max = qedi->max_sqes;
+ else if (cmds_max < QEDI_SQ_WQES_MIN)
+ cmds_max = QEDI_SQ_WQES_MIN;
+
+ cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost,
+ cmds_max, 0, sizeof(struct qedi_cmd),
+ initial_cmdsn, ISCSI_MAX_TARGET);
+ if (!cls_session) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to setup session for ep=%p\n", qedi_ep);
+ return NULL;
+ }
+
+ if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to setup cmd pool for ep=%p\n", qedi_ep);
+ goto session_teardown;
+ }
+
+ return cls_session;
+
+session_teardown:
+ iscsi_session_teardown(cls_session);
+ return NULL;
+}
+
+static void qedi_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+
+ qedi_destroy_cmd_pool(qedi, session);
+ iscsi_session_teardown(cls_session);
+}
+
+static struct iscsi_cls_conn *
+qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct iscsi_cls_conn *cls_conn;
+ struct qedi_conn *qedi_conn;
+ struct iscsi_conn *conn;
+
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn),
+ cid);
+ if (!cls_conn) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n",
+ cid, cls_session);
+ return NULL;
+ }
+
+ conn = cls_conn->dd_data;
+ qedi_conn = conn->dd_data;
+ qedi_conn->cls_conn = cls_conn;
+ qedi_conn->qedi = qedi;
+ qedi_conn->ep = NULL;
+ qedi_conn->active_cmd_count = 0;
+ INIT_LIST_HEAD(&qedi_conn->active_cmd_list);
+ spin_lock_init(&qedi_conn->list_lock);
+
+ if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n",
+ cid, cls_session);
+ goto free_conn;
+ }
+
+ return cls_conn;
+
+free_conn:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+}
+
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
+{
+ iscsi_block_session(cls_session);
+}
+
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
+{
+ iscsi_unblock_session(cls_session);
+}
+
+static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ u32 iscsi_cid = qedi_conn->iscsi_conn_id;
+
+ if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) {
+ iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+ "conn bind - entry #%d not free\n",
+ iscsi_cid);
+ return -EBUSY;
+ }
+
+ qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn;
+ return 0;
+}
+
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid)
+{
+ if (!qedi->cid_que.conn_cid_tbl) {
+ QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n");
+		return NULL;
+	} else if (iscsi_cid >= qedi->max_active_conns) {
+ QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid);
+ return NULL;
+ }
+ return qedi->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ u64 transport_fd, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct qedi_endpoint *qedi_ep;
+ struct iscsi_endpoint *ep;
+
+ ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
+
+ qedi_ep = ep->dd_data;
+ if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+ (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
+ return -EINVAL;
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+ return -EINVAL;
+
+ qedi_ep->conn = qedi_conn;
+ qedi_conn->ep = qedi_ep;
+ qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
+ qedi_conn->fw_cid = qedi_ep->fw_cid;
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
+ return -EINVAL;
+
+ spin_lock_init(&qedi_conn->tmf_work_lock);
+ INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
+ init_waitqueue_head(&qedi_conn->wait_queue);
+ return 0;
+}
+
+static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ struct qed_iscsi_params_update *conn_info;
+ struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn;
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_endpoint *qedi_ep;
+ int rval;
+
+ qedi_ep = qedi_conn->ep;
+
+ conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+ if (!conn_info) {
+ QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ conn_info->update_flag = 0;
+
+ if (conn->hdrdgst_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true);
+ if (conn->datadgst_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true);
+ if (conn->session->initial_r2t_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T,
+ true);
+ if (conn->session->imm_data_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA,
+ true);
+
+ conn_info->max_seq_size = conn->session->max_burst;
+ conn_info->max_recv_pdu_length = conn->max_recv_dlength;
+ conn_info->max_send_pdu_length = conn->max_xmit_dlength;
+ conn_info->first_seq_length = conn->session->first_burst;
+ conn_info->exp_stat_sn = conn->exp_statsn;
+
+ rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle,
+ conn_info);
+ if (rval) {
+ rval = -ENXIO;
+ QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
+ goto update_conn_err;
+ }
+
+ kfree(conn_info);
+ rval = 0;
+
+update_conn_err:
+ return rval;
+}
+
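+/*
+ * Derive the TCP MSS from the path MTU minus IP, TCP and optional
+ * VLAN/timestamp-option overhead; fall back to DEF_MSS if that works
+ * out to zero.
+ */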
+static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
+{
+ u16 mss = 0;
+ u16 hdrs = TCP_HDR_LEN;
+
+ if (is_ipv6)
+ hdrs += IPV6_HDR_LEN;
+ else
+ hdrs += IPV4_HDR_LEN;
+
+ if (vlan_en)
+ hdrs += VLAN_LEN;
+
+ mss = pmtu - hdrs;
+
+ if (tcp_ts_en)
+ mss -= TCP_OPTION_LEN;
+
+ if (!mss)
+ mss = DEF_MSS;
+
+ return mss;
+}
+
+static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
+{
+ struct qedi_ctx *qedi = qedi_ep->qedi;
+ struct qed_iscsi_params_offload *conn_info;
+ int rval;
+ int i;
+
+ conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+ if (!conn_info) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate memory ep=%p\n", qedi_ep);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac);
+ ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac);
+
+ conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]);
+ conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]);
+
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ conn_info->ip_version = 0;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "After ntohl: src_addr=%pI4, dst_addr=%pI4\n",
+ qedi_ep->src_addr, qedi_ep->dst_addr);
+ } else {
+ for (i = 1; i < 4; i++) {
+ conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]);
+ conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]);
+ }
+
+ conn_info->ip_version = 1;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "After ntohl: src_addr=%pI6, dst_addr=%pI6\n",
+ qedi_ep->src_addr, qedi_ep->dst_addr);
+ }
+
+ conn_info->src.port = qedi_ep->src_port;
+ conn_info->dst.port = qedi_ep->dst_port;
+
+ conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE;
+ conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma;
+ conn_info->vlan_id = qedi_ep->vlan_id;
+
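+	/* Enable TCP timestamps, delayed ACK (with counter) and keepalive on the offloaded connection */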
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1);
+
+ conn_info->default_cq = (qedi_ep->fw_cid % 8);
+
+ conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
+ conn_info->dup_ack_theshold = 3;
+ conn_info->rcv_wnd = 65535;
+ conn_info->cwnd = DEF_MAX_CWND;
+
+ conn_info->ss_thresh = 65535;
+ conn_info->srtt = 300;
+ conn_info->rtt_var = 150;
+ conn_info->flow_label = 0;
+ conn_info->ka_timeout = DEF_KA_TIMEOUT;
+ conn_info->ka_interval = DEF_KA_INTERVAL;
+ conn_info->max_rt_time = DEF_MAX_RT_TIME;
+ conn_info->ttl = DEF_TTL;
+ conn_info->tos_or_tc = DEF_TOS;
+ conn_info->remote_port = qedi_ep->dst_port;
+ conn_info->local_port = qedi_ep->src_port;
+
+ conn_info->mss = qedi_calc_mss(qedi_ep->pmtu,
+ (qedi_ep->ip_type == TCP_IPV6),
+ 1, (qedi_ep->vlan_id != 0));
+
+ conn_info->rcv_wnd_scale = 4;
+ conn_info->ts_ticks_per_second = 1000;
+ conn_info->da_timeout_value = 200;
+ conn_info->ack_frequency = 2;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Default cq index [%d], mss [%d]\n",
+ conn_info->default_cq, conn_info->mss);
+
+ rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
+ if (rval)
+ QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
+ rval, qedi_ep);
+
+ kfree(conn_info);
+ return rval;
+}
+
+static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_ctx *qedi;
+ int rval;
+
+ qedi = qedi_conn->qedi;
+
+ rval = qedi_iscsi_update_conn(qedi, qedi_conn);
+ if (rval) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "conn_start: FW oflload conn failed.\n");
+ rval = -EINVAL;
+ goto start_err;
+ }
+
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ qedi_conn->abrt_conn = 0;
+
+ rval = iscsi_conn_start(cls_conn);
+ if (rval) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "iscsi_conn_start: FW oflload conn failed!!\n");
+ }
+
+start_err:
+ return rval;
+}
+
+static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi;
+
+ shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+ qedi = iscsi_host_priv(shost);
+
+ qedi_conn_free_login_resources(qedi, qedi_conn);
+ iscsi_conn_teardown(cls_conn);
+}
+
+static int qedi_ep_get_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct qedi_endpoint *qedi_ep = ep->dd_data;
+ int len;
+
+ if (!qedi_ep)
+ return -ENOTCONN;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ len = sprintf(buf, "%hu\n", qedi_ep->dst_port);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (qedi_ep->ip_type == TCP_IPV4)
+ len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr);
+ else
+ len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr);
+ break;
+ default:
+ return -ENOTCONN;
+ }
+
+ return len;
+}
+
+static int qedi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct qedi_ctx *qedi;
+ int len;
+
+ qedi = iscsi_host_priv(shost);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = sysfs_format_mac(buf, qedi->mac, 6);
+ break;
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ len = sprintf(buf, "host%d\n", shost->host_no);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (qedi->ip_type == TCP_IPV4)
+ len = sprintf(buf, "%pI4\n", qedi->src_ip);
+ else
+ len = sprintf(buf, "%pI6\n", qedi->src_ip);
+ break;
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+
+ return len;
+}
+
+static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qed_iscsi_stats iscsi_stats;
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi;
+
+ shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+ qedi = iscsi_host_priv(shost);
+ qedi_ops->get_stats(qedi->cdev, &iscsi_stats);
+
+ conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt;
+ conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt;
+ conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt;
+ conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt;
+ conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->digest_err = 0;
+ stats->timeout_err = 0;
+ strcpy(stats->custom[0].desc, "eh_abort_cnt");
+ stats->custom[0].value = conn->eh_abort_cnt;
+ stats->custom_length = 1;
+}
+
+static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
+{
+ struct iscsi_sge *bd_tbl;
+
+ bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+
+ bd_tbl->sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
+ bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
+ qedi_conn->gen_pdu.req_buf;
+ bd_tbl->reserved0 = 0;
+ bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ bd_tbl->sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
+ bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ bd_tbl->reserved0 = 0;
+}
+
+static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
+{
+ struct qedi_cmd *cmd = task->dd_data;
+ struct qedi_conn *qedi_conn = cmd->conn;
+ char *buf;
+ int data_len;
+ int rc = 0;
+
+ qedi_iscsi_prep_generic_pdu_bd(qedi_conn);
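+	/* Dispatch non-SCSI PDUs (login, nop-out, logout, TMF, text) to their firmware request builders */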
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ qedi_send_iscsi_login(qedi_conn, task);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ data_len = qedi_conn->gen_pdu.req_buf_size;
+ buf = qedi_conn->gen_pdu.req_buf;
+ if (data_len)
+ rc = qedi_send_iscsi_nopout(qedi_conn, task,
+ buf, data_len, 1);
+ else
+ rc = qedi_send_iscsi_nopout(qedi_conn, task,
+ NULL, 0, 1);
+ break;
+ case ISCSI_OP_LOGOUT:
+ rc = qedi_send_iscsi_logout(qedi_conn, task);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ rc = qedi_iscsi_abort_work(qedi_conn, task);
+ break;
+ case ISCSI_OP_TEXT:
+ rc = qedi_send_iscsi_text(qedi_conn, task);
+ break;
+ default:
+ iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+ "unsupported op 0x%x\n", task->hdr->opcode);
+ }
+
+ return rc;
+}
+
+static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+ qedi_conn->gen_pdu.req_buf_size = task->data_count;
+
+ if (task->data_count) {
+ memcpy(qedi_conn->gen_pdu.req_buf, task->data,
+ task->data_count);
+ qedi_conn->gen_pdu.req_wr_ptr =
+ qedi_conn->gen_pdu.req_buf + task->data_count;
+ }
+
+ cmd->conn = conn->dd_data;
+ cmd->scsi_cmd = NULL;
+ return qedi_iscsi_send_generic_request(task);
+}
+
+static int qedi_task_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+
+ cmd->state = 0;
+ cmd->task = NULL;
+ cmd->use_slowpath = false;
+ cmd->conn = qedi_conn;
+ cmd->task = task;
+ cmd->io_cmd_in_list = false;
+ INIT_LIST_HEAD(&cmd->io_cmd);
+
+ if (!sc)
+ return qedi_mtask_xmit(conn, task);
+
+ cmd->scsi_cmd = sc;
+ return qedi_iscsi_send_ioreq(task);
+}
+
+static struct iscsi_endpoint *
+qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ struct qedi_ctx *qedi;
+ struct iscsi_endpoint *ep;
+ struct qedi_endpoint *qedi_ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct qed_dev *cdev = NULL;
+ struct qedi_uio_dev *udev = NULL;
+ struct iscsi_path path_req;
+ u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+ u32 iscsi_cid = QEDI_CID_RESERVED;
+ u16 len = 0;
+ char *buf = NULL;
+ int ret;
+
+ if (!shost) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost is NULL\n");
+ return ERR_PTR(ret);
+ }
+
+ if (do_not_recover) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ qedi = iscsi_host_priv(shost);
+ cdev = qedi->cdev;
+ udev = qedi->udev;
+
+ if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
+ if (!ep) {
+ QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+ qedi_ep = ep->dd_data;
+ memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ qedi_ep->state = EP_STATE_IDLE;
+ qedi_ep->iscsi_cid = (u32)-1;
+ qedi_ep->qedi = qedi;
+
+ if (dst_addr->sa_family == AF_INET) {
+ addr = (struct sockaddr_in *)dst_addr;
+ memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr,
+ sizeof(struct in_addr));
+ qedi_ep->dst_port = ntohs(addr->sin_port);
+ qedi_ep->ip_type = TCP_IPV4;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "dst_addr=%pI4, dst_port=%u\n",
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else if (dst_addr->sa_family == AF_INET6) {
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ memcpy(qedi_ep->dst_addr, &addr6->sin6_addr,
+ sizeof(struct in6_addr));
+ qedi_ep->dst_port = ntohs(addr6->sin6_port);
+ qedi_ep->ip_type = TCP_IPV6;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "dst_addr=%pI6, dst_port=%u\n",
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
+ }
+
+ if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
+ QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
+ ret = -ENXIO;
+ goto ep_conn_exit;
+ }
+
+ ret = qedi_alloc_sq(qedi, qedi_ep);
+ if (ret)
+ goto ep_conn_exit;
+
+ ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle,
+ &qedi_ep->fw_cid, &qedi_ep->p_doorbell);
+
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n");
+ ret = -ENXIO;
+ goto ep_free_sq;
+ }
+
+ iscsi_cid = qedi_ep->handle;
+ qedi_ep->iscsi_cid = iscsi_cid;
+
+ init_waitqueue_head(&qedi_ep->ofld_wait);
+ init_waitqueue_head(&qedi_ep->tcp_ofld_wait);
+ qedi_ep->state = EP_STATE_OFLDCONN_START;
+ qedi->ep_tbl[iscsi_cid] = qedi_ep;
+
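+	/*
+	 * Ask the transport's userspace helper for path information (source
+	 * IP/MAC, VLAN, MTU) via ISCSI_KEVENT_PATH_REQ; the reply arrives in
+	 * qedi_set_path(), which queues the actual offload work.
+	 */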
+ buf = (char *)&path_req;
+ len = sizeof(path_req);
+ memset(&path_req, 0, len);
+
+ msg_type = ISCSI_KEVENT_PATH_REQ;
+ path_req.handle = (u64)qedi_ep->iscsi_cid;
+ path_req.pmtu = qedi->ll2_mtu;
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr,
+ sizeof(struct in_addr));
+ path_req.ip_addr_len = 4;
+ } else {
+ memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr,
+ sizeof(struct in6_addr));
+ path_req.ip_addr_len = 16;
+ }
+
+ ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf,
+ len);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n",
+ iscsi_cid, ret);
+ goto ep_rel_conn;
+ }
+
+ atomic_inc(&qedi->num_offloads);
+ return ep;
+
+ep_rel_conn:
+ qedi->ep_tbl[iscsi_cid] = NULL;
+ ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+ if (ret)
+ QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
+ ret);
+ep_free_sq:
+ qedi_free_sq(qedi, qedi_ep);
+ep_conn_exit:
+ iscsi_destroy_endpoint(ep);
+ return ERR_PTR(ret);
+}
+
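+/* ep_poll: return 1 once the offload has completed, 0 to keep polling, negative on failure */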
+static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct qedi_endpoint *qedi_ep;
+ int ret = 0;
+
+ if (do_not_recover)
+ return 1;
+
+ qedi_ep = ep->dd_data;
+ if (qedi_ep->state == EP_STATE_IDLE ||
+ qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+ return -1;
+
+ if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL)
+ ret = 1;
+
+ ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait,
+ QEDI_OFLD_WAIT_STATE(qedi_ep),
+ msecs_to_jiffies(timeout_ms));
+
+ if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+ ret = -1;
+
+ if (ret > 0)
+ return 1;
+ else if (!ret)
+ return 0;
+ else
+ return ret;
+}
+
+static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
+{
+ struct qedi_cmd *cmd, *cmd_tmp;
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+ io_cmd) {
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+}
+
+static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct qedi_endpoint *qedi_ep;
+ struct qedi_conn *qedi_conn = NULL;
+ struct iscsi_conn *conn = NULL;
+ struct qedi_ctx *qedi;
+ int ret = 0;
+ int wait_delay = 20 * HZ;
+ int abrt_conn = 0;
+ int count = 10;
+
+ qedi_ep = ep->dd_data;
+ qedi = qedi_ep->qedi;
+
+ flush_work(&qedi_ep->offload_work);
+
+ if (qedi_ep->conn) {
+ qedi_conn = qedi_ep->conn;
+ conn = qedi_conn->cls_conn->dd_data;
+ iscsi_suspend_queue(conn);
+ abrt_conn = qedi_conn->abrt_conn;
+
+ while (count--) {
+ if (!test_bit(QEDI_CONN_FW_CLEANUP,
+ &qedi_conn->flags)) {
+ break;
+ }
+ msleep(1000);
+ }
+
+ if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ if (do_not_recover) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Do not recover cid=0x%x\n",
+ qedi_ep->iscsi_cid);
+ goto ep_exit_recover;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n",
+ qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state);
+ qedi_cleanup_active_cmd_list(qedi_conn);
+ goto ep_release_conn;
+ }
+ }
+
+ if (do_not_recover)
+ goto ep_exit_recover;
+
+ switch (qedi_ep->state) {
+ case EP_STATE_OFLDCONN_START:
+ goto ep_release_conn;
+ case EP_STATE_OFLDCONN_FAILED:
+ break;
+ case EP_STATE_OFLDCONN_COMPL:
+ if (unlikely(!qedi_conn))
+ break;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n",
+ qedi_conn->active_cmd_count, abrt_conn,
+ qedi_ep->state,
+			  qedi_ep->iscsi_cid, qedi_ep->conn);
+
+ if (!qedi_conn->active_cmd_count)
+ abrt_conn = 0;
+ else
+ abrt_conn = 1;
+
+ if (abrt_conn)
+ qedi_clearsq(qedi, qedi_conn, NULL);
+ break;
+ default:
+ break;
+ }
+
+ qedi_ep->state = EP_STATE_DISCONN_START;
+ ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
+ if (ret) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "destroy_conn failed returned %d\n", ret);
+ } else {
+ ret = wait_event_interruptible_timeout(
+ qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state !=
+ EP_STATE_DISCONN_START),
+ wait_delay);
+ if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n",
+ ret, wait_delay, qedi_ep->iscsi_cid);
+ }
+ }
+
+ep_release_conn:
+ ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+ if (ret)
+ QEDI_WARN(&qedi->dbg_ctx,
+ "release_conn returned %d, cid=0x%x\n",
+ ret, qedi_ep->iscsi_cid);
+ep_exit_recover:
+ qedi_ep->state = EP_STATE_IDLE;
+ qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL;
+ qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL;
+ qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port);
+ qedi_free_sq(qedi, qedi_ep);
+
+ if (qedi_conn)
+ qedi_conn->ep = NULL;
+
+ qedi_ep->conn = NULL;
+ qedi_ep->qedi = NULL;
+ atomic_dec(&qedi->num_offloads);
+
+ iscsi_destroy_endpoint(ep);
+}
+
+static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
+{
+ struct qed_dev *cdev = qedi->cdev;
+ struct qedi_uio_dev *udev;
+ struct qedi_uio_ctrl *uctrl;
+ struct sk_buff *skb;
+ u32 len;
+ int rc = 0;
+
+ udev = qedi->udev;
+ if (!udev) {
+ QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n");
+ return -EINVAL;
+ }
+
+ uctrl = (struct qedi_uio_ctrl *)udev->uctrl;
+ if (!uctrl) {
+ QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n");
+ return -EINVAL;
+ }
+
+ len = uctrl->host_tx_pkt_len;
+ if (!len) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len);
+ return -EINVAL;
+ }
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n");
+ return -EINVAL;
+ }
+
+ skb_put(skb, len);
+ memcpy(skb->data, udev->tx_pkt, len);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (vlanid)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+
+ rc = qedi_ops->ll2->start_xmit(cdev, skb);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
+ rc);
+ kfree_skb(skb);
+ }
+
+ uctrl->host_tx_pkt_len = 0;
+ uctrl->hw_tx_cons++;
+
+ return rc;
+}
+
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 20 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
+static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
+{
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+ int ret = 0;
+ u32 iscsi_cid;
+ u16 port_id = 0;
+
+ if (!shost) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost is NULL\n");
+ return ret;
+ }
+
+ if (strcmp(shost->hostt->proc_name, "qedi")) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost %s is invalid\n",
+ shost->hostt->proc_name);
+ return ret;
+ }
+
+ qedi = iscsi_host_priv(shost);
+ if (path_data->handle == QEDI_PATH_HANDLE) {
+ ret = qedi_data_avail(qedi, path_data->vlan_id);
+ goto set_path_exit;
+ }
+
+ iscsi_cid = (u32)path_data->handle;
+ qedi_ep = qedi->ep_tbl[iscsi_cid];
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+
+ if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
+ QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+ ret = -EIO;
+ goto set_path_exit;
+ }
+
+ ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]);
+ ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]);
+
+ qedi_ep->vlan_id = path_data->vlan_id;
+ if (path_data->pmtu < DEF_PATH_MTU) {
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "MTU cannot be %u, using default MTU %u\n",
+ path_data->pmtu, qedi_ep->pmtu);
+ }
+
+ if (path_data->pmtu != qedi->ll2_mtu) {
+ if (path_data->pmtu > JUMBO_MTU) {
+ ret = -EINVAL;
+ QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu);
+ goto set_path_exit;
+ }
+
+ qedi_reset_host_mtu(qedi, path_data->pmtu);
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ }
+
+ port_id = qedi_ep->src_port;
+ if (port_id >= QEDI_LOCAL_PORT_MIN &&
+ port_id < QEDI_LOCAL_PORT_MAX) {
+ if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id))
+ port_id = 0;
+ } else {
+ port_id = 0;
+ }
+
+ if (!port_id) {
+ port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl);
+ if (port_id == QEDI_LOCAL_PORT_INVALID) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate port id for iscsi_cid=0x%x\n",
+ iscsi_cid);
+ ret = -ENOMEM;
+ goto set_path_exit;
+ }
+ }
+
+ qedi_ep->src_port = port_id;
+
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr,
+ sizeof(struct in_addr));
+ memcpy(&qedi->src_ip[0], &path_data->src.v4_addr,
+ sizeof(struct in_addr));
+ qedi->ip_type = TCP_IPV4;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n",
+ qedi_ep->src_addr, qedi_ep->src_port,
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else {
+ memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr,
+ sizeof(struct in6_addr));
+ memcpy(&qedi->src_ip[0], &path_data->src.v6_addr,
+ sizeof(struct in6_addr));
+ qedi->ip_type = TCP_IPV6;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n",
+ qedi_ep->src_addr, qedi_ep->src_port,
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ }
+
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
+ queue_work(qedi->offload_thread, &qedi_ep->offload_work);
+
+ ret = 0;
+
+set_path_exit:
+ return ret;
+}
+
+static umode_t qedi_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ return 0444;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ case ISCSI_PARAM_BOOT_ROOT:
+ case ISCSI_PARAM_BOOT_NIC:
+ case ISCSI_PARAM_BOOT_TARGET:
+ return 0444;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static void qedi_cleanup_task(struct iscsi_task *task)
+{
+ if (!task->sc || task->state == ISCSI_TASK_PENDING) {
+ QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
+ atomic_read(&task->refcount));
+ return;
+ }
+
+ qedi_iscsi_unmap_sg_list(task->dd_data);
+}
+
+struct iscsi_transport qedi_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = QEDI_MODULE_NAME,
+ .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST |
+ CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO,
+ .create_session = qedi_session_create,
+ .destroy_session = qedi_session_destroy,
+ .create_conn = qedi_conn_create,
+ .bind_conn = qedi_conn_bind,
+ .start_conn = qedi_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ .destroy_conn = qedi_conn_destroy,
+ .set_param = iscsi_set_param,
+ .get_ep_param = qedi_ep_get_param,
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .get_host_param = qedi_host_get_param,
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = qedi_conn_get_stats,
+ .xmit_task = qedi_task_xmit,
+ .cleanup_task = qedi_cleanup_task,
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ .ep_connect = qedi_ep_connect,
+ .ep_poll = qedi_ep_poll,
+ .ep_disconnect = qedi_ep_disconnect,
+ .set_path = qedi_set_path,
+ .attr_is_visible = qedi_attr_is_visible,
+};
+
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_conn *conn;
+
+ cls_conn = qedi_conn->cls_conn;
+ conn = cls_conn->dd_data;
+ cls_sess = iscsi_conn_to_session(cls_conn);
+
+ if (iscsi_is_session_online(cls_sess)) {
+ qedi_conn->abrt_conn = 1;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failing connection, state=0x%x, cid=0x%x\n",
+ conn->session->state, qedi_conn->iscsi_conn_id);
+ iscsi_conn_failure(qedi_conn->cls_conn->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+ }
+}
+
+static const struct {
+ enum iscsi_error_types error_code;
+ char *err_string;
+} qedi_iscsi_error[] = {
+ { ISCSI_STATUS_NONE,
+ "tcp_error none"
+ },
+ { ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
+ "task cid mismatch"
+ },
+ { ISCSI_CONN_ERROR_TASK_NOT_VALID,
+ "invalid task"
+ },
+ { ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
+ "rq ring full"
+ },
+ { ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
+ "cmdq ring full"
+ },
+ { ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
+ "sge caching failed"
+ },
+ { ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
+ "hdr digest error"
+ },
+ { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
+ "local cmpl error"
+ },
+ { ISCSI_CONN_ERROR_DATA_OVERRUN,
+ "invalid task"
+ },
+ { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
+ "out of sge error"
+ },
+ { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
+ "tcp seg ip options error"
+ },
+ { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
+ "tcp ip fragment error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
+ "AHS len protocol error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
+ "itt out of range error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
+ "data seg more than pdu size"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
+ "invalid opcode"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
+ "invalid opcode before update"
+ },
+ { ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
+ "unexpected opcode"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
+ "r2t carries no data"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
+ "data sn error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
+ "data TTT error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
+ "r2t TTT error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
+ "buffer offset error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
+ "buffer offset ooo"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
+ "data seg len 0"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
+ "data xer len error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
+ "data xer len1 error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
+ "data xer len2 error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
+ "protocol lun error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
+ "f bit zero error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
+ "exp stat sn error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
+ "dsl not zero error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
+ "invalid dsl"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
+ "data seg len too big"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
+ "outstanding r2t count error"
+ },
+ { ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
+ "sense datalen error"
+ },
+};
+
+char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
+{
+ int i;
+ char *msg = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) {
+ if (qedi_iscsi_error[i].error_code == err_code) {
+ msg = qedi_iscsi_error[i].err_string;
+ break;
+ }
+ }
+ return msg;
+}
+
+void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+ struct qedi_conn *qedi_conn;
+ struct qedi_ctx *qedi;
+ char warn_notice[] = "iscsi_warning";
+ char error_notice[] = "iscsi_error";
+ char unknown_msg[] = "Unknown error";
+ char *message;
+ int need_recovery = 0;
+ u32 err_mask = 0;
+ char *msg;
+
+ if (!ep)
+ return;
+
+ qedi_conn = ep->conn;
+ if (!qedi_conn)
+ return;
+
+ qedi = ep->qedi;
+
+ QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n",
+ data->error_code);
+
+ if (err_mask) {
+ need_recovery = 0;
+ message = warn_notice;
+ } else {
+ need_recovery = 1;
+ message = error_notice;
+ }
+
+ msg = qedi_get_iscsi_error(data->error_code);
+ if (!msg) {
+ need_recovery = 0;
+ msg = unknown_msg;
+ }
+
+ iscsi_conn_printk(KERN_ALERT,
+ qedi_conn->cls_conn->dd_data,
+ "qedi: %s - %s\n", message, msg);
+
+ if (need_recovery)
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
+
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+ struct qedi_conn *qedi_conn;
+
+ if (!ep)
+ return;
+
+ qedi_conn = ep->conn;
+ if (!qedi_conn)
+ return;
+
+ QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n",
+ data->error_code);
+
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
new file mode 100644
index 000000000000..d3c06bbddb4e
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -0,0 +1,232 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_ISCSI_H_
+#define _QEDI_ISCSI_H_
+
+#include <linux/socket.h>
+#include <linux/completion.h>
+#include "qedi.h"
+
+#define ISCSI_MAX_SESS_PER_HBA 4096
+
+#define DEF_KA_TIMEOUT 7200000
+#define DEF_KA_INTERVAL 10000
+#define DEF_KA_MAX_PROBE_COUNT 10
+#define DEF_TOS 0
+#define DEF_TTL 0xfe
+#define DEF_SND_SEQ_SCALE 0
+#define DEF_RCV_BUF 0xffff
+#define DEF_SND_BUF 0xffff
+#define DEF_SEED 0
+#define DEF_MAX_RT_TIME 8000
+#define DEF_MAX_DA_COUNT 2
+#define DEF_SWS_TIMER 1000
+#define DEF_MAX_CWND 2
+#define DEF_PATH_MTU 1500
+#define DEF_MSS 1460
+#define DEF_LL2_MTU 1560
+#define JUMBO_MTU 9000
+
+#define MIN_MTU 576 /* rfc 793 */
+#define IPV4_HDR_LEN 20
+#define IPV6_HDR_LEN 40
+#define TCP_HDR_LEN 20
+#define TCP_OPTION_LEN 12
+#define VLAN_LEN 4
+
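+/* Offloaded endpoint (TCP connection) states */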
+enum {
+ EP_STATE_IDLE = 0x0,
+ EP_STATE_ACQRCONN_START = 0x1,
+ EP_STATE_ACQRCONN_COMPL = 0x2,
+ EP_STATE_OFLDCONN_START = 0x4,
+ EP_STATE_OFLDCONN_COMPL = 0x8,
+ EP_STATE_DISCONN_START = 0x10,
+ EP_STATE_DISCONN_COMPL = 0x20,
+ EP_STATE_CLEANUP_START = 0x40,
+ EP_STATE_CLEANUP_CMPL = 0x80,
+ EP_STATE_TCP_FIN_RCVD = 0x100,
+ EP_STATE_TCP_RST_RCVD = 0x200,
+ EP_STATE_LOGOUT_SENT = 0x400,
+ EP_STATE_LOGOUT_RESP_RCVD = 0x800,
+ EP_STATE_CLEANUP_FAILED = 0x1000,
+ EP_STATE_OFLDCONN_FAILED = 0x2000,
+ EP_STATE_CONNECT_FAILED = 0x4000,
+ EP_STATE_DISCONN_TIMEDOUT = 0x8000,
+};
+
+struct qedi_conn;
+
+struct qedi_endpoint {
+ struct qedi_ctx *qedi;
+ u32 dst_addr[4];
+ u32 src_addr[4];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_id;
+ u16 pmtu;
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ u8 ip_type;
+ int state;
+ wait_queue_head_t ofld_wait;
+ wait_queue_head_t tcp_ofld_wait;
+ u32 iscsi_cid;
+ /* identifier of the connection from qed */
+ u32 handle;
+ u32 fw_cid;
+ void __iomem *p_doorbell;
+
+ /* Send queue management */
+ struct iscsi_wqe *sq;
+ dma_addr_t sq_dma;
+
+ u16 sq_prod_idx;
+ u16 fw_sq_prod_idx;
+ u16 sq_con_idx;
+ u32 sq_mem_size;
+
+ void *sq_pbl;
+ dma_addr_t sq_pbl_dma;
+ u32 sq_pbl_size;
+ struct qedi_conn *conn;
+ struct work_struct offload_work;
+};
+
+#define QEDI_SQ_WQES_MIN 16
+
+struct qedi_io_bdt {
+ struct iscsi_sge *sge_tbl;
+ dma_addr_t sge_tbl_dma;
+ u16 sge_valid;
+};
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf: driver buffer used to stage payload associated with
+ * the login request
+ * @req_dma_addr: dma address for iscsi login request payload buffer
+ * @req_buf_size: actual login request payload length
+ * @req_wr_ptr: pointer into login request buffer when next data is
+ * to be written
+ * @resp_hdr: iscsi header where iscsi login response header is to
+ * be recreated
+ * @resp_buf: buffer to stage login response payload
+ * @resp_dma_addr: login response payload buffer dma address
+ * @resp_buf_size:     login response payload length
+ * @resp_wr_ptr: pointer into login response buffer when next data is
+ * to be written
+ * @req_bd_tbl: iscsi login request payload BD table
+ * @req_bd_dma: login request BD table dma address
+ * @resp_bd_tbl: iscsi login response payload BD table
+ * @resp_bd_dma:       login response BD table dma address
+ *
+ * The following structure describes the buffers used for generic PDUs such as
+ * iSCSI Login, Logout and NOP.
+ */
+struct generic_pdu_resc {
+ char *req_buf;
+ dma_addr_t req_dma_addr;
+ u32 req_buf_size;
+ char *req_wr_ptr;
+ struct iscsi_hdr resp_hdr;
+ char *resp_buf;
+ dma_addr_t resp_dma_addr;
+ u32 resp_buf_size;
+ char *resp_wr_ptr;
+ char *req_bd_tbl;
+ dma_addr_t req_bd_dma;
+ char *resp_bd_tbl;
+ dma_addr_t resp_bd_dma;
+};
+
+struct qedi_conn {
+ struct iscsi_cls_conn *cls_conn;
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *ep;
+ struct list_head active_cmd_list;
+ spinlock_t list_lock; /* internal conn lock */
+ u32 active_cmd_count;
+ u32 cmd_cleanup_req;
+ u32 cmd_cleanup_cmpl;
+
+ u32 iscsi_conn_id;
+ int itt;
+ int abrt_conn;
+#define QEDI_CID_RESERVED 0x5AFF
+ u32 fw_cid;
+ /*
+ * Buffer for login negotiation process
+ */
+ struct generic_pdu_resc gen_pdu;
+
+ struct list_head tmf_work_list;
+ wait_queue_head_t wait_queue;
+ spinlock_t tmf_work_lock; /* tmf work lock */
+ unsigned long flags;
+#define QEDI_CONN_FW_CLEANUP 1
+};
+
+struct qedi_cmd {
+ struct list_head io_cmd;
+ bool io_cmd_in_list;
+ struct iscsi_hdr hdr;
+ struct qedi_conn *conn;
+ struct scsi_cmnd *scsi_cmd;
+ struct scatterlist *sg;
+ struct qedi_io_bdt io_tbl;
+ struct iscsi_task_context request;
+ unsigned char *sense_buffer;
+ dma_addr_t sense_buffer_dma;
+ u16 task_id;
+
+ /* field populated for tmf work queue */
+ struct iscsi_task *task;
+ struct work_struct tmf_work;
+ int state;
+#define CLEANUP_WAIT 1
+#define CLEANUP_RECV 2
+#define CLEANUP_WAIT_FAILED 3
+#define CLEANUP_NOT_REQUIRED 4
+#define LUN_RESET_RESPONSE_RECEIVED 5
+#define RESPONSE_RECEIVED 6
+
+ int type;
+#define TYPEIO 1
+#define TYPERESET 2
+
+ struct qedi_work_map *list_tmf_work;
+ /* slowpath management */
+ bool use_slowpath;
+
+ struct iscsi_tm_rsp *tmf_resp_buf;
+ struct qedi_work cqe_work;
+};
+
+struct qedi_work_map {
+ struct list_head list;
+ struct qedi_cmd *qedi_cmd;
+ int rtid;
+
+ int state;
+#define QEDI_WORK_QUEUED 1
+#define QEDI_WORK_SCHEDULED 2
+#define QEDI_WORK_EXIT 3
+
+ struct work_struct *ptr_tmf_work;
+};
+
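+/* The low 16 bits of the iSCSI itt carry the firmware task id and the
+ * protocol itt is kept in the upper 16 bits; qedi_get_itt() recovers the
+ * protocol itt from the itt field of a received CQE header.
+ */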
+#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16)))
+#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
+
+#define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \
+ (q)->state == EP_STATE_OFLDCONN_COMPL)
+
+#endif /* _QEDI_ISCSI_H_ */
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
new file mode 100644
index 000000000000..19ead8d17e55
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -0,0 +1,2127 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <scsi/iscsi_if.h>
+#include <linux/inet.h>
+#include <net/arp.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+
+static uint qedi_fw_debug;
+module_param(qedi_fw_debug, uint, 0644);
+MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0 (default) to 3");
+
+uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
+module_param(qedi_dbg_log, uint, 0644);
+MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");
+
+uint qedi_io_tracing;
+module_param(qedi_io_tracing, uint, 0644);
+MODULE_PARM_DESC(qedi_io_tracing,
+ " Enable logging of SCSI requests/completions into trace buffer. (default off).");
+
+const struct qed_iscsi_ops *qedi_ops;
+static struct scsi_transport_template *qedi_scsi_transport;
+static struct pci_driver qedi_pci_driver;
+static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
+static LIST_HEAD(qedi_udev_list);
+/* Static function declaration */
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
+static void qedi_free_global_queues(struct qedi_ctx *qedi);
+static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
+
+static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
+{
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+ struct async_data *data;
+ int rval = 0;
+
+ if (!context || !fw_handle) {
+ QEDI_ERR(NULL, "Recv event with ctx NULL\n");
+ return -EINVAL;
+ }
+
+ qedi = (struct qedi_ctx *)context;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
+
+ data = (struct async_data *)fw_handle;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
+ data->cid, data->itid, data->error_code,
+ data->fw_debug_param);
+
+ qedi_ep = qedi->ep_tbl[data->cid];
+
+ if (!qedi_ep) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Cannot process event, ep already disconnected, cid=0x%x\n",
+ data->cid);
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
+ switch (fw_event_code) {
+ case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
+ if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+ qedi_ep->state = EP_STATE_OFLDCONN_COMPL;
+
+ wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+ break;
+ case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
+ qedi_ep->state = EP_STATE_DISCONN_COMPL;
+ wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+ break;
+ case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
+ qedi_process_iscsi_error(qedi_ep, data);
+ break;
+ case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
+ case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
+ case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
+ case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
+ qedi_process_tcp_error(qedi_ep, data);
+ break;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
+ fw_event_code);
+ }
+
+ return rval;
+}
+
+static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+ struct qedi_uio_dev *udev = uinfo->priv;
+ struct qedi_ctx *qedi = udev->qedi;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (udev->uio_dev != -1)
+ return -EBUSY;
+
+ rtnl_lock();
+ udev->uio_dev = iminor(inode);
+ qedi_reset_uio_rings(udev);
+ set_bit(UIO_DEV_OPENED, &qedi->flags);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+ struct qedi_uio_dev *udev = uinfo->priv;
+ struct qedi_ctx *qedi = udev->qedi;
+
+ udev->uio_dev = -1;
+ clear_bit(UIO_DEV_OPENED, &qedi->flags);
+ qedi_ll2_free_skbs(qedi);
+ return 0;
+}
+
+static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
+{
+ if (udev->ll2_ring) {
+ free_page((unsigned long)udev->ll2_ring);
+ udev->ll2_ring = NULL;
+ }
+
+ if (udev->ll2_buf) {
+ free_pages((unsigned long)udev->ll2_buf, 2);
+ udev->ll2_buf = NULL;
+ }
+}
+
+static void __qedi_free_uio(struct qedi_uio_dev *udev)
+{
+ uio_unregister_device(&udev->qedi_uinfo);
+
+ __qedi_free_uio_rings(udev);
+
+ pci_dev_put(udev->pdev);
+ kfree(udev->uctrl);
+ kfree(udev);
+}
+
+static void qedi_free_uio(struct qedi_uio_dev *udev)
+{
+ if (!udev)
+ return;
+
+ list_del_init(&udev->list);
+ __qedi_free_uio(udev);
+}
+
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
+{
+ struct qedi_ctx *qedi = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+
+ qedi = udev->qedi;
+ uctrl = udev->uctrl;
+
+ spin_lock_bh(&qedi->ll2_lock);
+ uctrl->host_rx_cons = 0;
+ uctrl->hw_rx_prod = 0;
+ uctrl->hw_rx_bd_prod = 0;
+ uctrl->host_rx_bd_cons = 0;
+
+ memset(udev->ll2_ring, 0, udev->ll2_ring_size);
+ memset(udev->ll2_buf, 0, udev->ll2_buf_size);
+ spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
+{
+ int rc = 0;
+
+ if (udev->ll2_ring || udev->ll2_buf)
+ return rc;
+
+ /* Allocating memory for LL2 ring */
+ udev->ll2_ring_size = QEDI_PAGE_SIZE;
+ udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
+ if (!udev->ll2_ring) {
+ rc = -ENOMEM;
+ goto exit_alloc_ring;
+ }
+
+ /* Allocating memory for Tx/Rx pkt buffer */
+ udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
+ udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
+ udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
+ __GFP_ZERO, 2);
+ if (!udev->ll2_buf) {
+ rc = -ENOMEM;
+ goto exit_alloc_buf;
+ }
+ return rc;
+
+exit_alloc_buf:
+ free_page((unsigned long)udev->ll2_ring);
+ udev->ll2_ring = NULL;
+exit_alloc_ring:
+ return rc;
+}
+
+static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
+{
+ struct qedi_uio_dev *udev = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+ int rc = 0;
+
+ list_for_each_entry(udev, &qedi_udev_list, list) {
+ if (udev->pdev == qedi->pdev) {
+ udev->qedi = qedi;
+ if (__qedi_alloc_uio_rings(udev)) {
+ udev->qedi = NULL;
+ return -ENOMEM;
+ }
+ qedi->udev = udev;
+ return 0;
+ }
+ }
+
+ udev = kzalloc(sizeof(*udev), GFP_KERNEL);
+ if (!udev) {
+ rc = -ENOMEM;
+ goto err_udev;
+ }
+
+ uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
+ if (!uctrl) {
+ rc = -ENOMEM;
+ goto err_uctrl;
+ }
+
+ udev->uio_dev = -1;
+
+ udev->qedi = qedi;
+ udev->pdev = qedi->pdev;
+ udev->uctrl = uctrl;
+
+ rc = __qedi_alloc_uio_rings(udev);
+ if (rc)
+ goto err_uio_rings;
+
+ list_add(&udev->list, &qedi_udev_list);
+
+ pci_dev_get(udev->pdev);
+ qedi->udev = udev;
+
+ udev->tx_pkt = udev->ll2_buf;
+ udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
+ return 0;
+
+ err_uio_rings:
+ kfree(uctrl);
+ err_uctrl:
+ kfree(udev);
+ err_udev:
+ return -ENOMEM;
+}
+
+static int qedi_init_uio(struct qedi_ctx *qedi)
+{
+ struct qedi_uio_dev *udev = qedi->udev;
+ struct uio_info *uinfo;
+ int ret = 0;
+
+ if (!udev)
+ return -ENOMEM;
+
+ uinfo = &udev->qedi_uinfo;
+
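+ /* Expose three regions to user space (iscsiuio): the control structure,
+ * the LL2 ring and the rx/tx packet buffers.
+ */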
+ uinfo->mem[0].addr = (unsigned long)udev->uctrl;
+ uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
+ uinfo->mem[0].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
+ uinfo->mem[1].size = udev->ll2_ring_size;
+ uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
+ uinfo->mem[2].size = udev->ll2_buf_size;
+ uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->name = "qedi_uio";
+ uinfo->version = QEDI_MODULE_VERSION;
+ uinfo->irq = UIO_IRQ_CUSTOM;
+
+ uinfo->open = qedi_uio_open;
+ uinfo->release = qedi_uio_close;
+
+ if (udev->uio_dev == -1) {
+ if (!uinfo->priv) {
+ uinfo->priv = udev;
+
+ ret = uio_register_device(&udev->pdev->dev, uinfo);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO registration failed\n");
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int ret;
+
+ sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
+ sizeof(struct status_block), &sb_phys,
+ GFP_KERNEL);
+ if (!sb_virt) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Status block allocation failed for id = %d.\n",
+ sb_id);
+ return -ENOMEM;
+ }
+
+ ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
+ sb_id, QED_SB_TYPE_STORAGE);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Status block initialization failed for id = %d.\n",
+ sb_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qedi_free_sb(struct qedi_ctx *qedi)
+{
+ struct qed_sb_info *sb_info;
+ int id;
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ sb_info = &qedi->sb_array[id];
+ if (sb_info->sb_virt)
+ dma_free_coherent(&qedi->pdev->dev,
+ sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt,
+ sb_info->sb_phys);
+ }
+}
+
+static void qedi_free_fp(struct qedi_ctx *qedi)
+{
+ kfree(qedi->fp_array);
+ kfree(qedi->sb_array);
+}
+
+static void qedi_destroy_fp(struct qedi_ctx *qedi)
+{
+ qedi_free_sb(qedi);
+ qedi_free_fp(qedi);
+}
+
+static int qedi_alloc_fp(struct qedi_ctx *qedi)
+{
+ int ret = 0;
+
+ qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+ sizeof(struct qedi_fastpath), GFP_KERNEL);
+ if (!qedi->fp_array) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fastpath fp array allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+ sizeof(struct qed_sb_info), GFP_KERNEL);
+ if (!qedi->sb_array) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fastpath sb array allocation failed.\n");
+ ret = -ENOMEM;
+ goto free_fp;
+ }
+
+ return ret;
+
+free_fp:
+ qedi_free_fp(qedi);
+ return ret;
+}
+
+static void qedi_int_fp(struct qedi_ctx *qedi)
+{
+ struct qedi_fastpath *fp;
+ int id;
+
+ memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+ sizeof(*qedi->fp_array));
+ memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+ sizeof(*qedi->sb_array));
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ fp = &qedi->fp_array[id];
+ fp->sb_info = &qedi->sb_array[id];
+ fp->sb_id = id;
+ fp->qedi = qedi;
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ "qedi", id);
+
+ /* fp_array[i] is passed as the irq cookie, so initialize the
+ * data needed in interrupt context here.
+ */
+ }
+}
+
+static int qedi_prepare_fp(struct qedi_ctx *qedi)
+{
+ struct qedi_fastpath *fp;
+ int id, ret = 0;
+
+ ret = qedi_alloc_fp(qedi);
+ if (ret)
+ goto err;
+
+ qedi_int_fp(qedi);
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ fp = &qedi->fp_array[id];
+ ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "SB allocation and initialization failed.\n");
+ ret = -EIO;
+ goto err_init;
+ }
+ }
+
+ return 0;
+
+err_init:
+ qedi_free_sb(qedi);
+ qedi_free_fp(qedi);
+err:
+ return ret;
+}
+
+static int qedi_setup_cid_que(struct qedi_ctx *qedi)
+{
+ int i;
+
+ qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
+ sizeof(u32), GFP_KERNEL);
+ if (!qedi->cid_que.cid_que_base)
+ return -ENOMEM;
+
+ qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
+ sizeof(struct qedi_conn *),
+ GFP_KERNEL);
+ if (!qedi->cid_que.conn_cid_tbl) {
+ kfree(qedi->cid_que.cid_que_base);
+ qedi->cid_que.cid_que_base = NULL;
+ return -ENOMEM;
+ }
+
+ qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
+ qedi->cid_que.cid_q_prod_idx = 0;
+ qedi->cid_que.cid_q_cons_idx = 0;
+ qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
+ qedi->cid_que.cid_free_cnt = qedi->max_active_conns;
+
+ for (i = 0; i < qedi->max_active_conns; i++) {
+ qedi->cid_que.cid_que[i] = i;
+ qedi->cid_que.conn_cid_tbl[i] = NULL;
+ }
+
+ return 0;
+}
+
+static void qedi_release_cid_que(struct qedi_ctx *qedi)
+{
+ kfree(qedi->cid_que.cid_que_base);
+ qedi->cid_que.cid_que_base = NULL;
+
+ kfree(qedi->cid_que.conn_cid_tbl);
+ qedi->cid_que.conn_cid_tbl = NULL;
+}
+
+static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
+ u16 start_id, u16 next)
+{
+ id_tbl->start = start_id;
+ id_tbl->max = size;
+ id_tbl->next = next;
+ spin_lock_init(&id_tbl->lock);
+ id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+ if (!id_tbl->table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
+{
+ kfree(id_tbl->table);
+ id_tbl->table = NULL;
+}
+
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+ int ret = -1;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return ret;
+
+ spin_lock(&id_tbl->lock);
+ if (!test_bit(id, id_tbl->table)) {
+ set_bit(id, id_tbl->table);
+ ret = 0;
+ }
+ spin_unlock(&id_tbl->lock);
+ return ret;
+}
+
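+/* Allocate the next free local port id, wrapping around the bitmap and
+ * remembering where the next search should start.
+ */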
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
+{
+ u16 id;
+
+ spin_lock(&id_tbl->lock);
+ id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+ if (id >= id_tbl->max) {
+ id = QEDI_LOCAL_PORT_INVALID;
+ if (id_tbl->next != 0) {
+ id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+ if (id >= id_tbl->next)
+ id = QEDI_LOCAL_PORT_INVALID;
+ }
+ }
+
+ if (id < id_tbl->max) {
+ set_bit(id, id_tbl->table);
+ id_tbl->next = (id + 1) & (id_tbl->max - 1);
+ id += id_tbl->start;
+ }
+
+ spin_unlock(&id_tbl->lock);
+
+ return id;
+}
+
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+ if (id == QEDI_LOCAL_PORT_INVALID)
+ return;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return;
+
+ clear_bit(id, id_tbl->table);
+}
+
+static void qedi_cm_free_mem(struct qedi_ctx *qedi)
+{
+ kfree(qedi->ep_tbl);
+ qedi->ep_tbl = NULL;
+ qedi_free_id_tbl(&qedi->lcl_port_tbl);
+}
+
+static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
+{
+ u16 port_id;
+
+ qedi->ep_tbl = kzalloc((qedi->max_active_conns *
+ sizeof(struct qedi_endpoint *)), GFP_KERNEL);
+ if (!qedi->ep_tbl)
+ return -ENOMEM;
+ port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+ if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
+ QEDI_LOCAL_PORT_MIN, port_id)) {
+ qedi_cm_free_mem(qedi);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi = NULL;
+
+ shost = iscsi_host_alloc(&qedi_host_template,
+ sizeof(struct qedi_ctx), 0);
+ if (!shost) {
+ QEDI_ERR(NULL, "Could not allocate shost\n");
+ goto exit_setup_shost;
+ }
+
+ shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ shost->max_channel = 0;
+ shost->max_lun = ~0;
+ shost->max_cmd_len = 16;
+ shost->transportt = qedi_scsi_transport;
+
+ qedi = iscsi_host_priv(shost);
+ memset(qedi, 0, sizeof(*qedi));
+ qedi->shost = shost;
+ qedi->dbg_ctx.host_no = shost->host_no;
+ qedi->pdev = pdev;
+ qedi->dbg_ctx.pdev = pdev;
+ qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
+ qedi->max_sqes = QEDI_SQ_SIZE;
+
+ if (shost_use_blk_mq(shost))
+ shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+ pci_set_drvdata(pdev, qedi);
+
+exit_setup_shost:
+ return qedi;
+}
+
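+/* LL2 rx callback: queue the skb on the adapter's skb list and wake the
+ * LL2 recv thread, which copies it into the UIO-mapped buffers for
+ * iscsiuio to consume.
+ */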
+static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+ struct qedi_uio_dev *udev;
+ struct qedi_uio_ctrl *uctrl;
+ struct skb_work_list *work;
+ u32 prod;
+
+ if (!qedi) {
+ QEDI_ERR(NULL, "qedi is NULL\n");
+ return -1;
+ }
+
+ if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
+ "UIO DEV is not opened\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ udev = qedi->udev;
+ uctrl = udev->uctrl;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate work so dropping frame.\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&work->list);
+ work->skb = skb;
+
+ if (skb_vlan_tag_present(skb))
+ work->vlan_id = skb_vlan_tag_get(skb);
+
+ if (work->vlan_id)
+ __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);
+
+ spin_lock_bh(&qedi->ll2_lock);
+ list_add_tail(&work->list, &qedi->ll2_skb_list);
+
+ ++uctrl->hw_rx_prod_cnt;
+ prod = (uctrl->hw_rx_prod + 1) % RX_RING;
+ if (prod != uctrl->host_rx_cons) {
+ uctrl->hw_rx_prod = prod;
+ spin_unlock_bh(&qedi->ll2_lock);
+ wake_up_process(qedi->ll2_recv_thread);
+ return 0;
+ }
+
+ spin_unlock_bh(&qedi->ll2_lock);
+ return 0;
+}
+
+/* map this skb to the iscsiuio mmapped region */
+static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
+ u16 vlan_id)
+{
+ struct qedi_uio_dev *udev = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+ struct qedi_rx_bd rxbd;
+ struct qedi_rx_bd *p_rxbd;
+ u32 rx_bd_prod;
+ void *pkt;
+ int len = 0;
+
+ if (!qedi) {
+ QEDI_ERR(NULL, "qedi is NULL\n");
+ return -1;
+ }
+
+ udev = qedi->udev;
+ uctrl = udev->uctrl;
+ pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
+ len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
+ memcpy(pkt, skb->data, len);
+
+ memset(&rxbd, 0, sizeof(rxbd));
+ rxbd.rx_pkt_index = uctrl->hw_rx_prod;
+ rxbd.rx_pkt_len = len;
+ rxbd.vlan_id = vlan_id;
+
+ uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
+ rx_bd_prod = uctrl->hw_rx_bd_prod;
+ p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
+ p_rxbd += rx_bd_prod;
+
+ memcpy(p_rxbd, &rxbd, sizeof(rxbd));
+
+ /* notify the iscsiuio about new packet */
+ uio_event_notify(&udev->qedi_uinfo);
+
+ return 0;
+}
+
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
+{
+ struct skb_work_list *work, *work_tmp;
+
+ spin_lock_bh(&qedi->ll2_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
+ list_del(&work->list);
+ if (work->skb)
+ kfree_skb(work->skb);
+ kfree(work);
+ }
+ spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int qedi_ll2_recv_thread(void *arg)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
+ struct skb_work_list *work, *work_tmp;
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop()) {
+ spin_lock_bh(&qedi->ll2_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
+ list) {
+ list_del(&work->list);
+ qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
+ kfree_skb(work->skb);
+ kfree(work);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&qedi->ll2_lock);
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+ u8 num_sq_pages;
+ u32 log_page_size;
+ int rval = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
+ MIN_NUM_CPUS_MSIX(qedi));
+
+ num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
+
+ qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+ memset(&qedi->pf_params.iscsi_pf_params, 0,
+ sizeof(qedi->pf_params.iscsi_pf_params));
+
+ qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+ qedi->num_queues * sizeof(struct qedi_glbl_q_params),
+ &qedi->hw_p_cpuq);
+ if (!qedi->p_cpuq) {
+ QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+ rval = -1;
+ goto err_alloc_mem;
+ }
+
+ rval = qedi_alloc_global_queues(qedi);
+ if (rval) {
+ QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
+ rval = -1;
+ goto err_alloc_mem;
+ }
+
+ qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
+ qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
+ qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
+ qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+
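+ /* Derive log2(PAGE_SIZE) for the firmware page size parameter. */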
+ for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
+ if ((1 << log_page_size) == PAGE_SIZE)
+ break;
+ }
+ qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
+
+ qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
+ (u64)qedi->hw_p_cpuq;
+
+ /* RQ BDQ initializations.
+ * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
+ * rqe_log_size: 8 for 256B RQE
+ */
+ qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
+ /* BDQ address and size */
+ qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
+ qedi->bdq_pbl_list_dma;
+ qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
+ qedi->bdq_pbl_list_num_entries;
+ qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;
+
+ /* cq_num_entries: num_tasks + rq_num_entries */
+ qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;
+
+ qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
+ qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
+ qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
+
+err_alloc_mem:
+ return rval;
+}
+
+/* Free DMA coherent memory for array of queue pointers we pass to qed */
+static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+ size_t size = 0;
+
+ if (qedi->p_cpuq) {
+ size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
+ pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+ qedi->hw_p_cpuq);
+ }
+
+ qedi_free_global_queues(qedi);
+
+ kfree(qedi->global_queues);
+}
+
+static void qedi_link_update(void *dev, struct qed_link_output *link)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
+
+ if (link->link_up) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_UP);
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Link Down event.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ }
+}
+
+static struct qed_iscsi_cb_ops qedi_cb_ops = {
+ {
+ .link_update = qedi_link_update,
+ }
+};
+
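+/* Route a completion to the per-CPU I/O thread: solicited CQEs reuse the
+ * work struct embedded in the command, unsolicited ones get a freshly
+ * allocated work item that the thread frees after processing.
+ */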
+static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
+ u16 que_idx, struct qedi_percpu_s *p)
+{
+ struct qedi_work *qedi_work;
+ struct qedi_conn *q_conn;
+ struct iscsi_conn *conn;
+ struct qedi_cmd *qedi_cmd;
+ u32 iscsi_cid;
+ int rc = 0;
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+ if (!q_conn) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Session no longer exists for cid=0x%x!!\n",
+ iscsi_cid);
+ return -1;
+ }
+ conn = q_conn->cls_conn->dd_data;
+
+ switch (cqe->cqe_common.cqe_type) {
+ case ISCSI_CQE_TYPE_SOLICITED:
+ case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+ qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
+ if (!qedi_cmd) {
+ rc = -1;
+ break;
+ }
+ INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
+ qedi_cmd->cqe_work.qedi = qedi;
+ memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
+ qedi_cmd->cqe_work.que_idx = que_idx;
+ qedi_cmd->cqe_work.is_solicited = true;
+ list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
+ break;
+ case ISCSI_CQE_TYPE_UNSOLICITED:
+ case ISCSI_CQE_TYPE_DUMMY:
+ case ISCSI_CQE_TYPE_TASK_CLEANUP:
+ qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
+ if (!qedi_work) {
+ rc = -1;
+ break;
+ }
+ INIT_LIST_HEAD(&qedi_work->list);
+ qedi_work->qedi = qedi;
+ memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
+ qedi_work->que_idx = que_idx;
+ qedi_work->is_solicited = false;
+ list_add_tail(&qedi_work->list, &p->work_list);
+ break;
+ default:
+ rc = -1;
+ QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
+ }
+ return rc;
+}
+
+static bool qedi_process_completions(struct qedi_fastpath *fp)
+{
+ struct qedi_ctx *qedi = fp->qedi;
+ struct qed_sb_info *sb_info = fp->sb_info;
+ struct status_block *sb = sb_info->sb_virt;
+ struct qedi_percpu_s *p = NULL;
+ struct global_queue *que;
+ u16 prod_idx;
+ unsigned long flags;
+ union iscsi_cqe *cqe;
+ int cpu;
+ int ret;
+
+ /* Get the current firmware producer index */
+ prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+ if (prod_idx >= QEDI_CQ_SIZE)
+ prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+ que = qedi->global_queues[fp->sb_id];
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
+ que, prod_idx, que->cq_cons_idx, fp->sb_id);
+
+ qedi->intr_cpu = fp->sb_id;
+ cpu = smp_processor_id();
+ p = &per_cpu(qedi_percpu, cpu);
+
+ if (unlikely(!p->iothread))
+ WARN_ON(1);
+
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ while (que->cq_cons_idx != prod_idx) {
+ cqe = &que->cq[que->cq_cons_idx];
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "cqe=%p prod_idx=%d cons_idx=%d.\n",
+ cqe, prod_idx, que->cq_cons_idx);
+
+ ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
+ if (ret)
+ continue;
+
+ que->cq_cons_idx++;
+ if (que->cq_cons_idx == QEDI_CQ_SIZE)
+ que->cq_cons_idx = 0;
+ }
+ wake_up_process(p->iothread);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+ return true;
+}
+
+static bool qedi_fp_has_work(struct qedi_fastpath *fp)
+{
+ struct qedi_ctx *qedi = fp->qedi;
+ struct global_queue *que;
+ struct qed_sb_info *sb_info = fp->sb_info;
+ struct status_block *sb = sb_info->sb_virt;
+ u16 prod_idx;
+
+ barrier();
+
+ /* Get the current firmware producer index */
+ prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+ /* Get the pointer to the global CQ this completion is on */
+ que = qedi->global_queues[fp->sb_id];
+
+ /* prod idx wrap around uint16 */
+ if (prod_idx >= QEDI_CQ_SIZE)
+ prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+ return (que->cq_cons_idx != prod_idx);
+}
+
+/* MSI-X fastpath handler code */
+static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
+{
+ struct qedi_fastpath *fp = dev_id;
+ struct qedi_ctx *qedi = fp->qedi;
+ bool wake_io_thread = true;
+
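+ /* Mask the status block interrupt while completions are drained; it is
+ * re-enabled below once no more work is pending.
+ */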
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+
+process_again:
+ wake_io_thread = qedi_process_completions(fp);
+ if (wake_io_thread) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "process already running\n");
+ }
+
+ if (qedi_fp_has_work(fp) == 0)
+ qed_sb_update_sb_idx(fp->sb_info);
+
+ /* Check for more work */
+ rmb();
+
+ if (qedi_fp_has_work(fp) == 0)
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+ else
+ goto process_again;
+
+ return IRQ_HANDLED;
+}
+
+/* simd handler for MSI/INTa */
+static void qedi_simd_int_handler(void *cookie)
+{
+ /* Cookie is qedi_ctx struct */
+ struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+
+ QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
+}
+
+#define QEDI_SIMD_HANDLER_NUM 0
+static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
+{
+ int i;
+
+ if (qedi->int_info.msix_cnt) {
+ for (i = 0; i < qedi->int_info.used_cnt; i++) {
+ synchronize_irq(qedi->int_info.msix[i].vector);
+ irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+ NULL);
+ free_irq(qedi->int_info.msix[i].vector,
+ &qedi->fp_array[i]);
+ }
+ } else {
+ qedi_ops->common->simd_handler_clean(qedi->cdev,
+ QEDI_SIMD_HANDLER_NUM);
+ }
+
+ qedi->int_info.used_cnt = 0;
+ qedi_ops->common->set_fp_int(qedi->cdev, 0);
+}
+
+static int qedi_request_msix_irq(struct qedi_ctx *qedi)
+{
+ int i, rc, cpu;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+ rc = request_irq(qedi->int_info.msix[i].vector,
+ qedi_msix_handler, 0, "qedi",
+ &qedi->fp_array[i]);
+
+ if (rc) {
+ QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
+ qedi_sync_free_irqs(qedi);
+ return rc;
+ }
+ qedi->int_info.used_cnt++;
+ rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+ get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ return 0;
+}
+
+static int qedi_setup_int(struct qedi_ctx *qedi)
+{
+ int rc = 0;
+
+ rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
+ rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
+ if (rc)
+ goto exit_setup_int;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
+ qedi->int_info.msix_cnt, num_online_cpus());
+
+ if (qedi->int_info.msix_cnt) {
+ rc = qedi_request_msix_irq(qedi);
+ goto exit_setup_int;
+ } else {
+ qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
+ QEDI_SIMD_HANDLER_NUM,
+ qedi_simd_int_handler);
+ qedi->int_info.used_cnt = 1;
+ }
+
+exit_setup_int:
+ return rc;
+}
+
+static void qedi_free_bdq(struct qedi_ctx *qedi)
+{
+ int i;
+
+ if (qedi->bdq_pbl_list)
+ dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
+
+ if (qedi->bdq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
+ qedi->bdq_pbl, qedi->bdq_pbl_dma);
+
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ if (qedi->bdq[i].buf_addr) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
+ qedi->bdq[i].buf_addr,
+ qedi->bdq[i].buf_dma);
+ }
+ }
+}
+
+static void qedi_free_global_queues(struct qedi_ctx *qedi)
+{
+ int i;
+ struct global_queue **gl = qedi->global_queues;
+
+ for (i = 0; i < qedi->num_queues; i++) {
+ if (!gl[i])
+ continue;
+
+ if (gl[i]->cq)
+ dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
+ gl[i]->cq, gl[i]->cq_dma);
+ if (gl[i]->cq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
+ gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
+
+ kfree(gl[i]);
+ }
+ qedi_free_bdq(qedi);
+}
+
+static int qedi_alloc_bdq(struct qedi_ctx *qedi)
+{
+ int i;
+ struct scsi_bd *pbl;
+ u64 *list;
+ dma_addr_t page;
+
+ /* Alloc dma memory for BDQ buffers */
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ qedi->bdq[i].buf_addr =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_BDQ_BUF_SIZE,
+ &qedi->bdq[i].buf_dma,
+ GFP_KERNEL);
+ if (!qedi->bdq[i].buf_addr) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not allocate BDQ buffer %d.\n", i);
+ return -ENOMEM;
+ }
+ }
+
+ /* Alloc dma memory for BDQ page buffer list */
+ qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
+ qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
+ qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
+ qedi->rq_num_entries);
+
+ qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->bdq_pbl_mem_size,
+ &qedi->bdq_pbl_dma, GFP_KERNEL);
+ if (!qedi->bdq_pbl) {
+ QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Populate BDQ PBL with physical and virtual address of individual
+ * BDQ buffers
+ */
+ pbl = (struct scsi_bd *)qedi->bdq_pbl;
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ pbl->address.hi =
+ cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
+ pbl->address.lo =
+ cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
+ pbl, pbl->address.hi, pbl->address.lo, i);
+ pbl->opaque.hi = 0;
+ pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+ pbl++;
+ }
+
+ /* Allocate list of PBL pages */
+ qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+ PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
+ if (!qedi->bdq_pbl_list) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not allocate list of PBL pages.\n");
+ return -ENOMEM;
+ }
+ memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
+
+ /*
+ * Now populate PBL list with pages that contain pointers to the
+ * individual buffers.
+ */
+ qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
+ list = (u64 *)qedi->bdq_pbl_list;
+ page = qedi->bdq_pbl_list_dma;
+ for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
+ *list = qedi->bdq_pbl_dma;
+ list++;
+ page += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
+{
+ u32 *list;
+ int i;
+ int status = 0, rc;
+ u32 *pbl;
+ dma_addr_t page;
+ int num_pages;
+
+ /*
+ * Number of global queues (CQ / RQ). This should
+ * be <= number of available MSIX vectors for the PF
+ */
+ if (!qedi->num_queues) {
+ QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
+ return 1;
+ }
+
+ /* Make sure we allocated the PBL that will contain the physical
+ * addresses of our queues
+ */
+ if (!qedi->p_cpuq) {
+ status = 1;
+ goto mem_alloc_failure;
+ }
+
+ qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
+ qedi->num_queues), GFP_KERNEL);
+ if (!qedi->global_queues) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocate global queues array ptr memory\n");
+ return -ENOMEM;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "qedi->global_queues=%p.\n", qedi->global_queues);
+
+ /* Allocate DMA coherent buffers for BDQ */
+ rc = qedi_alloc_bdq(qedi);
+ if (rc)
+ goto mem_alloc_failure;
+
+ /* Allocate a CQ and an associated PBL for each MSI-X
+ * vector.
+ */
+ for (i = 0; i < qedi->num_queues; i++) {
+ qedi->global_queues[i] =
+ kzalloc(sizeof(*qedi->global_queues[0]),
+ GFP_KERNEL);
+ if (!qedi->global_queues[i]) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocate global queue %d.\n", i);
+ goto mem_alloc_failure;
+ }
+
+ qedi->global_queues[i]->cq_mem_size =
+ (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
+ qedi->global_queues[i]->cq_mem_size =
+ (qedi->global_queues[i]->cq_mem_size +
+ (QEDI_PAGE_SIZE - 1));
+
+ qedi->global_queues[i]->cq_pbl_size =
+ (qedi->global_queues[i]->cq_mem_size /
+ QEDI_PAGE_SIZE) * sizeof(void *);
+ qedi->global_queues[i]->cq_pbl_size =
+ (qedi->global_queues[i]->cq_pbl_size +
+ (QEDI_PAGE_SIZE - 1));
+
+ qedi->global_queues[i]->cq =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
+
+ if (!qedi->global_queues[i]->cq) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate cq.\n");
+ status = -ENOMEM;
+ goto mem_alloc_failure;
+ }
+ memset(qedi->global_queues[i]->cq, 0,
+ qedi->global_queues[i]->cq_mem_size);
+
+ qedi->global_queues[i]->cq_pbl =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
+
+ if (!qedi->global_queues[i]->cq_pbl) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate cq PBL.\n");
+ status = -ENOMEM;
+ goto mem_alloc_failure;
+ }
+ memset(qedi->global_queues[i]->cq_pbl, 0,
+ qedi->global_queues[i]->cq_pbl_size);
+
+ /* Create PBL */
+ num_pages = qedi->global_queues[i]->cq_mem_size /
+ QEDI_PAGE_SIZE;
+ page = qedi->global_queues[i]->cq_dma;
+ pbl = (u32 *)qedi->global_queues[i]->cq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += QEDI_PAGE_SIZE;
+ }
+ }
+
+ list = (u32 *)qedi->p_cpuq;
+
+ /*
+ * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
+ * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
+ * to the physical address which contains an array of pointers to the
+ * physical addresses of the specific queue pages.
+ */
+ for (i = 0; i < qedi->num_queues; i++) {
+ *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
+ list++;
+ *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
+ list++;
+
+ *list = (u32)0;
+ list++;
+ *list = (u32)((u64)0 >> 32);
+ list++;
+ }
+
+ return 0;
+
+mem_alloc_failure:
+ qedi_free_global_queues(qedi);
+ return status;
+}
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+ int rval = 0;
+ u32 *pbl;
+ dma_addr_t page;
+ int num_pages;
+
+ if (!ep)
+ return -EIO;
+
+ /* Calculate appropriate queue and PBL sizes */
+ ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
+ ep->sq_mem_size += QEDI_PAGE_SIZE - 1;
+
+ ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
+ ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
+
+ ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
+ if (!ep->sq) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate send queue.\n");
+ rval = -ENOMEM;
+ goto out;
+ }
+ memset(ep->sq, 0, ep->sq_mem_size);
+
+ ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
+ if (!ep->sq_pbl) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate send queue PBL.\n");
+ rval = -ENOMEM;
+ goto out_free_sq;
+ }
+ memset(ep->sq_pbl, 0, ep->sq_pbl_size);
+
+ /* Create PBL */
+ num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
+ page = ep->sq_dma;
+ pbl = (u32 *)ep->sq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += QEDI_PAGE_SIZE;
+ }
+
+ return rval;
+
+out_free_sq:
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+ ep->sq_dma);
+out:
+ return rval;
+}
+
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+ if (ep->sq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
+ ep->sq_pbl_dma);
+ if (ep->sq)
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+ ep->sq_dma);
+}
+
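+/* Reserve a free firmware task context id; retry if another CPU sets the
+ * bit between the lookup and test_and_set_bit().
+ */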
+int qedi_get_task_idx(struct qedi_ctx *qedi)
+{
+ s16 tmp_idx;
+
+again:
+ tmp_idx = find_first_zero_bit(qedi->task_idx_map,
+ MAX_ISCSI_TASK_ENTRIES);
+
+ if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
+ QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
+ tmp_idx = -1;
+ goto err_idx;
+ }
+
+ if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
+ goto again;
+
+err_idx:
+ return tmp_idx;
+}
+
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
+{
+ if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "FW task context, already cleared, tid=0x%x\n", idx);
+ WARN_ON(1);
+ }
+}
+
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+ struct qedi_cmd *cmd)
+{
+ qedi->itt_map[tid].itt = proto_itt;
+ qedi->itt_map[tid].p_cmd = cmd;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
+ qedi->itt_map[tid].itt);
+}
+
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
+{
+ u16 i;
+
+ for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
+ if (qedi->itt_map[i].itt == itt) {
+ *tid = i;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "Ref itt=0x%x, found at tid=0x%x\n",
+ itt, *tid);
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
+{
+ *proto_itt = qedi->itt_map[tid].itt;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "Get itt map tid [0x%x with proto itt[0x%x]",
+ tid, *proto_itt);
+}
+
+struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
+{
+ struct qedi_cmd *cmd = NULL;
+
+ if (tid >= MAX_ISCSI_TASK_ENTRIES)
+ return NULL;
+
+ cmd = qedi->itt_map[tid].p_cmd;
+ if (cmd->task_id != tid)
+ return NULL;
+
+ qedi->itt_map[tid].p_cmd = NULL;
+
+ return cmd;
+}
+
+static int qedi_alloc_itt(struct qedi_ctx *qedi)
+{
+ qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
+ sizeof(struct qedi_itt_map), GFP_KERNEL);
+ if (!qedi->itt_map) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocate itt map array memory\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void qedi_free_itt(struct qedi_ctx *qedi)
+{
+ kfree(qedi->itt_map);
+}
+
+static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
+ .rx_cb = qedi_ll2_rx,
+ .tx_cb = NULL,
+};
+
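+/* Per-CPU I/O thread: drain this CPU's work list and process queued CQEs
+ * in process context; work items for unsolicited CQEs are freed here.
+ */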
+static int qedi_percpu_io_thread(void *arg)
+{
+ struct qedi_percpu_s *p = arg;
+ struct qedi_work *work, *tmp;
+ unsigned long flags;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop()) {
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ while (!list_empty(&p->work_list)) {
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ qedi_fp_process_cqes(work);
+ if (!work->is_solicited)
+ kfree(work);
+ }
+ cond_resched();
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static void qedi_percpu_thread_create(unsigned int cpu)
+{
+ struct qedi_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(qedi_percpu, cpu);
+
+ thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
+ cpu_to_node(cpu),
+ "qedi_thread/%d", cpu);
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ }
+}
+
+static void qedi_percpu_thread_destroy(unsigned int cpu)
+{
+ struct qedi_percpu_s *p;
+ struct task_struct *thread;
+ struct qedi_work *work, *tmp;
+
+ p = &per_cpu(qedi_percpu, cpu);
+ spin_lock_bh(&p->p_work_lock);
+ thread = p->iothread;
+ p->iothread = NULL;
+
+ list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+ list_del_init(&work->list);
+ qedi_fp_process_cqes(work);
+ if (!work->is_solicited)
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->p_work_lock);
+ if (thread)
+ kthread_stop(thread);
+}
+
+static int qedi_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ QEDI_ERR(NULL, "CPU %d online.\n", cpu);
+ qedi_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ QEDI_ERR(NULL, "CPU %d offline.\n", cpu);
+ qedi_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block qedi_cpu_notifier = {
+ .notifier_call = qedi_cpu_callback,
+};
+
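+/* Bounce the LL2 connection with a new MTU: recover all offloaded
+ * connections, restart LL2 and re-program the path MTU plus IPv6/TCP
+ * header overhead.
+ */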
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
+{
+ struct qed_ll2_params params;
+
+ qedi_recover_all_conns(qedi);
+
+ qedi_ops->ll2->stop(qedi->cdev);
+ qedi_ll2_free_skbs(qedi);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
+ qedi->ll2_mtu, mtu);
+ memset(&params, 0, sizeof(params));
+ qedi->ll2_mtu = mtu;
+ params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
+ params.drop_ttl0_packets = 0;
+ params.rx_vlan_stripping = 1;
+ ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+ qedi_ops->ll2->start(qedi->cdev, &params);
+}
+
+static void __qedi_remove(struct pci_dev *pdev, int mode)
+{
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+ if (qedi->tmf_thread) {
+ flush_workqueue(qedi->tmf_thread);
+ destroy_workqueue(qedi->tmf_thread);
+ qedi->tmf_thread = NULL;
+ }
+
+ if (qedi->offload_thread) {
+ flush_workqueue(qedi->offload_thread);
+ destroy_workqueue(qedi->offload_thread);
+ qedi->offload_thread = NULL;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
+ qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+ qedi_sync_free_irqs(qedi);
+
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+ qedi_ops->stop(qedi->cdev);
+ qedi_ops->ll2->stop(qedi->cdev);
+ }
+
+ if (mode == QEDI_MODE_NORMAL)
+ qedi_free_iscsi_pf_param(qedi);
+
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+ qedi_ops->common->slowpath_stop(qedi->cdev);
+ qedi_ops->common->remove(qedi->cdev);
+ }
+
+ qedi_destroy_fp(qedi);
+
+ if (mode == QEDI_MODE_NORMAL) {
+ qedi_release_cid_que(qedi);
+ qedi_cm_free_mem(qedi);
+ qedi_free_uio(qedi->udev);
+ qedi_free_itt(qedi);
+
+ iscsi_host_remove(qedi->shost);
+ iscsi_host_free(qedi->shost);
+
+ if (qedi->ll2_recv_thread) {
+ kthread_stop(qedi->ll2_recv_thread);
+ qedi->ll2_recv_thread = NULL;
+ }
+ qedi_ll2_free_skbs(qedi);
+ }
+}
+
+static int __qedi_probe(struct pci_dev *pdev, int mode)
+{
+ struct qedi_ctx *qedi;
+ struct qed_ll2_params params;
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+ bool is_vf = false;
+ char host_buf[16];
+ struct qed_link_params link_params;
+ struct qed_slowpath_params sp_params;
+ struct qed_probe_params qed_params;
+ void *task_start, *task_end;
+ int rc;
+ u16 tmp;
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ qedi = qedi_host_alloc(pdev);
+ if (!qedi) {
+ rc = -ENOMEM;
+ goto exit_probe;
+ }
+ } else {
+ qedi = pci_get_drvdata(pdev);
+ }
+
+ memset(&qed_params, 0, sizeof(qed_params));
+ qed_params.protocol = QED_PROTOCOL_ISCSI;
+ qed_params.dp_module = dp_module;
+ qed_params.dp_level = dp_level;
+ qed_params.is_vf = is_vf;
+ qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
+ if (!qedi->cdev) {
+ rc = -ENODEV;
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
+ goto free_host;
+ }
+
+ qedi->msix_count = MAX_NUM_MSIX_PF;
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ rc = qedi_set_iscsi_pf_param(qedi);
+ if (rc) {
+ rc = -ENOMEM;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Set iSCSI pf param fail\n");
+ goto free_host;
+ }
+ }
+
+ qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+ rc = qedi_prepare_fp(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n");
+ goto free_pf_params;
+ }
+
+ /* Start the Slowpath-process */
+ memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
+ sp_params.int_mode = QED_INT_MODE_MSIX;
+ sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
+ sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
+ sp_params.drv_rev = QEDI_DRIVER_REV_VER;
+ sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
+ strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
+ rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
+ goto stop_hw;
+ }
+
+ /* update_pf_params needs to be called before and after slowpath
+ * start
+ */
+ qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+ rc = qedi_setup_int(qedi);
+ if (rc)
+ goto stop_iscsi_func;
+
+ qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+ /* Learn information crucial for qedi to progress */
+ rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
+ if (rc)
+ goto stop_iscsi_func;
+
+ /* Record BDQ producer doorbell addresses */
+ qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
+ qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "BDQ primary_prod=%p secondary_prod=%p.\n",
+ qedi->bdq_primary_prod,
+ qedi->bdq_secondary_prod);
+
+ /*
+ * We need to write the number of BDs in the BDQ we've preallocated so
+ * the f/w will do a prefetch and we'll get an unsolicited CQE when a
+ * packet arrives.
+ */
+ qedi->bdq_prod_idx = QEDI_BDQ_NUM;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Writing %d to primary and secondary BDQ doorbell registers.\n",
+ qedi->bdq_prod_idx);
+ writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+ tmp = readw(qedi->bdq_primary_prod);
+ writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+ tmp = readw(qedi->bdq_secondary_prod);
+
+ ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
+ qedi->mac);
+
+ sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
+
+ qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
+
+ memset(&params, 0, sizeof(params));
+ params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
+ qedi->ll2_mtu = DEF_PATH_MTU;
+ params.drop_ttl0_packets = 0;
+ params.rx_vlan_stripping = 1;
+ ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ /* set up rx path */
+ INIT_LIST_HEAD(&qedi->ll2_skb_list);
+ spin_lock_init(&qedi->ll2_lock);
+ /* start qedi context */
+ spin_lock_init(&qedi->hba_lock);
+ spin_lock_init(&qedi->task_idx_lock);
+ }
+ qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
+ qedi_ops->ll2->start(qedi->cdev, &params);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
+ (void *)qedi,
+ "qedi_ll2_thread");
+ }
+
+ rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
+ qedi, qedi_iscsi_event_cb);
+ if (rc) {
+ rc = -ENODEV;
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
+ goto stop_slowpath;
+ }
+
+ task_start = qedi_get_task_mem(&qedi->tasks, 0);
+ task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Task context start=%p, end=%p block_size=%u.\n",
+ task_start, task_end, qedi->tasks.size);
+
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
+ if (rc) {
+ QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
+ &qedi_dbg_fops);
+#endif
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
+ QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
+ FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+
+ if (mode == QEDI_MODE_NORMAL) {
+ if (iscsi_host_add(qedi->shost, &pdev->dev)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not add iscsi host\n");
+ rc = -ENOMEM;
+ goto remove_host;
+ }
+
+ /* Allocate uio buffers */
+ rc = qedi_alloc_uio_rings(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO alloc ring failed err=%d\n", rc);
+ goto remove_host;
+ }
+
+ rc = qedi_init_uio(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO init failed, err=%d\n", rc);
+ goto free_uio;
+ }
+
+ /* set up the cid queue and the cid-to-connection lookup table */
+ rc = qedi_setup_cid_que(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not setup cid que\n");
+ goto free_uio;
+ }
+
+ rc = qedi_cm_alloc_mem(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not alloc cm memory\n");
+ goto free_cid_que;
+ }
+
+ rc = qedi_alloc_itt(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not alloc itt memory\n");
+ goto free_cid_que;
+ }
+
+ sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ qedi->tmf_thread = create_singlethread_workqueue(host_buf);
+ if (!qedi->tmf_thread) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to start tmf thread!\n");
+ rc = -ENODEV;
+ goto free_cid_que;
+ }
+
+ sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
+ qedi->offload_thread = create_workqueue(host_buf);
+ if (!qedi->offload_thread) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to start offload thread!\n");
+ rc = -ENODEV;
+ goto free_cid_que;
+ }
+
+ /* F/w needs 1st task context memory entry for performance */
+ set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
+ atomic_set(&qedi->num_offloads, 0);
+ }
+
+ return 0;
+
+free_cid_que:
+ qedi_release_cid_que(qedi);
+free_uio:
+ qedi_free_uio(qedi->udev);
+remove_host:
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+ iscsi_host_remove(qedi->shost);
+stop_iscsi_func:
+ qedi_ops->stop(qedi->cdev);
+stop_slowpath:
+ qedi_ops->common->slowpath_stop(qedi->cdev);
+stop_hw:
+ qedi_ops->common->remove(qedi->cdev);
+free_pf_params:
+ qedi_free_iscsi_pf_param(qedi);
+free_host:
+ iscsi_host_free(qedi->shost);
+exit_probe:
+ return rc;
+}
+
+static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return __qedi_probe(pdev, QEDI_MODE_NORMAL);
+}
+
+static void qedi_remove(struct pci_dev *pdev)
+{
+ __qedi_remove(pdev, QEDI_MODE_NORMAL);
+}
+
+static struct pci_device_id qedi_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
+
+static struct pci_driver qedi_pci_driver = {
+ .name = QEDI_MODULE_NAME,
+ .id_table = qedi_pci_tbl,
+ .probe = qedi_probe,
+ .remove = qedi_remove,
+};
+
+static int __init qedi_init(void)
+{
+ int rc = 0;
+ int ret;
+ struct qedi_percpu_s *p;
+ unsigned int cpu = 0;
+
+ qedi_ops = qed_get_iscsi_ops();
+ if (!qedi_ops) {
+ QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
+ rc = -EINVAL;
+ goto exit_qedi_init_0;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_init("qedi");
+#endif
+
+ qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
+ if (!qedi_scsi_transport) {
+ QEDI_ERR(NULL, "Could not register qedi transport");
+ rc = -ENOMEM;
+ goto exit_qedi_init_1;
+ }
+
+ register_hotcpu_notifier(&qedi_cpu_notifier);
+
+ ret = pci_register_driver(&qedi_pci_driver);
+ if (ret) {
+ QEDI_ERR(NULL, "Failed to register driver\n");
+ rc = -EINVAL;
+ goto exit_qedi_init_2;
+ }
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(qedi_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->p_work_lock);
+ p->iothread = NULL;
+ }
+
+ for_each_online_cpu(cpu)
+ qedi_percpu_thread_create(cpu);
+
+ return rc;
+
+exit_qedi_init_2:
+ iscsi_unregister_transport(&qedi_iscsi_transport);
+exit_qedi_init_1:
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_exit();
+#endif
+ qed_put_iscsi_ops();
+exit_qedi_init_0:
+ return rc;
+}
+
+static void __exit qedi_cleanup(void)
+{
+ unsigned int cpu = 0;
+
+ for_each_online_cpu(cpu)
+ qedi_percpu_thread_destroy(cpu);
+
+ pci_unregister_driver(&qedi_pci_driver);
+ unregister_hotcpu_notifier(&qedi_cpu_notifier);
+ iscsi_unregister_transport(&qedi_iscsi_transport);
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_exit();
+#endif
+ qed_put_iscsi_ops();
+}
+
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_VERSION(QEDI_MODULE_VERSION);
+module_init(qedi_init);
+module_exit(qedi_cleanup);
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
new file mode 100644
index 000000000000..b10c48bd1428
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_sysfs.c
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+#include "qedi_dbg.h"
+
+static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ return iscsi_host_priv(shost);
+}
+
+static ssize_t qedi_show_port_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+
+ if (atomic_read(&qedi->link_state) == QEDI_LINK_UP)
+ return sprintf(buf, "Online\n");
+ else
+ return sprintf(buf, "Linkdown\n");
+}
+
+static ssize_t qedi_show_speed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+ struct qed_link_output if_link;
+
+ qedi_ops->common->get_link(qedi->cdev, &if_link);
+
+ return sprintf(buf, "%d Gbit\n", if_link.speed / 1000);
+}
+
+static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL);
+static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL);
+
+struct device_attribute *qedi_shost_attrs[] = {
+ &dev_attr_port_state,
+ &dev_attr_speed,
+ NULL
+};
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
new file mode 100644
index 000000000000..9543a1b139d4
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -0,0 +1,14 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#define QEDI_MODULE_VERSION "8.10.3.0"
+#define QEDI_DRIVER_MAJOR_VER 8
+#define QEDI_DRIVER_MINOR_VER 10
+#define QEDI_DRIVER_REV_VER 3
+#define QEDI_DRIVER_ENG_VER 0
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fe7469c901f7..47eb4d545d13 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1988,9 +1988,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
scsi_qla_host_t *vha = NULL;
struct qla_hw_data *ha = base_vha->hw;
- uint16_t options = 0;
int cnt;
struct req_que *req = ha->req_q_map[0];
+ struct qla_qpair *qpair;
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
if (ret) {
@@ -2075,15 +2075,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
qlt_vport_create(vha, ha);
qla24xx_vport_disable(fc_vport, disable);
- if (ha->flags.cpu_affinity_enabled) {
- req = ha->req_q_map[1];
- ql_dbg(ql_dbg_multiq, vha, 0xc000,
- "Request queue %p attached with "
- "VP[%d], cpu affinity =%d\n",
- req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
- goto vport_queue;
- } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
+ if (!ql2xmqsupport || !ha->npiv_info)
goto vport_queue;
+
/* Create a request queue in QoS mode for the vport */
for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
@@ -2095,20 +2089,20 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (qos) {
- ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
- qos);
- if (!ret)
+ qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx);
+ if (!qpair)
ql_log(ql_log_warn, vha, 0x7084,
- "Can't create request queue for VP[%d]\n",
+ "Can't create qpair for VP[%d]\n",
vha->vp_idx);
else {
ql_dbg(ql_dbg_multiq, vha, 0xc001,
- "Request Que:%d Q0s: %d) created for VP[%d]\n",
- ret, qos, vha->vp_idx);
+ "Queue pair: %d Qos: %d) created for VP[%d]\n",
+ qpair->id, qos, vha->vp_idx);
ql_dbg(ql_dbg_user, vha, 0x7085,
- "Request Que:%d Q0s: %d) created for VP[%d]\n",
- ret, qos, vha->vp_idx);
- req = ha->req_q_map[ret];
+ "Queue Pair: %d Qos: %d) created for VP[%d]\n",
+ qpair->id, qos, vha->vp_idx);
+ req = qpair->req;
+ vha->qpair = qpair;
}
}
@@ -2162,10 +2156,10 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
clear_bit(vha->vp_idx, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
- if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
- if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
+ if (vha->qpair->vp_idx == vha->vp_idx) {
+ if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
- "Queue delete failed.\n");
+ "Queue Pair delete failed.\n");
}
ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 45af34ddc432..21d9fb7fc887 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,7 +11,7 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0191 | 0x0146 |
+ * | Module Init and Probe | 0x0193 | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e |
* | Mailbox commands | 0x1199 | 0x1193 |
@@ -58,7 +58,7 @@
* | | | 0xb13a,0xb142 |
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
- * | MultiQ | 0xc00c | |
+ * | MultiQ | 0xc010 | |
* | Misc | 0xd301 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 5236e3f2a06a..f7df01b76714 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -401,6 +401,7 @@ typedef struct srb {
uint16_t type;
char *name;
int iocbs;
+ struct qla_qpair *qpair;
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
@@ -2719,6 +2720,7 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
+ int (*start_scsi_mq) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
int (*iospace_config)(struct qla_hw_data*);
int (*initialize_adapter)(struct scsi_qla_host *);
@@ -2730,8 +2732,10 @@ struct isp_operations {
#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
-#define QLA_MSIX_DEFAULT 0x00
-#define QLA_MSIX_RSP_Q 0x01
+#define QLA_MSIX_DEFAULT 0x00
+#define QLA_MSIX_RSP_Q 0x01
+#define QLA_ATIO_VECTOR 0x02
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
@@ -2745,9 +2749,11 @@ struct scsi_qla_host;
struct qla_msix_entry {
int have_irq;
+ int in_use;
uint32_t vector;
uint16_t entry;
- struct rsp_que *rsp;
+ char name[30];
+ void *handle;
struct irq_affinity_notify irq_notify;
int cpuid;
};
@@ -2872,7 +2878,6 @@ struct rsp_que {
struct qla_msix_entry *msix;
struct req_que *req;
srb_t *status_srb; /* status continuation entry */
- struct work_struct q_work;
dma_addr_t dma_fx00;
response_t *ring_fx00;
@@ -2909,6 +2914,37 @@ struct req_que {
uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};
+/* Queue pair data structure */
+struct qla_qpair {
+ spinlock_t qp_lock;
+ atomic_t ref_count;
+ /* distill these fields down to 'online=0/1'
+ * ha->flags.eeh_busy
+ * ha->flags.pci_channel_io_perm_failure
+ * base_vha->loop_state
+ */
+ uint32_t online:1;
+ /* move vha->flags.difdix_supported here */
+ uint32_t difdix_supported:1;
+ uint32_t delete_in_progress:1;
+
+ uint16_t id; /* qp number used with FW */
+ uint16_t num_active_cmd; /* cmds down at firmware */
+ cpumask_t cpu_mask; /* CPU mask for cpu affinity operation */
+ uint16_t vp_idx; /* vport ID */
+
+ mempool_t *srb_mempool;
+
+ /* TODO (new driver): move queues here instead of pointers */
+ struct req_que *req;
+ struct rsp_que *rsp;
+ struct atio_que *atio;
+ struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */
+ struct qla_hw_data *hw;
+ struct work_struct q_work;
+ struct list_head qp_list_elem; /* vha->qp_list */
+};
+
/* Place holder for FW buffer parameters */
struct qlfc_fw {
void *fw_buf;
@@ -3004,7 +3040,6 @@ struct qla_hw_data {
uint32_t chip_reset_done :1;
uint32_t running_gold_fw :1;
uint32_t eeh_busy :1;
- uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
uint32_t fcp_prio_enabled :1;
uint32_t isp82xx_fw_hung:1;
@@ -3061,10 +3096,15 @@ struct qla_hw_data {
uint8_t mqenable;
struct req_que **req_q_map;
struct rsp_que **rsp_q_map;
+ struct qla_qpair **queue_pair_map;
unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+ unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
+ / sizeof(unsigned long)];
uint8_t max_req_queues;
uint8_t max_rsp_queues;
+ uint8_t max_qpairs;
+ struct qla_qpair *base_qpair;
struct qla_npiv_entry *npiv_info;
uint16_t nvram_npiv_size;
@@ -3328,6 +3368,7 @@ struct qla_hw_data {
struct mutex vport_lock; /* Virtual port synchronization */
spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
+ struct mutex mq_lock; /* multi-queue synchronization */
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
@@ -3608,6 +3649,7 @@ typedef struct scsi_qla_host {
uint32_t fw_tgt_reported:1;
uint32_t bbcr_enable:1;
+ uint32_t qpairs_available:1;
} flags;
atomic_t loop_state;
@@ -3646,6 +3688,7 @@ typedef struct scsi_qla_host {
#define FX00_TARGET_SCAN 24
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
+#define QPAIR_ONLINE_CHECK_NEEDED 27
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -3704,10 +3747,13 @@ typedef struct scsi_qla_host {
/* List of pending PLOGI acks, protected by hw lock */
struct list_head plogi_ack_list;
+ struct list_head qp_list;
+
uint32_t vp_abort_cnt;
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
uint16_t vp_idx; /* vport ID */
+ struct qla_qpair *qpair; /* base qpair */
unsigned long vp_flags;
#define VP_IDX_ACQUIRED 0 /* bit no 0 */
@@ -3763,6 +3809,23 @@ struct qla_tgt_vp_map {
scsi_qla_host_t *vha;
};
+struct qla2_sgx {
+ dma_addr_t dma_addr; /* OUT */
+ uint32_t dma_len; /* OUT */
+
+ uint32_t tot_bytes; /* IN */
+ struct scatterlist *cur_sg; /* IN */
+
+ /* for book keeping, bzero on initial invocation */
+ uint32_t bytes_consumed;
+ uint32_t num_bytes;
+ uint32_t tot_partial;
+
+ /* for debugging */
+ uint32_t num_sg;
+ srb_t *sp;
+};
+
/*
* Macros to help code, maintain, etc.
*/
@@ -3775,21 +3838,34 @@ struct qla_tgt_vp_map {
(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
-#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
- atomic_inc(&__vha->vref_count); \
- mb(); \
- if (__vha->flags.delete_progress) { \
- atomic_dec(&__vha->vref_count); \
- __bail = 1; \
- } else { \
- __bail = 0; \
- } \
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
+ atomic_inc(&__vha->vref_count); \
+ mb(); \
+ if (__vha->flags.delete_progress) { \
+ atomic_dec(&__vha->vref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
} while (0)
-#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
- atomic_dec(&__vha->vref_count); \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+ atomic_dec(&__vha->vref_count); \
+
+#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
+ atomic_inc(&__qpair->ref_count); \
+ mb(); \
+ if (__qpair->delete_in_progress) { \
+ atomic_dec(&__qpair->ref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
} while (0)
+#define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \
+ atomic_dec(&__qpair->ref_count); \
+
/*
* qla2x00 local function return status codes
*/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c51d9f3359e3..afa0116a163b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -91,12 +91,17 @@ extern int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
+extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
+ int, int);
+extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
/*
* Global Data in qla_os.c source file.
*/
extern char qla2x00_version_str[];
+extern struct kmem_cache *srb_cachep;
+
extern int ql2xlogintimeout;
extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
@@ -105,8 +110,7 @@ extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
-extern int ql2xmaxqueues;
-extern int ql2xmultique_tag;
+extern int ql2xmqsupport;
extern int ql2xfwloadbin;
extern int ql2xetsenable;
extern int ql2xshiftctondsd;
@@ -172,6 +176,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla2x00_sp_compl(void *, void *, int);
+extern void qla2xxx_qpair_sp_free_dma(void *, void *);
+extern void qla2xxx_qpair_sp_compl(void *, void *, int);
/*
* Global Functions in qla_mid.c source file.
@@ -220,6 +227,8 @@ extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
+ uint16_t, struct req_que *);
extern int qla2x00_start_scsi(srb_t *sp);
extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
@@ -227,6 +236,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
extern int qla2x00_start_sp(srb_t *);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern int qla2xxx_dif_start_scsi_mq(srb_t *);
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
@@ -237,7 +247,10 @@ extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
-
+extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
+extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
+ struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -468,6 +481,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
extern void
qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
+extern irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -603,15 +618,18 @@ extern int qla2x00_dfs_setup(scsi_qla_host_t *);
extern int qla2x00_dfs_remove(scsi_qla_host_t *);
/* Globa function prototypes for multi-q */
-extern int qla25xx_request_irq(struct rsp_que *);
+extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
+ struct qla_msix_entry *, int);
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
uint16_t, int, uint8_t);
extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
- uint16_t, int);
+ uint16_t, struct qla_qpair *);
+
extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_delete_queues(struct scsi_qla_host *);
extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5b09296b46a3..632d5f30386a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1769,8 +1769,7 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
if (req->outstanding_cmds)
return QLA_SUCCESS;
- if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
- (ql2xmultique_tag || ql2xmaxqueues > 1)))
+ if (!IS_FWI2_CAPABLE(ha))
req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
else {
if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
@@ -4248,10 +4247,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
struct req_que *req;
struct rsp_que *rsp;
- if (vha->hw->flags.cpu_affinity_enabled)
- req = vha->hw->req_q_map[0];
- else
- req = vha->req;
+ req = vha->req;
rsp = req->rsp;
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -6040,10 +6036,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
return -EINVAL;
rval = qla2x00_fw_ready(base_vha);
- if (ha->flags.cpu_affinity_enabled)
- req = ha->req_q_map[0];
+ if (vha->qpair)
+ req = vha->qpair->req;
else
- req = vha->req;
+ req = ha->req_q_map[0];
rsp = req->rsp;
if (rval == QLA_SUCCESS) {
@@ -6725,3 +6721,162 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
return ret;
}
+
+struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx)
+{
+ int rsp_id = 0;
+ int req_id = 0;
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t qpair_id = 0;
+ struct qla_qpair *qpair = NULL;
+ struct qla_msix_entry *msix;
+
+ if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
+ ql_log(ql_log_warn, vha, 0x00181,
+ "FW/Driver is not multi-queue capable.\n");
+ return NULL;
+ }
+
+ if (ql2xmqsupport) {
+ qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+ if (qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x0182,
+ "Failed to allocate memory for queue pair.\n");
+ return NULL;
+ }
+ memset(qpair, 0, sizeof(struct qla_qpair));
+
+ qpair->hw = vha->hw;
+
+ /* Assign an available queue pair id */
+ mutex_lock(&ha->mq_lock);
+ qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
+ if (qpair_id >= ha->max_qpairs) {
+ mutex_unlock(&ha->mq_lock);
+ ql_log(ql_log_warn, vha, 0x0183,
+ "No resources to create additional q pair.\n");
+ goto fail_qid_map;
+ }
+ set_bit(qpair_id, ha->qpair_qid_map);
+ ha->queue_pair_map[qpair_id] = qpair;
+ qpair->id = qpair_id;
+ qpair->vp_idx = vp_idx;
+
+ for (i = 0; i < ha->msix_count; i++) {
+ msix = &ha->msix_entries[i];
+ if (msix->in_use)
+ continue;
+ qpair->msix = msix;
+ ql_dbg(ql_dbg_multiq, vha, 0xc00f,
+ "Vector %x selected for qpair\n", msix->vector);
+ break;
+ }
+ if (!qpair->msix) {
+ ql_log(ql_log_warn, vha, 0x0184,
+ "Out of MSI-X vectors!.\n");
+ goto fail_msix;
+ }
+
+ qpair->msix->in_use = 1;
+ list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
+
+ mutex_unlock(&ha->mq_lock);
+
+ /* Create response queue first */
+ rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair);
+ if (!rsp_id) {
+ ql_log(ql_log_warn, vha, 0x0185,
+ "Failed to create response queue.\n");
+ goto fail_rsp;
+ }
+
+ qpair->rsp = ha->rsp_q_map[rsp_id];
+
+ /* Create request queue */
+ req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos);
+ if (!req_id) {
+ ql_log(ql_log_warn, vha, 0x0186,
+ "Failed to create request queue.\n");
+ goto fail_req;
+ }
+
+ qpair->req = ha->req_q_map[req_id];
+ qpair->rsp->req = qpair->req;
+
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4)
+ qpair->difdix_supported = 1;
+ }
+
+ qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
+ if (!qpair->srb_mempool) {
+ ql_log(ql_log_warn, vha, 0x0191,
+ "Failed to create srb mempool for qpair %d\n",
+ qpair->id);
+ goto fail_mempool;
+ }
+
+ /* Mark as online */
+ qpair->online = 1;
+
+ if (!vha->flags.qpairs_available)
+ vha->flags.qpairs_available = 1;
+
+ ql_dbg(ql_dbg_multiq, vha, 0xc00d,
+ "Request/Response queue pair created, id %d\n",
+ qpair->id);
+ ql_dbg(ql_dbg_init, vha, 0x0187,
+ "Request/Response queue pair created, id %d\n",
+ qpair->id);
+ }
+ return qpair;
+
+fail_mempool:
+fail_req:
+ qla25xx_delete_rsp_que(vha, qpair->rsp);
+fail_rsp:
+ mutex_lock(&ha->mq_lock);
+ qpair->msix->in_use = 0;
+ list_del(&qpair->qp_list_elem);
+ if (list_empty(&vha->qp_list))
+ vha->flags.qpairs_available = 0;
+fail_msix:
+ ha->queue_pair_map[qpair_id] = NULL;
+ clear_bit(qpair_id, ha->qpair_qid_map);
+ mutex_unlock(&ha->mq_lock);
+fail_qid_map:
+ kfree(qpair);
+ return NULL;
+}
+
+int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
+{
+ int ret;
+ struct qla_hw_data *ha = qpair->hw;
+
+ qpair->delete_in_progress = 1;
+ while (atomic_read(&qpair->ref_count))
+ msleep(500);
+
+ ret = qla25xx_delete_req_que(vha, qpair->req);
+ if (ret != QLA_SUCCESS)
+ goto fail;
+ ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
+ if (ret != QLA_SUCCESS)
+ goto fail;
+
+ mutex_lock(&ha->mq_lock);
+ ha->queue_pair_map[qpair->id] = NULL;
+ clear_bit(qpair->id, ha->qpair_qid_map);
+ list_del(&qpair->qp_list_elem);
+ if (list_empty(&vha->qp_list))
+ vha->flags.qpairs_available = 0;
+ mempool_destroy(qpair->srb_mempool);
+ kfree(qpair);
+ mutex_unlock(&ha->mq_lock);
+
+ return QLA_SUCCESS;
+fail:
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index edc48f3b8230..44e404583c86 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -216,6 +216,36 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
}
static inline srb_t *
+qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+{
+ srb_t *sp = NULL;
+ uint8_t bail;
+
+ QLA_QPAIR_MARK_BUSY(qpair, bail);
+ if (unlikely(bail))
+ return NULL;
+
+ sp = mempool_alloc(qpair->srb_mempool, flag);
+ if (!sp)
+ goto done;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->iocbs = 1;
+done:
+ if (!sp)
+ QLA_QPAIR_MARK_NOT_BUSY(qpair);
+ return sp;
+}
+
+static inline void
+qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
+{
+ mempool_free(sp, qpair->srb_mempool);
+ QLA_QPAIR_MARK_NOT_BUSY(qpair);
+}
+
+static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 221ad8907893..58e49a3e1de8 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -12,7 +12,6 @@
#include <scsi/scsi_tcq.h>
-static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
* @cmd: SCSI command
@@ -143,7 +142,7 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
return (cont_pkt);
}
-static inline int
+inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
@@ -693,10 +692,11 @@ qla24xx_calc_dsd_lists(uint16_t dsds)
* @sp: SRB command to process
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
+ * @req: pointer to request queue
*/
-static inline void
+inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
- uint16_t tot_dsds)
+ uint16_t tot_dsds, struct req_que *req)
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
@@ -745,7 +745,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -845,24 +845,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
}
}
-struct qla2_sgx {
- dma_addr_t dma_addr; /* OUT */
- uint32_t dma_len; /* OUT */
-
- uint32_t tot_bytes; /* IN */
- struct scatterlist *cur_sg; /* IN */
-
- /* for book keeping, bzero on initial invocation */
- uint32_t bytes_consumed;
- uint32_t num_bytes;
- uint32_t tot_partial;
-
- /* for debugging */
- uint32_t num_sg;
- srb_t *sp;
-};
-
-static int
+int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
uint32_t *partial)
{
@@ -1207,7 +1190,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
*/
-static inline int
+inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
@@ -1436,8 +1419,8 @@ qla24xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
/* Setup device pointers. */
- qla25xx_set_que(sp, &rsp);
req = vha->req;
+ rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1523,12 +1506,10 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
- /* Specify response queue number where completion should happen */
- cmd_pkt->entry_status = (uint8_t) rsp->id;
wmb();
/* Adjust ring index. */
req->ring_index++;
@@ -1597,9 +1578,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
}
/* Setup device pointers. */
-
- qla25xx_set_que(sp, &rsp);
req = vha->req;
+ rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1764,18 +1744,365 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
-
-static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
+/**
+ * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla2xxx_start_scsi_mq(srb_t *sp)
{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ struct cmd_type_7 *cmd_pkt;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct qla_hw_data *ha = sp->fcport->vha->hw;
- int affinity = cmd->request->cpu;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = sp->qpair;
+
+ /* Setup qpair pointers */
+ rsp = qpair->rsp;
+ req = qpair->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ cmd_pkt->task = TSK_SIMPLE;
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ wmb();
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+
+/**
+ * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla2xxx_dif_start_scsi_mq(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt = 0;
+ uint16_t tot_dsds;
+ uint16_t tot_prot_dsds;
+ uint16_t fw_prot_opts = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_crc_2 *cmd_pkt;
+ uint32_t status = 0;
+ struct qla_qpair *qpair = sp->qpair;
+
+#define QDSS_GOT_Q_SPACE BIT_0
+
+ /* Check for host side state */
+ if (!qpair->online) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return QLA_INTERFACE_ERROR;
+ }
+
+ if (!qpair->difdix_supported &&
+ scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return QLA_INTERFACE_ERROR;
+ }
+
+ /* Only process protection commands or CDBs longer than 16 bytes here */
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
+ if (cmd->cmd_len <= 16)
+ return qla2xxx_start_scsi_mq(sp);
+ }
+
+ /* Setup qpair pointers */
+ rsp = qpair->rsp;
+ req = qpair->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Compute number of required data segments */
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ struct qla2_sgx sgx;
+ uint32_t partial;
+
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
+ sgx.sp = sp;
+
+ nseg = 0;
+ while (qla24xx_get_one_block_sg(
+ cmd->device->sector_size, &sgx, &partial))
+ nseg++;
+ }
+ } else
+ nseg = 0;
+
+ /* number of required data segments */
+ tot_dsds = nseg;
+
+ /* Compute number of required protection segments */
+ if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+ }
+ } else {
+ nseg = 0;
+ }
+
+ req_cnt = 1;
+ /* Total Data and protection sg segment(s) */
+ tot_prot_dsds = nseg;
+ tot_dsds += nseg;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ status |= QDSS_GOT_Q_SPACE;
+
+ /* Build header part of command packet (excluding the OPCODE). */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ /* Fill-in common area */
+ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
- affinity < ha->max_rsp_queues - 1)
- *rsp = ha->rsp_q_map[affinity + 1];
- else
- *rsp = ha->rsp_q_map[0];
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* Total Data and protection segment(s) */
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Build IOCB segments and adjust for data protection segments */
+ if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+ req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+ QLA_SUCCESS)
+ goto queuing_error;
+
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ cmd_pkt->timeout = cpu_to_le16(0);
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (status & QDSS_GOT_Q_SPACE) {
+ req->outstanding_cmds[handle] = NULL;
+ req->cnt += req_cnt;
+ }
+ /* Cleanup will be performed by the caller (queuecommand) */
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_FUNCTION_FAILED;
}
/* Generic Control-SRB manipulation functions. */
@@ -2664,7 +2991,7 @@ sufficient_dsds:
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 19f18485a854..5093ca9b02ec 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
@@ -2871,41 +2872,6 @@ out:
}
static irqreturn_t
-qla25xx_msix_rsp_q(int irq, void *dev_id)
-{
- struct qla_hw_data *ha;
- scsi_qla_host_t *vha;
- struct rsp_que *rsp;
- struct device_reg_24xx __iomem *reg;
- unsigned long flags;
- uint32_t hccr = 0;
-
- rsp = (struct rsp_que *) dev_id;
- if (!rsp) {
- ql_log(ql_log_info, NULL, 0x505b,
- "%s: NULL response queue pointer.\n", __func__);
- return IRQ_NONE;
- }
- ha = rsp->hw;
- vha = pci_get_drvdata(ha->pdev);
-
- /* Clear the interrupt, if enabled, for this response queue */
- if (!ha->flags.disable_msix_handshake) {
- reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- }
- if (qla2x00_check_reg32_for_disconnect(vha, hccr))
- goto out;
- queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
-
-out:
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
scsi_qla_host_t *vha;
@@ -3001,6 +2967,35 @@ qla24xx_msix_default(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id)
+{
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
+ struct device_reg_24xx __iomem *reg;
+ unsigned long flags;
+
+ qpair = dev_id;
+ if (!qpair) {
+ ql_log(ql_log_info, NULL, 0x505b,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = qpair->hw;
+
+ /* Clear the interrupt, if enabled, for this response queue */
+ if (unlikely(!ha->flags.disable_msix_handshake)) {
+ reg = &ha->iobase->isp24;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+ queue_work(ha->wq, &qpair->q_work);
+
+ return IRQ_HANDLED;
+}
+
/* Interrupt handling helpers. */
struct qla_init_msix_entry {
@@ -3008,69 +3003,28 @@ struct qla_init_msix_entry {
irq_handler_t handler;
};
-static struct qla_init_msix_entry msix_entries[3] = {
+static struct qla_init_msix_entry msix_entries[] = {
{ "qla2xxx (default)", qla24xx_msix_default },
{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
- { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
+ { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+ { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
};
-static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
+static struct qla_init_msix_entry qla82xx_msix_entries[] = {
{ "qla2xxx (default)", qla82xx_msix_default },
{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
-static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
- { "qla2xxx (default)", qla24xx_msix_default },
- { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
- { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
-};
-
-static void
-qla24xx_disable_msix(struct qla_hw_data *ha)
-{
- int i;
- struct qla_msix_entry *qentry;
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-
- for (i = 0; i < ha->msix_count; i++) {
- qentry = &ha->msix_entries[i];
- if (qentry->have_irq) {
- /* un-register irq cpu affinity notification */
- irq_set_affinity_notifier(qentry->vector, NULL);
- free_irq(qentry->vector, qentry->rsp);
- }
- }
- pci_disable_msix(ha->pdev);
- kfree(ha->msix_entries);
- ha->msix_entries = NULL;
- ha->flags.msix_enabled = 0;
- ql_dbg(ql_dbg_init, vha, 0x0042,
- "Disabled the MSI.\n");
-}
-
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT 2
-#define ATIO_VECTOR 2
int i, ret;
- struct msix_entry *entries;
struct qla_msix_entry *qentry;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
- GFP_KERNEL);
- if (!entries) {
- ql_log(ql_log_warn, vha, 0x00bc,
- "Failed to allocate memory for msix_entry.\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < ha->msix_count; i++)
- entries[i].entry = i;
-
- ret = pci_enable_msix_range(ha->pdev,
- entries, MIN_MSIX_COUNT, ha->msix_count);
+ ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (ret < 0) {
ql_log(ql_log_fatal, vha, 0x00c7,
"MSI-X: Failed to enable support, "
@@ -3080,10 +3034,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
} else if (ret < ha->msix_count) {
ql_log(ql_log_warn, vha, 0x00c6,
"MSI-X: Failed to enable support "
- "-- %d/%d\n Retry with %d vectors.\n",
- ha->msix_count, ret, ret);
+ "with %d vectors, using %d vectors.\n",
+ ha->msix_count, ret);
ha->msix_count = ret;
- ha->max_rsp_queues = ha->msix_count - 1;
+ /* Recalculate queue values */
+ if (ha->mqiobase && ql2xmqsupport) {
+ ha->max_req_queues = ha->msix_count - 1;
+
+ /* ATIOQ needs 1 vector. That's 1 less QPair */
+ if (QLA_TGT_MODE_ENABLED())
+ ha->max_req_queues--;
+
+ ha->max_rsp_queues = ha->max_req_queues;
+
+ ha->max_qpairs = ha->max_req_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+ "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
+ }
}
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
@@ -3097,20 +3064,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
- qentry->vector = entries[i].vector;
- qentry->entry = entries[i].entry;
+ qentry->vector = pci_irq_vector(ha->pdev, i);
+ qentry->entry = i;
qentry->have_irq = 0;
- qentry->rsp = NULL;
+ qentry->in_use = 0;
+ qentry->handle = NULL;
qentry->irq_notify.notify = qla_irq_affinity_notify;
qentry->irq_notify.release = qla_irq_affinity_release;
qentry->cpuid = -1;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
qentry = &ha->msix_entries[i];
- qentry->rsp = rsp;
+ qentry->handle = rsp;
rsp->msix = qentry;
+ scnprintf(qentry->name, sizeof(qentry->name), "%s",
+ msix_entries[i].name);
if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
@@ -3122,6 +3092,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (ret)
goto msix_register_fail;
qentry->have_irq = 1;
+ qentry->in_use = 1;
/* Register for CPU affinity notification. */
irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
@@ -3141,12 +3112,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
* queue.
*/
if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
- qentry = &ha->msix_entries[ATIO_VECTOR];
- qentry->rsp = rsp;
+ qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
rsp->msix = qentry;
+ qentry->handle = rsp;
+ scnprintf(qentry->name, sizeof(qentry->name), "%s",
+ msix_entries[QLA_ATIO_VECTOR].name);
+ qentry->in_use = 1;
ret = request_irq(qentry->vector,
- qla83xx_msix_entries[ATIO_VECTOR].handler,
- 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
+ msix_entries[QLA_ATIO_VECTOR].handler,
+ 0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
qentry->have_irq = 1;
}
@@ -3155,7 +3129,7 @@ msix_register_fail:
ql_log(ql_log_fatal, vha, 0x00cb,
"MSI-X: unable to register handler -- %x/%d.\n",
qentry->vector, ret);
- qla24xx_disable_msix(ha);
+ qla2x00_free_irqs(vha);
ha->mqenable = 0;
goto msix_out;
}
@@ -3163,11 +3137,13 @@ msix_register_fail:
/* Enable MSI-X vector for response queue update for queue 0 */
if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ ql2xmqsupport))
ha->mqenable = 1;
} else
- if (ha->mqiobase
- && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ if (ha->mqiobase &&
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ ql2xmqsupport))
ha->mqenable = 1;
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
@@ -3177,7 +3153,6 @@ msix_register_fail:
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
msix_out:
- kfree(entries);
return ret;
}
@@ -3230,7 +3205,7 @@ skip_msix:
!IS_QLA27XX(ha))
goto skip_msi;
- ret = pci_enable_msi(ha->pdev);
+ ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
if (!ret) {
ql_dbg(ql_dbg_init, vha, 0x0038,
"MSI: Enabled.\n");
@@ -3275,6 +3250,8 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
struct rsp_que *rsp;
+ struct qla_msix_entry *qentry;
+ int i;
/*
* We need to check that ha->rsp_q_map is valid in case we are called
@@ -3284,25 +3261,36 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
return;
rsp = ha->rsp_q_map[0];
- if (ha->flags.msix_enabled)
- qla24xx_disable_msix(ha);
- else if (ha->flags.msi_enabled) {
- free_irq(ha->pdev->irq, rsp);
- pci_disable_msi(ha->pdev);
- } else
- free_irq(ha->pdev->irq, rsp);
-}
+ if (ha->flags.msix_enabled) {
+ for (i = 0; i < ha->msix_count; i++) {
+ qentry = &ha->msix_entries[i];
+ if (qentry->have_irq) {
+ irq_set_affinity_notifier(qentry->vector, NULL);
+ free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
+ }
+ }
+ kfree(ha->msix_entries);
+ ha->msix_entries = NULL;
+ ha->flags.msix_enabled = 0;
+ ql_dbg(ql_dbg_init, vha, 0x0042,
+ "Disabled MSI-X.\n");
+ } else {
+ free_irq(pci_irq_vector(ha->pdev, 0), rsp);
+ }
+ pci_free_irq_vectors(ha->pdev);
+}
-int qla25xx_request_irq(struct rsp_que *rsp)
+int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
+ struct qla_msix_entry *msix, int vector_type)
{
- struct qla_hw_data *ha = rsp->hw;
- struct qla_init_msix_entry *intr = &msix_entries[2];
- struct qla_msix_entry *msix = rsp->msix;
+ struct qla_init_msix_entry *intr = &msix_entries[vector_type];
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int ret;
- ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
+ scnprintf(msix->name, sizeof(msix->name),
+ "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
+ ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
if (ret) {
ql_log(ql_log_fatal, vha, 0x00e6,
"MSI-X: Unable to register handler -- %x/%d.\n",
@@ -3310,7 +3298,7 @@ int qla25xx_request_irq(struct rsp_que *rsp)
return ret;
}
msix->have_irq = 1;
- msix->rsp = rsp;
+ msix->handle = qpair;
return ret;
}
@@ -3323,11 +3311,12 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
container_of(notify, struct qla_msix_entry, irq_notify);
struct qla_hw_data *ha;
struct scsi_qla_host *base_vha;
+ struct rsp_que *rsp = e->handle;
/* user is recommended to set mask to just 1 cpu */
e->cpuid = cpumask_first(mask);
- ha = e->rsp->hw;
+ ha = rsp->hw;
base_vha = pci_get_drvdata(ha->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
@@ -3351,9 +3340,10 @@ static void qla_irq_affinity_release(struct kref *ref)
container_of(ref, struct irq_affinity_notify, kref);
struct qla_msix_entry *e =
container_of(notify, struct qla_msix_entry, irq_notify);
- struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
+ struct rsp_que *rsp = e->handle;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host%ld: vector %d cpu %d \n", __func__,
+ "%s: host%ld: vector %d cpu %d\n", __func__,
base_vha->host_no, e->vector, e->cpuid);
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 23698c998699..2819ceb96041 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,43 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+struct rom_cmd {
+ uint16_t cmd;
+} rom_cmds[] = {
+ { MBC_LOAD_RAM },
+ { MBC_EXECUTE_FIRMWARE },
+ { MBC_READ_RAM_WORD },
+ { MBC_MAILBOX_REGISTER_TEST },
+ { MBC_VERIFY_CHECKSUM },
+ { MBC_GET_FIRMWARE_VERSION },
+ { MBC_LOAD_RISC_RAM },
+ { MBC_DUMP_RISC_RAM },
+ { MBC_LOAD_RISC_RAM_EXTENDED },
+ { MBC_DUMP_RISC_RAM_EXTENDED },
+ { MBC_WRITE_RAM_WORD_EXTENDED },
+ { MBC_READ_RAM_EXTENDED },
+ { MBC_GET_RESOURCE_COUNTS },
+ { MBC_SET_FIRMWARE_OPTION },
+ { MBC_MID_INITIALIZE_FIRMWARE },
+ { MBC_GET_FIRMWARE_STATE },
+ { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
+ { MBC_GET_RETRY_COUNT },
+ { MBC_TRACE_CONTROL },
+};
+
+static int is_rom_cmd(uint16_t cmd)
+{
+ int i;
+ struct rom_cmd *wc;
+
+ for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
+ wc = rom_cmds + i;
+ if (wc->cmd == cmd)
+ return 1;
+ }
+
+ return 0;
+}
/*
* qla2x00_mailbox_command
@@ -92,6 +129,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ /* check if ISP abort is active and return cmd with timeout */
+ if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
+ !is_rom_cmd(mcp->mb[0])) {
+ ql_log(ql_log_info, vha, 0x1005,
+ "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
+ mcp->mb[0]);
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -178,6 +226,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ wait_time = jiffies;
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -186,6 +235,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+ if (time_after(jiffies, wait_time + 5 * HZ))
+ ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
+ command, jiffies_to_msecs(jiffies - wait_time));
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1011,
"Cmd=%x Polling Mode.\n", command);
@@ -1194,12 +1246,17 @@ qla2x00_abort_command(srb_t *sp)
fc_port_t *fcport = sp->fcport;
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
+ struct req_que *req;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
"Entered %s.\n", __func__);
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+ else
+ req = vha->req;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -2152,10 +2209,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
"Entered %s.\n", __func__);
- if (ha->flags.cpu_affinity_enabled)
- req = ha->req_q_map[0];
+ if (vha->vp_idx && vha->qpair)
+ req = vha->qpair->req;
else
- req = vha->req;
+ req = ha->req_q_map[0];
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
@@ -2435,10 +2492,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
- if (ql2xmaxqueues > 1)
- req = ha->req_q_map[0];
- else
- req = vha->req;
+ req = vha->req;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
lg->handle = MAKE_HANDLE(req->id, lg->handle);
@@ -2904,6 +2958,9 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+
if (ql2xasynctmfenable)
return qla24xx_async_abort_command(sp);
@@ -2984,6 +3041,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
struct qla_hw_data *ha;
struct req_que *req;
struct rsp_que *rsp;
+ struct qla_qpair *qpair;
vha = fcport->vha;
ha = vha->hw;
@@ -2992,10 +3050,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
"Entered %s.\n", __func__);
- if (ha->flags.cpu_affinity_enabled)
- rsp = ha->rsp_q_map[tag + 1];
- else
+ if (vha->vp_idx && vha->qpair) {
+ /* NPIV port */
+ qpair = vha->qpair;
+ rsp = qpair->rsp;
+ req = qpair->req;
+ } else {
rsp = req->rsp;
+ }
+
tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
ql_log(ql_log_warn, vha, 0x1093,
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cf7ba52bae66..c6d6f0d912ff 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -540,9 +540,10 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
uint16_t que_id = rsp->id;
if (rsp->msix && rsp->msix->have_irq) {
- free_irq(rsp->msix->vector, rsp);
+ free_irq(rsp->msix->vector, rsp->msix->handle);
rsp->msix->have_irq = 0;
- rsp->msix->rsp = NULL;
+ rsp->msix->in_use = 0;
+ rsp->msix->handle = NULL;
}
dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
sizeof(response_t), rsp->ring, rsp->dma);
@@ -573,7 +574,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
return ret;
}
-static int
+int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
int ret = -1;
@@ -596,34 +597,42 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair, *tqpair;
- /* Delete request queues */
- for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
- req = ha->req_q_map[cnt];
- if (req && test_bit(cnt, ha->req_qid_map)) {
- ret = qla25xx_delete_req_que(vha, req);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x00ea,
- "Couldn't delete req que %d.\n",
- req->id);
- return ret;
+ if (ql2xmqsupport) {
+ list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
+ qp_list_elem)
+ qla2xxx_delete_qpair(vha, qpair);
+ } else {
+ /* Delete request queues */
+ for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+ req = ha->req_q_map[cnt];
+ if (req && test_bit(cnt, ha->req_qid_map)) {
+ ret = qla25xx_delete_req_que(vha, req);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00ea,
+ "Couldn't delete req que %d.\n",
+ req->id);
+ return ret;
+ }
}
}
- }
- /* Delete response queues */
- for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
- rsp = ha->rsp_q_map[cnt];
- if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
- ret = qla25xx_delete_rsp_que(vha, rsp);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x00eb,
- "Couldn't delete rsp que %d.\n",
- rsp->id);
- return ret;
+ /* Delete response queues */
+ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+ ret = qla25xx_delete_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00eb,
+ "Couldn't delete rsp que %d.\n",
+ rsp->id);
+ return ret;
+ }
}
}
}
+
return ret;
}
@@ -659,10 +668,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS)
goto que_failed;
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_log(ql_log_warn, base_vha, 0x00db,
"No resources to create additional request queue.\n");
goto que_failed;
@@ -708,7 +717,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
req->out_ptr = (void *)(req->ring + req->length);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
"cnt=%d id=%d max_q_depth=%d.\n",
@@ -724,9 +733,9 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00df,
"%s failed.\n", __func__);
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
clear_bit(que_id, ha->req_qid_map);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
goto que_failed;
}
@@ -741,20 +750,20 @@ failed:
static void qla_do_work(struct work_struct *work)
{
unsigned long flags;
- struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
+ struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
struct scsi_qla_host *vha;
- struct qla_hw_data *ha = rsp->hw;
+ struct qla_hw_data *ha = qpair->hw;
- spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+ spin_lock_irqsave(&qpair->qp_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- qla24xx_process_response_queue(vha, rsp);
- spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
+ qla24xx_process_response_queue(vha, qpair->rsp);
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
- uint8_t vp_idx, uint16_t rid, int req)
+ uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair)
{
int ret = 0;
struct rsp_que *rsp = NULL;
@@ -779,28 +788,24 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
goto que_failed;
}
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
if (que_id >= ha->max_rsp_queues) {
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_log(ql_log_warn, base_vha, 0x00e2,
"No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->rsp_qid_map);
- if (ha->flags.msix_enabled)
- rsp->msix = &ha->msix_entries[que_id + 1];
- else
- ql_log(ql_log_warn, base_vha, 0x00e3,
- "MSIX not enabled.\n");
+ rsp->msix = qpair->msix;
ha->rsp_q_map[que_id] = rsp;
rsp->rid = rid;
rsp->vp_idx = vp_idx;
rsp->hw = ha;
ql_dbg(ql_dbg_init, base_vha, 0x00e4,
- "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+ "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
que_id, rsp->rid, rsp->vp_idx, rsp->hw);
/* Use alternate PCI bus number */
if (MSB(rsp->rid))
@@ -812,23 +817,27 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (!IS_MSIX_NACK_CAPABLE(ha))
options |= BIT_6;
+ /* Set option to indicate response queue creation */
+ options |= BIT_1;
+
rsp->options = options;
rsp->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
rsp->in_ptr = (void *)(rsp->ring + rsp->length);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
- "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
ql_dbg(ql_dbg_init, base_vha, 0x00e5,
- "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
- ret = qla25xx_request_irq(rsp);
+ ret = qla25xx_request_irq(ha, qpair, qpair->msix,
+ QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
if (ret)
goto que_failed;
@@ -836,19 +845,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00e7,
"%s failed.\n", __func__);
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
clear_bit(que_id, ha->rsp_qid_map);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
goto que_failed;
}
- if (req >= 0)
- rsp->req = ha->req_q_map[req];
- else
- rsp->req = NULL;
+ rsp->req = NULL;
qla2x00_init_response_q_entries(rsp);
- if (rsp->hw->wq)
- INIT_WORK(&rsp->q_work, qla_do_work);
+ if (qpair->hw->wq)
+ INIT_WORK(&qpair->q_work, qla_do_work);
return rsp->id;
que_failed:
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 56d6142852a5..8521cfe302e9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -30,7 +31,7 @@ static int apidev_major;
/*
* SRB allocation cache
*/
-static struct kmem_cache *srb_cachep;
+struct kmem_cache *srb_cachep;
/*
* CT6 CTX allocation cache
@@ -143,19 +144,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
"Enables iIDMA settings "
"Default is 1 - perform iIDMA. 0 - no iIDMA.");
-int ql2xmaxqueues = 1;
-module_param(ql2xmaxqueues, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmaxqueues,
- "Enables MQ settings "
- "Default is 1 for single queue. Set it to number "
- "of queues in MQ mode.");
-
-int ql2xmultique_tag;
-module_param(ql2xmultique_tag, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmultique_tag,
- "Enables CPU affinity settings for the driver "
- "Default is 0 for no affinity of request and response IO. "
- "Set it to 1 to turn on the cpu affinity.");
+int ql2xmqsupport = 1;
+module_param(ql2xmqsupport, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmqsupport,
+ "Enable on demand multiple queue pairs support "
+ "Default is 1 for supported. "
+ "Set it to 0 to turn off mq qpair support.");
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
@@ -261,6 +255,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla83xx_disable_laser(scsi_qla_host_t *vha);
+static int qla2xxx_map_queues(struct Scsi_Host *shost);
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
@@ -280,6 +275,7 @@ struct scsi_host_template qla2xxx_driver_template = {
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = scsi_change_queue_depth,
+ .map_queues = qla2xxx_map_queues,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
@@ -339,6 +335,8 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
+int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+ struct qla_qpair *qpair);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
@@ -360,6 +358,25 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
"Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
+
+ if (ql2xmqsupport && ha->max_qpairs) {
+ ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
+ GFP_KERNEL);
+ if (!ha->queue_pair_map) {
+ ql_log(ql_log_fatal, vha, 0x0180,
+ "Unable to allocate memory for queue pair ptrs.\n");
+ goto fail_qpair_map;
+ }
+ ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+ if (ha->base_qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x0182,
+ "Failed to allocate base queue pair memory.\n");
+ goto fail_base_qpair;
+ }
+ ha->base_qpair->req = req;
+ ha->base_qpair->rsp = rsp;
+ }
+
/*
* Make sure we record at least the request and response queue zero in
* case we need to free them if part of the probe fails.
@@ -370,6 +387,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
set_bit(0, ha->req_qid_map);
return 1;
+fail_base_qpair:
+ kfree(ha->queue_pair_map);
+fail_qpair_map:
+ kfree(ha->rsp_q_map);
+ ha->rsp_q_map = NULL;
fail_rsp_map:
kfree(ha->req_q_map);
ha->req_q_map = NULL;
@@ -417,82 +439,43 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
struct req_que *req;
struct rsp_que *rsp;
int cnt;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
if (!test_bit(cnt, ha->req_qid_map))
continue;
req = ha->req_q_map[cnt];
+ clear_bit(cnt, ha->req_qid_map);
+ ha->req_q_map[cnt] = NULL;
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_req_que(ha, req);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
kfree(ha->req_q_map);
ha->req_q_map = NULL;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
if (!test_bit(cnt, ha->rsp_qid_map))
continue;
rsp = ha->rsp_q_map[cnt];
+ clear_bit(cnt, ha->rsp_qid_map);
+ ha->rsp_q_map[cnt] = NULL;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_rsp_que(ha, rsp);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
- kfree(ha->rsp_q_map);
- ha->rsp_q_map = NULL;
-}
-
-static int qla25xx_setup_mode(struct scsi_qla_host *vha)
-{
- uint16_t options = 0;
- int ques, req, ret;
- struct qla_hw_data *ha = vha->hw;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (!(ha->fw_attributes & BIT_6)) {
- ql_log(ql_log_warn, vha, 0x00d8,
- "Firmware is not multi-queue capable.\n");
- goto fail;
- }
- if (ql2xmultique_tag) {
- /* create a request queue for IO */
- options |= BIT_7;
- req = qla25xx_create_req_que(ha, options, 0, 0, -1,
- QLA_DEFAULT_QUE_QOS);
- if (!req) {
- ql_log(ql_log_warn, vha, 0x00e0,
- "Failed to create request queue.\n");
- goto fail;
- }
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
- vha->req = ha->req_q_map[req];
- options |= BIT_1;
- for (ques = 1; ques < ha->max_rsp_queues; ques++) {
- ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
- if (!ret) {
- ql_log(ql_log_warn, vha, 0x00e8,
- "Failed to create response queue.\n");
- goto fail2;
- }
- }
- ha->flags.cpu_affinity_enabled = 1;
- ql_dbg(ql_dbg_multiq, vha, 0xc007,
- "CPU affinity mode enabled, "
- "no. of response queues:%d no. of request queues:%d.\n",
- ha->max_rsp_queues, ha->max_req_queues);
- ql_dbg(ql_dbg_init, vha, 0x00e9,
- "CPU affinity mode enabled, "
- "no. of response queues:%d no. of request queues:%d.\n",
- ha->max_rsp_queues, ha->max_req_queues);
- }
- return 0;
-fail2:
- qla25xx_delete_queues(vha);
- destroy_workqueue(ha->wq);
- ha->wq = NULL;
- vha->req = ha->req_q_map[0];
-fail:
- ha->mqenable = 0;
- kfree(ha->req_q_map);
kfree(ha->rsp_q_map);
- ha->max_req_queues = ha->max_rsp_queues = 1;
- return 1;
+ ha->rsp_q_map = NULL;
}
static char *
@@ -669,7 +652,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
qla2x00_rel_sp(sp->fcport->vha, sp);
}
-static void
+void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -693,6 +676,75 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
cmd->scsi_done(cmd);
}
+void
+qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ void *ctx = GET_CMD_CTX_SP(sp);
+
+ if (sp->flags & SRB_DMA_VALID) {
+ scsi_dma_unmap(cmd);
+ sp->flags &= ~SRB_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+ /* List assured to be having elements */
+ qla2x00_clean_dsd_pool(ha, sp, NULL);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, ctx,
+ ((struct crc_context *)ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+ ctx1->fcp_cmnd_dma);
+ list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ mempool_free(ctx1, ha->ctx_mempool);
+ }
+
+ CMD_SP(cmd) = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+void
+qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+ cmd->result = res;
+
+ if (atomic_read(&sp->ref_count) == 0) {
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
+ "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+ sp, GET_CMD_SP(sp));
+ if (ql2xextended_error_logging & ql_dbg_io)
+ WARN_ON(atomic_read(&sp->ref_count) == 0);
+ return;
+ }
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
+ cmd->scsi_done(cmd);
+}
+
/* If we are SP1 here, we need to still take and release the host_lock as SP1
* does not have the changes necessary to avoid taking host->host_lock.
*/
@@ -706,12 +758,28 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
srb_t *sp;
int rval;
+ struct qla_qpair *qpair = NULL;
+ uint32_t tag;
+ uint16_t hwq;
if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
+ if (ha->mqenable) {
+ if (shost_use_blk_mq(vha->host)) {
+ tag = blk_mq_unique_tag(cmd->request);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ qpair = ha->queue_pair_map[hwq];
+ } else if (vha->vp_idx && vha->qpair) {
+ qpair = vha->qpair;
+ }
+
+ if (qpair)
+ return qla2xxx_mqueuecommand(host, cmd, qpair);
+ }
+
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_aer, vha, 0x9010,
@@ -808,6 +876,95 @@ qc24_fail_command:
return 0;
}
+/* For MQ supported I/O */
+int
+qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+ struct qla_qpair *qpair)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp;
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ cmd->result = rval;
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
+ "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+ cmd, rval);
+ goto qc24_fail_command;
+ }
+
+ if (!fcport) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
+ if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+ ql_dbg(ql_dbg_io, vha, 0x3077,
+ "Returning DNC, fcport_state=%d loop_state=%d.\n",
+ atomic_read(&fcport->state),
+ atomic_read(&base_vha->loop_state));
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+ goto qc24_target_busy;
+ }
+
+ /*
+ * Return target busy if we've received a non-zero retry_delay_timer
+ * in a FCP_RSP.
+ */
+ if (fcport->retry_delay_timestamp == 0) {
+ /* retry delay not set */
+ } else if (time_after(jiffies, fcport->retry_delay_timestamp))
+ fcport->retry_delay_timestamp = 0;
+ else
+ goto qc24_target_busy;
+
+ sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto qc24_host_busy;
+
+ sp->u.scmd.cmd = cmd;
+ sp->type = SRB_SCSI_CMD;
+ atomic_set(&sp->ref_count, 1);
+ CMD_SP(cmd) = (void *)sp;
+ sp->free = qla2xxx_qpair_sp_free_dma;
+ sp->done = qla2xxx_qpair_sp_compl;
+ sp->qpair = qpair;
+
+ rval = ha->isp_ops->start_scsi_mq(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+ "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+ if (rval == QLA_INTERFACE_ERROR)
+ goto qc24_fail_command;
+ goto qc24_host_busy_free_sp;
+ }
+
+ return 0;
+
+qc24_host_busy_free_sp:
+ qla2xxx_qpair_sp_free_dma(vha, sp);
+
+qc24_host_busy:
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+ cmd->scsi_done(cmd);
+
+ return 0;
+}
+
/*
* qla2x00_eh_wait_on_command
* Waits for the command to be returned by the Firmware for some
@@ -1601,7 +1758,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
{
resource_size_t pio;
uint16_t msix;
- int cpus;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1658,9 +1814,7 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
- if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
- (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
- (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -1670,26 +1824,18 @@ skip_pio:
"MQIO Base=%p.\n", ha->mqiobase);
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
+ ha->msix_count = msix + 1;
/* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- }
+ /* MB interrupt uses 1 vector */
+ ha->max_req_queues = ha->msix_count - 1;
+ ha->max_rsp_queues = ha->max_req_queues;
+ /* Queue pairs is the max value minus the base queue pair */
+ ha->max_qpairs = ha->max_rsp_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
+ "Max no of queues pairs: %d.\n", ha->max_qpairs);
+
ql_log_pci(ql_log_info, ha->pdev, 0x001a,
- "MSI-X vector count: %d.\n", msix);
+ "MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x001b,
"BAR 3 not enabled.\n");
@@ -1709,7 +1855,6 @@ static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
uint16_t msix;
- int cpus;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1761,32 +1906,36 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev,
QLA_83XX_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
- /* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
+ ha->msix_count = msix + 1;
+ /*
+ * By default, driver uses at least two msix vectors
+ * (default & rspq)
+ */
+ if (ql2xmqsupport) {
+ /* MB interrupt uses 1 vector */
+ ha->max_req_queues = ha->msix_count - 1;
+ ha->max_rsp_queues = ha->max_req_queues;
+
+ /* ATIOQ needs 1 vector. That's 1 less QPair */
+ if (QLA_TGT_MODE_ENABLED())
+ ha->max_req_queues--;
+
+ /* Queue pairs is the max value minus
+ * the base queue pair */
+ ha->max_qpairs = ha->max_req_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+ "Max no of queues pairs: %d.\n", ha->max_qpairs);
}
ql_log_pci(ql_log_info, ha->pdev, 0x011c,
- "MSI-X vector count: %d.\n", msix);
+ "MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x011e,
"BAR 1 not enabled.\n");
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
+ if (QLA_TGT_MODE_ENABLED())
+ ha->msix_count++;
qlt_83xx_iospace_config(ha);
@@ -1831,6 +1980,7 @@ static struct isp_operations qla2100_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1869,6 +2019,7 @@ static struct isp_operations qla2300_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1907,6 +2058,7 @@ static struct isp_operations qla24xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1945,6 +2097,7 @@ static struct isp_operations qla25xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1983,6 +2136,7 @@ static struct isp_operations qla81xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2021,6 +2175,7 @@ static struct isp_operations qla82xx_isp_ops = {
.write_optrom = qla82xx_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla82xx_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2059,6 +2214,7 @@ static struct isp_operations qla8044_isp_ops = {
.write_optrom = qla8044_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla8044_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2097,6 +2253,7 @@ static struct isp_operations qla83xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2135,6 +2292,7 @@ static struct isp_operations qlafx00_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qlafx00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qlafx00_abort_isp,
.iospace_config = qlafx00_iospace_config,
.initialize_adapter = qlafx00_initialize_adapter,
@@ -2173,6 +2331,7 @@ static struct isp_operations qla27xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2387,6 +2546,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
uint16_t req_length = 0, rsp_length = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ int i;
+
bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
sht = &qla2xxx_driver_template;
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2650,6 +2811,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"Found an ISP%04X irq %d iobase 0x%p.\n",
pdev->device, pdev->irq, ha->iobase);
mutex_init(&ha->vport_lock);
+ mutex_init(&ha->mq_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
init_completion(&ha->mbx_intr_comp);
@@ -2737,7 +2899,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
-que_init:
+ /* Set up the irqs */
+ ret = qla2x00_request_irqs(ha, rsp);
+ if (ret)
+ goto probe_init_failed;
+
/* Alloc arrays of request and response ring ptrs */
if (!qla2x00_alloc_queues(ha, req, rsp)) {
ql_log(ql_log_fatal, base_vha, 0x003d,
@@ -2746,12 +2912,17 @@ que_init:
goto probe_init_failed;
}
- qlt_probe_one_stage1(base_vha, ha);
+ if (ha->mqenable && shost_use_blk_mq(host)) {
+ /* number of hardware queues supported by blk/scsi-mq */
+ host->nr_hw_queues = ha->max_qpairs;
- /* Set up the irqs */
- ret = qla2x00_request_irqs(ha, rsp);
- if (ret)
- goto probe_init_failed;
+ ql_dbg(ql_dbg_init, base_vha, 0x0192,
+ "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
+ } else
+ ql_dbg(ql_dbg_init, base_vha, 0x0193,
+ "blk/scsi-mq disabled.\n");
+
+ qlt_probe_one_stage1(base_vha, ha);
pci_save_state(pdev);
@@ -2842,11 +3013,12 @@ que_init:
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- if (ha->mqenable) {
- if (qla25xx_setup_mode(base_vha)) {
- ql_log(ql_log_warn, base_vha, 0x00ec,
- "Failed to create queues, falling back to single queue mode.\n");
- goto que_init;
+ if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+ /* Create start of day qpairs for Block MQ */
+ if (shost_use_blk_mq(host)) {
+ for (i = 0; i < ha->max_qpairs; i++)
+ qla2xxx_create_qpair(base_vha, 5, 0);
}
}
@@ -3115,13 +3287,6 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
static void
qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
{
- /* Flush the work queue and remove it */
- if (ha->wq) {
- flush_workqueue(ha->wq);
- destroy_workqueue(ha->wq);
- ha->wq = NULL;
- }
-
/* Cancel all work and destroy DPC workqueues */
if (ha->dpc_lp_wq) {
cancel_work_sync(&ha->idc_aen);
@@ -3317,9 +3482,17 @@ qla2x00_free_device(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
}
+ qla2x00_free_fcports(vha);
+
qla2x00_free_irqs(vha);
- qla2x00_free_fcports(vha);
+ /* Flush the work queue and remove it */
+ if (ha->wq) {
+ flush_workqueue(ha->wq);
+ destroy_workqueue(ha->wq);
+ ha->wq = NULL;
+ }
+
qla2x00_mem_free(ha);
@@ -4034,6 +4207,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
INIT_LIST_HEAD(&vha->logo_list);
INIT_LIST_HEAD(&vha->plogi_ack_list);
+ INIT_LIST_HEAD(&vha->qp_list);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -5038,8 +5212,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
base_vha->flags.init_done = 0;
qla25xx_delete_queues(base_vha);
- qla2x00_free_irqs(base_vha);
qla2x00_free_fcports(base_vha);
+ qla2x00_free_irqs(base_vha);
qla2x00_mem_free(ha);
qla82xx_md_free(base_vha);
qla2x00_free_queues(ha);
@@ -5073,6 +5247,8 @@ qla2x00_do_dpc(void *data)
{
scsi_qla_host_t *base_vha;
struct qla_hw_data *ha;
+ uint32_t online;
+ struct qla_qpair *qpair;
ha = (struct qla_hw_data *)data;
base_vha = pci_get_drvdata(ha->pdev);
@@ -5334,6 +5510,22 @@ intr_on_check:
ha->isp_ops->beacon_blink(base_vha);
}
+ /* qpair online check */
+ if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
+ &base_vha->dpc_flags)) {
+ if (ha->flags.eeh_busy ||
+ ha->flags.pci_channel_io_perm_failure)
+ online = 0;
+ else
+ online = 1;
+
+ mutex_lock(&ha->mq_lock);
+ list_for_each_entry(qpair, &base_vha->qp_list,
+ qp_list_elem)
+ qpair->online = online;
+ mutex_unlock(&ha->mq_lock);
+ }
+
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -5676,6 +5868,10 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
switch (state) {
case pci_channel_io_normal:
ha->flags.eeh_busy = 0;
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
ha->flags.eeh_busy = 1;
@@ -5689,10 +5885,18 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
/* Return back all IOs */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
ha->flags.pci_channel_io_perm_failure = 1;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
@@ -5960,6 +6164,13 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
qla83xx_wr_reg(vha, reg, data);
}
+static int qla2xxx_map_queues(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+
+ return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+}
+
static const struct pci_error_handlers qla2xxx_err_handler = {
.error_detected = qla2xxx_pci_error_detected,
.mmio_enabled = qla2xxx_pci_mmio_enabled,
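
Taken together, the qla_os.c changes above wire the driver into blk-mq's PCI-aware queue mapping: the new .map_queues callback hands the host's tag set to blk_mq_pci_map_queues() so hardware contexts follow the MSI-X vector affinity, and queuecommand derives the hardware-queue index from the request's unique tag to pick the matching queue pair. A hedged sketch of that lookup path in a generic low-level driver (everything except the block-layer and SCSI helpers is hypothetical: struct my_hw, struct my_qpair and my_submit_on_qpair() stand in for driver-private code):

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_qpair {
	int id;					/* per hardware-queue submission state */
};

struct my_hw {
	struct pci_dev *pdev;
	struct my_qpair **queue_pair_map;	/* indexed by hardware queue */
};

static int my_submit_on_qpair(struct my_qpair *qpair, struct scsi_cmnd *cmd)
{
	/* build the IOCB and ring the doorbell for this queue pair's ring */
	return 0;
}

static int my_map_queues(struct Scsi_Host *shost)
{
	struct my_hw *hw = shost_priv(shost);

	/* Spread hardware contexts according to the PCI MSI-X affinity masks. */
	return blk_mq_pci_map_queues(&shost->tag_set, hw->pdev);
}

static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct my_hw *hw = shost_priv(shost);
	u32 tag = blk_mq_unique_tag(cmd->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);	/* hwq lives in the tag's upper bits */

	return my_submit_on_qpair(hw->queue_pair_map[hwq], cmd);
}
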
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 07349270535d..82dfe07b1d47 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1204,10 +1204,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
struct request_queue *rq = sdev->request_queue;
struct scsi_target *starget = sdev->sdev_target;
- error = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (error)
- return error;
-
error = scsi_target_add(starget);
if (error)
return error;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index aa43bfea0d00..abe617372661 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -23,6 +23,7 @@
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
+#include "ufs_quirks.h"
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
@@ -1031,6 +1032,34 @@ out:
return ret;
}
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+ int err;
+ u32 pa_vs_config_reg1;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ &pa_vs_config_reg1);
+ if (err)
+ goto out;
+
+ /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+ err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+ return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+ err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+ return err;
+}
+
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1194,7 +1223,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
*/
host->generic_phy = devm_phy_get(dev, "ufsphy");
- if (IS_ERR(host->generic_phy)) {
+ if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
+ /*
+ * The UFS driver might be probed before the phy driver is.
+ * In that case we would like to return EPROBE_DEFER code.
+ */
+ err = -EPROBE_DEFER;
+ dev_warn(dev, "%s: required phy device. hasn't probed yet. err = %d\n",
+ __func__, err);
+ goto out_variant_clear;
+ } else if (IS_ERR(host->generic_phy)) {
err = PTR_ERR(host->generic_phy);
dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
goto out_variant_clear;
@@ -1432,7 +1470,8 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
- ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+ /* clear bit 17 - UTP_DBG_RAMS_EN */
+ ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
@@ -1609,6 +1648,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.hce_enable_notify = ufs_qcom_hce_enable_notify,
.link_startup_notify = ufs_qcom_link_startup_notify,
.pwr_change_notify = ufs_qcom_pwr_change_notify,
+ .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
.suspend = ufs_qcom_suspend,
.resume = ufs_qcom_resume,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index a19307a57ce2..fe517cd7dac3 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -142,6 +142,7 @@ enum ufs_qcom_phy_init_type {
UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
/* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index f7983058f3f7..08b799d4efcc 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -134,29 +134,17 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
+/*
+ * The max. value of PA_SaveConfigTime is 250 (10us), but this is not enough for
+ * some vendors.
+ * Gear switch from PWM to HS may fail even with this max. PA_SaveConfigTime.
+ * Gear switch can be issued by host controller as an error recovery and any
+ * software delay will not help on this case so we need to increase
+ * PA_SaveConfigTime to >32us as per vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
-static struct ufs_dev_fix ufs_fixups[] = {
- /* UFS cards deviations table */
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_NO_FASTAUTO),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-
- END_FIX
-};
#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ef8548c3a423..a2c2817fc566 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -185,6 +185,30 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
return ufs_pm_lvl_states[lvl].link_state;
}
+static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+
+ END_FIX
+};
+
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
@@ -288,10 +312,24 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
- if (hba->ufs_version == UFSHCI_VERSION_10)
- return INTERRUPT_MASK_ALL_VER_10;
- else
- return INTERRUPT_MASK_ALL_VER_11;
+ u32 intr_mask = 0;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ intr_mask = INTERRUPT_MASK_ALL_VER_10;
+ break;
+ /* allow fall through */
+ case UFSHCI_VERSION_11:
+ case UFSHCI_VERSION_20:
+ intr_mask = INTERRUPT_MASK_ALL_VER_11;
+ break;
+ /* allow fall through */
+ case UFSHCI_VERSION_21:
+ default:
+ intr_mask = INTERRUPT_MASK_ALL_VER_21;
+ }
+
+ return intr_mask;
}
/**
@@ -5199,6 +5237,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ ufshcd_vops_apply_dev_quirks(hba);
}
/**
@@ -6667,6 +6707,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
+ if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+ (hba->ufs_version != UFSHCI_VERSION_11) &&
+ (hba->ufs_version != UFSHCI_VERSION_20) &&
+ (hba->ufs_version != UFSHCI_VERSION_21))
+ dev_err(hba->dev, "invalid UFS version 0x%x\n",
+ hba->ufs_version);
+
/* Get Interrupt bit mask per version */
hba->intr_mask = ufshcd_get_intr_mask(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 7d9ff22acfea..08cd26ed2382 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -266,7 +266,7 @@ struct ufs_pwr_mode_info {
* @setup_task_mgmt: called before any task management request is issued
* to set some things
* @hibern8_notify: called around hibern8 enter/exit
- * to configure some things
+ * @apply_dev_quirks: called to apply device specific quirks
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
@@ -293,7 +293,8 @@ struct ufs_hba_variant_ops {
void (*setup_xfer_req)(struct ufs_hba *, int, bool);
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
- enum ufs_notify_change_status);
+ enum ufs_notify_change_status);
+ int (*apply_dev_quirks)(struct ufs_hba *);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -839,6 +840,13 @@ static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
return hba->vops->hibern8_notify(hba, cmd, status);
}
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->apply_dev_quirks)
+ return hba->vops->apply_dev_quirks(hba);
+ return 0;
+}
+
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 5d978867be57..8c5190e2e1c9 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -72,6 +72,10 @@ enum {
REG_UIC_COMMAND_ARG_1 = 0x94,
REG_UIC_COMMAND_ARG_2 = 0x98,
REG_UIC_COMMAND_ARG_3 = 0x9C,
+ REG_UFS_CCAP = 0x100,
+ REG_UFS_CRYPTOCAP = 0x104,
+
+ UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
};
/* Controller capability masks */
@@ -275,6 +279,9 @@ enum {
/* Interrupt disable mask for UFSHCI v1.1 */
INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
+
+ /* Interrupt disable mask for UFSHCI v2.1 */
+ INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
};
/*
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index d02bf58aea6d..8bcb9b71f764 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <asm/unaligned.h>
+#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "cxgbit.h"
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b7d747e92c7a..da2c73a255de 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -23,7 +23,9 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/idr.h>
+#include <linux/delay.h>
#include <asm/unaligned.h>
+#include <net/ipv6.h>
#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 4cf2c0f2ba2f..e0db2ceb0f87 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -1,6 +1,18 @@
#ifndef ISCSI_TARGET_H
#define ISCSI_TARGET_H
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_np;
+struct iscsi_portal_group;
+struct iscsi_session;
+struct iscsi_tpg_np;
+struct kref;
+struct sockaddr_storage;
+
extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index e116f0e845c0..903b667f8e01 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
+#include <linux/random.h>
#include <linux/scatterlist.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index d22f7b96a06c..1b91c13cc965 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -1,6 +1,8 @@
#ifndef _ISCSI_CHAP_H_
#define _ISCSI_CHAP_H_
+#include <linux/types.h>
+
#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
#define CHAP_DIGEST_SHA 6
@@ -18,6 +20,9 @@
#define CHAP_STAGE_CLIENT_NRIC 4
#define CHAP_STAGE_SERVER_NR 5
+struct iscsi_node_auth;
+struct iscsi_conn;
+
extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
int *, int *);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 923c032f0b95..bf40f03755dd 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -21,10 +21,11 @@
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/inet.h>
+#include <linux/module.h>
+#include <net/ipv6.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_device.h"
@@ -100,8 +101,10 @@ static ssize_t lio_target_np_driver_store(struct config_item *item,
tpg_np_new = iscsit_tpg_add_network_portal(tpg,
&np->np_sockaddr, tpg_np, type);
- if (IS_ERR(tpg_np_new))
+ if (IS_ERR(tpg_np_new)) {
+ rc = PTR_ERR(tpg_np_new);
goto out;
+ }
} else {
tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
if (tpg_np_new) {
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index 647d4a5dca52..173ddd93c757 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -16,8 +16,8 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_erl1.h"
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
index 646429ac5a02..16edeeeb7777 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.h
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -1,6 +1,9 @@
#ifndef ISCSI_TARGET_DATAIN_VALUES_H
#define ISCSI_TARGET_DATAIN_VALUES_H
+struct iscsi_cmd;
+struct iscsi_datain;
+
extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
index a0e2df9e8090..06dbff5cd520 100644
--- a/drivers/target/iscsi/iscsi_target_device.h
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -1,6 +1,9 @@
#ifndef ISCSI_TARGET_DEVICE_H
#define ISCSI_TARGET_DEVICE_H
+struct iscsi_cmd;
+struct iscsi_session;
+
extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index a9e2f9497fb2..60e69e2af6ed 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -1,6 +1,12 @@
#ifndef ISCSI_TARGET_ERL0_H
#define ISCSI_TARGET_ERL0_H
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_session;
+
extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 9214c9dafa2b..fe9b7f1e44ac 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -17,6 +17,7 @@
******************************************************************************/
#include <linux/list.h>
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
index 2a3ebf118a34..54d36bd25bea 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.h
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -1,6 +1,16 @@
#ifndef ISCSI_TARGET_ERL1_H
#define ISCSI_TARGET_ERL1_H
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h> /* itt_t */
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_datain_req;
+struct iscsi_ooo_cmdsn;
+struct iscsi_pdu;
+struct iscsi_session;
+
extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
struct iscsi_cmd *, struct iscsi_datain_req *);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index e24f1c7c5862..faf9ae014b30 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -17,6 +17,7 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 63f2501f3fe0..7965f1e86506 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -1,6 +1,13 @@
#ifndef ISCSI_TARGET_ERL2_H
#define ISCSI_TARGET_ERL2_H
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_conn_recovery;
+struct iscsi_session;
+
extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 15f79a2ca34a..450f51deb2a2 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -20,6 +20,8 @@
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/idr.h>
+#include <linux/tcp.h> /* TCP_NODELAY */
+#include <net/ipv6.h> /* ipv6_addr_v4mapped() */
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index b597aa2c61a1..0e1fd6cedd54 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -1,6 +1,13 @@
#ifndef ISCSI_TARGET_LOGIN_H
#define ISCSI_TARGET_LOGIN_H
+#include <linux/types.h>
+
+struct iscsi_conn;
+struct iscsi_login;
+struct iscsi_np;
+struct sockaddr_storage;
+
extern int iscsi_login_setup_crypto(struct iscsi_conn *);
extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 89d34bd6d87f..46388c9e08da 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -18,6 +18,8 @@
#include <linux/ctype.h>
#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <net/sock.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index f021cbd330e5..53438bfca4c6 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -4,6 +4,10 @@
#define DECIMAL 0
#define HEX 1
+struct iscsi_conn;
+struct iscsi_login;
+struct iscsi_np;
+
extern void convert_null_to_semi(char *, int);
extern int extract_param(const char *, const char *, unsigned int, char *,
unsigned char *);
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index 0c69a46a62ec..79cdf06ade48 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,6 +1,11 @@
#ifndef ISCSI_TARGET_NODEATTRIB_H
#define ISCSI_TARGET_NODEATTRIB_H
+#include <linux/types.h>
+
+struct iscsi_node_acl;
+struct iscsi_portal_group;
+
extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
struct iscsi_portal_group *);
extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 0efa80bb8962..e65bf78ceef3 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -17,7 +17,7 @@
******************************************************************************/
#include <linux/slab.h>
-
+#include <linux/uio.h> /* struct kvec */
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_util.h"
#include "iscsi_target_parameters.h"
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index a0751e3f0813..9962ccf0ccd7 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -1,6 +1,7 @@
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
+#include <linux/types.h>
#include <scsi/iscsi_proto.h>
struct iscsi_extra_response {
@@ -23,6 +24,11 @@ struct iscsi_param {
struct list_head p_list;
} ____cacheline_aligned;
+struct iscsi_conn;
+struct iscsi_conn_ops;
+struct iscsi_param_list;
+struct iscsi_sess_ops;
+
extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
index d5b153751a8d..be1234362271 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -1,6 +1,9 @@
#ifndef ISCSI_SEQ_AND_PDU_LIST_H
#define ISCSI_SEQ_AND_PDU_LIST_H
+#include <linux/types.h>
+#include <linux/cache.h>
+
/* struct iscsi_pdu->status */
#define DATAOUT_PDU_SENT 1
@@ -78,6 +81,8 @@ struct iscsi_seq {
u32 xfer_len;
} ____cacheline_aligned;
+struct iscsi_cmd;
+
extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
index 142e992cb097..64cc5c07e47c 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.h
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -1,6 +1,12 @@
#ifndef ISCSI_TARGET_TMR_H
#define ISCSI_TARGET_TMR_H
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_tmr_req;
+
extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
unsigned char *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 0814e5894a96..2e7e08dbda48 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -16,9 +16,9 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_erl0.h"
#include "iscsi_target_login.h"
@@ -260,7 +260,6 @@ err_out:
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
}
- kfree(tpg);
return -ENOMEM;
}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 2da211920c18..ceba29851167 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -1,6 +1,15 @@
#ifndef ISCSI_TARGET_TPG_H
#define ISCSI_TARGET_TPG_H
+#include <linux/types.h>
+
+struct iscsi_np;
+struct iscsi_session;
+struct iscsi_tiqn;
+struct iscsi_tpg_np;
+struct se_node_acl;
+struct sockaddr_storage;
+
extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
extern int iscsit_load_discovery_tpg(void);
extern void iscsit_release_discovery_tpg(void);
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
index 08217d62fb0d..c4eb141c6435 100644
--- a/drivers/target/iscsi/iscsi_target_transport.c
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -1,5 +1,6 @@
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>
static LIST_HEAD(g_transport_list);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1f38177207e0..b5a1b4ccba12 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/percpu_ida.h>
+#include <net/ipv6.h> /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 995f1cb29d0e..8ff08856516a 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -1,8 +1,16 @@
#ifndef ISCSI_TARGET_UTIL_H
#define ISCSI_TARGET_UTIL_H
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h> /* itt_t */
+
#define MARKER_SIZE 8
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_conn_recovery;
+struct iscsi_session;
+
extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 4346462094a1..a8a230b4e6b5 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,3 +1,7 @@
+#include <linux/types.h>
+#include <linux/device.h>
+#include <target/target_core_base.h> /* struct se_cmd */
+
#define TCM_LOOP_VERSION "v2.1-rc2"
#define TL_WWN_ADDR_LEN 256
#define TL_TPGS_PER_HBA 32
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 58bb6ed18185..e5c3e5f827d0 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -28,6 +28,7 @@
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
@@ -928,7 +929,7 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
struct sbp_target_request *req;
int tag;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
return ERR_PTR(-ENOMEM);
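
The sbp_target.c hunk above is part of a wider cleanup: percpu_ida_alloc() takes a task state as its second argument, not a GFP mask, so TASK_RUNNING (meaning "fail rather than sleep") replaces the misleading GFP_ATOMIC. A small, hypothetical allocation/release pairing under that reading of the API:

#include <linux/errno.h>
#include <linux/percpu_ida.h>
#include <linux/sched.h>

static int grab_and_release_tag(struct percpu_ida *pool)
{
	int tag;

	/* TASK_RUNNING: return a negative value immediately instead of sleeping. */
	tag = percpu_ida_alloc(pool, TASK_RUNNING);
	if (tag < 0)
		return -ENOMEM;

	/* ... use the tag to index a preallocated per-session command slot ... */

	percpu_ida_free(pool, tag);
	return 0;
}
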
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 4c82bbe19003..f5e330099bfc 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -26,8 +26,11 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
+#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/fcntl.h>
#include <linux/file.h>
+#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 9b250f9b33bf..c69c11baf07f 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -1,6 +1,8 @@
#ifndef TARGET_CORE_ALUA_H
#define TARGET_CORE_ALUA_H
+#include <target/target_core_base.h>
+
/*
* INQUIRY response data, TPGS Field
*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a35a347ec357..54b36c9835be 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -144,12 +144,12 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
return -EINVAL;
}
if (!S_ISDIR(file_inode(fp)->i_mode)) {
- filp_close(fp, 0);
+ filp_close(fp, NULL);
mutex_unlock(&g_tf_lock);
pr_err("db_root: not a directory: %s\n", db_root_stage);
return -EINVAL;
}
- filp_close(fp, 0);
+ filp_close(fp, NULL);
strncpy(db_root, db_root_stage, read_bytes);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 6b423485c5d6..1ebd13ef7bd3 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -33,6 +33,7 @@
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
+#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d545993df18b..87aa376a1a1a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
+#include <linux/uio.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 068966fce308..526595a072de 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -1,6 +1,8 @@
#ifndef TARGET_CORE_FILE_H
#define TARGET_CORE_FILE_H
+#include <target/target_core_base.h>
+
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 01c2afd81500..718d3fcd3e7c 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -1,6 +1,9 @@
#ifndef TARGET_CORE_IBLOCK_H
#define TARGET_CORE_IBLOCK_H
+#include <linux/atomic.h>
+#include <target/target_core_base.h>
+
#define IBLOCK_VERSION "4.0"
#define IBLOCK_MAX_CDBS 16
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e2c970a9d61c..9ab7090f7c83 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -1,6 +1,11 @@
#ifndef TARGET_CORE_INTERNAL_H
#define TARGET_CORE_INTERNAL_H
+#include <linux/configfs.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define TARGET_CORE_NAME_MAX_LEN 64
#define TARGET_FABRIC_NAME_SIZE 32
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 47463c99c318..d761025144f9 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -29,6 +29,8 @@
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
@@ -253,8 +255,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
if ((cmd->t_task_cdb[1] & 0x01) &&
(cmd->t_task_cdb[1] & 0x02)) {
- pr_err("LongIO and Obselete Bits set, returning"
- " ILLEGAL_REQUEST\n");
+ pr_err("LongIO and Obsolete Bits set, returning ILLEGAL_REQUEST\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/*
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index e3d26e9126a0..847bd470339c 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -1,5 +1,9 @@
#ifndef TARGET_CORE_PR_H
#define TARGET_CORE_PR_H
+
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
/*
* PERSISTENT_RESERVE_OUT service action codes
*
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 6d2007e35df6..8a02fa47c7e8 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -15,11 +15,12 @@
#define PS_TIMEOUT_DISK (15*HZ)
#define PS_TIMEOUT_OTHER (500*HZ)
-#include <linux/device.h>
-#include <linux/kref.h>
-#include <linux/kobject.h>
+#include <linux/cache.h> /* ___cacheline_aligned */
+#include <target/target_core_base.h> /* struct se_device */
+struct block_device;
struct scsi_device;
+struct Scsi_Host;
struct pscsi_plugin_task {
unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER];
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 24b36fd785f1..ddc216c9f1f6 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -26,7 +26,9 @@
#include <linux/string.h>
#include <linux/parser.h>
+#include <linux/highmem.h>
#include <linux/timer.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index cc46a6a89b38..91fc1a34791d 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -1,6 +1,10 @@
#ifndef TARGET_CORE_RD_H
#define TARGET_CORE_RD_H
+#include <linux/module.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define RD_HBA_VERSION "v4.0"
#define RD_MCP_VERSION "4.0"
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 04f616b3ba0a..4879e70e2eef 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
+#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index bd6e78ba153d..97402856a8f0 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -1,6 +1,8 @@
#ifndef TARGET_CORE_UA_H
#define TARGET_CORE_UA_H
+#include <target/target_core_base.h>
+
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignement
*/
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 2b3c8564ace8..8041710b6972 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -27,6 +27,7 @@
#include <linux/uio_driver.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
+#include <linux/highmem.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -537,7 +538,7 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- int ret;
+ sense_reason_t ret;
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
@@ -685,8 +686,6 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
cmd->se_cmd = NULL;
- kmem_cache_free(tcmu_cmd_cache, cmd);
-
return 0;
}
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 094a1440eacb..37d5caebffa6 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -25,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
+#include <linux/ratelimit.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index 700a981c7b41..4d3d4dd060f2 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -1,3 +1,5 @@
+#include <target/target_core_base.h>
+
#define XCOPY_TARGET_DESC_LEN 32
#define XCOPY_SEGMENT_DESC_LEN 28
#define XCOPY_NAA_IEEE_REGEX_LEN 16
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index e28209b99b59..11d27b93b413 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -17,6 +17,9 @@
#ifndef __TCM_FC_H__
#define __TCM_FC_H__
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define FT_VERSION "0.4"
#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 197f73386fac..d2351139342f 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1073,7 +1073,7 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
struct usbg_cmd *cmd;
int tag;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
return ERR_PTR(-ENOMEM);
diff --git a/fs/befs/befs.h b/fs/befs/befs.h
index c6bad51d8ec7..b914cfb03820 100644
--- a/fs/befs/befs.h
+++ b/fs/befs/befs.h
@@ -129,6 +129,7 @@ static inline befs_inode_addr
blockno2iaddr(struct super_block *sb, befs_blocknr_t blockno)
{
befs_inode_addr iaddr;
+
iaddr.allocation_group = blockno >> BEFS_SB(sb)->ag_shift;
iaddr.start =
blockno - (iaddr.allocation_group << BEFS_SB(sb)->ag_shift);
@@ -140,7 +141,7 @@ blockno2iaddr(struct super_block *sb, befs_blocknr_t blockno)
static inline unsigned int
befs_iaddrs_per_block(struct super_block *sb)
{
- return BEFS_SB(sb)->block_size / sizeof (befs_disk_inode_addr);
+ return BEFS_SB(sb)->block_size / sizeof(befs_disk_inode_addr);
}
#include "endian.h"
diff --git a/fs/befs/befs_fs_types.h b/fs/befs/befs_fs_types.h
index eb557d9dc8be..69c9d8cde955 100644
--- a/fs/befs/befs_fs_types.h
+++ b/fs/befs/befs_fs_types.h
@@ -55,12 +55,12 @@ enum super_flags {
};
#define BEFS_BYTEORDER_NATIVE 0x42494745
-#define BEFS_BYTEORDER_NATIVE_LE (__force fs32)cpu_to_le32(BEFS_BYTEORDER_NATIVE)
-#define BEFS_BYTEORDER_NATIVE_BE (__force fs32)cpu_to_be32(BEFS_BYTEORDER_NATIVE)
+#define BEFS_BYTEORDER_NATIVE_LE ((__force fs32)cpu_to_le32(BEFS_BYTEORDER_NATIVE))
+#define BEFS_BYTEORDER_NATIVE_BE ((__force fs32)cpu_to_be32(BEFS_BYTEORDER_NATIVE))
#define BEFS_SUPER_MAGIC BEFS_SUPER_MAGIC1
-#define BEFS_SUPER_MAGIC1_LE (__force fs32)cpu_to_le32(BEFS_SUPER_MAGIC1)
-#define BEFS_SUPER_MAGIC1_BE (__force fs32)cpu_to_be32(BEFS_SUPER_MAGIC1)
+#define BEFS_SUPER_MAGIC1_LE ((__force fs32)cpu_to_le32(BEFS_SUPER_MAGIC1))
+#define BEFS_SUPER_MAGIC1_BE ((__force fs32)cpu_to_be32(BEFS_SUPER_MAGIC1))
/*
* Flags of inode
@@ -79,7 +79,7 @@ enum inode_flags {
BEFS_INODE_WAS_WRITTEN = 0x00020000,
BEFS_NO_TRANSACTION = 0x00040000,
};
-/*
+/*
* On-Disk datastructures of BeFS
*/
@@ -139,7 +139,7 @@ typedef struct {
} PACKED befs_super_block;
-/*
+/*
* Note: the indirect and dbl_indir block_runs may
* be longer than one block!
*/
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 7e135ea73fdd..d509887c580c 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -12,8 +12,8 @@
*
* Dominic Giampaolo, author of "Practical File System
* Design with the Be File System", for such a helpful book.
- *
- * Marcus J. Ranum, author of the b+tree package in
+ *
+ * Marcus J. Ranum, author of the b+tree package in
* comp.sources.misc volume 10. This code is not copied from that
* work, but it is partially based on it.
*
@@ -38,38 +38,38 @@
*/
/* Befs B+tree structure:
- *
+ *
* The first thing in the tree is the tree superblock. It tells you
* all kinds of useful things about the tree, like where the rootnode
* is located, and the size of the nodes (always 1024 with current version
* of BeOS).
*
* The rest of the tree consists of a series of nodes. Nodes contain a header
- * (struct befs_btree_nodehead), the packed key data, an array of shorts
+ * (struct befs_btree_nodehead), the packed key data, an array of shorts
* containing the ending offsets for each of the keys, and an array of
- * befs_off_t values. In interior nodes, the keys are the ending keys for
- * the childnode they point to, and the values are offsets into the
- * datastream containing the tree.
+ * befs_off_t values. In interior nodes, the keys are the ending keys for
+ * the childnode they point to, and the values are offsets into the
+ * datastream containing the tree.
*/
/* Note:
- *
- * The book states 2 confusing things about befs b+trees. First,
+ *
+ * The book states 2 confusing things about befs b+trees. First,
* it states that the overflow field of node headers is used by internal nodes
* to point to another node that "effectively continues this one". Here is what
* I believe that means. Each key in internal nodes points to another node that
- * contains key values less than itself. Inspection reveals that the last key
- * in the internal node is not the last key in the index. Keys that are
- * greater than the last key in the internal node go into the overflow node.
+ * contains key values less than itself. Inspection reveals that the last key
+ * in the internal node is not the last key in the index. Keys that are
+ * greater than the last key in the internal node go into the overflow node.
* I imagine there is a performance reason for this.
*
- * Second, it states that the header of a btree node is sufficient to
- * distinguish internal nodes from leaf nodes. Without saying exactly how.
+ * Second, it states that the header of a btree node is sufficient to
+ * distinguish internal nodes from leaf nodes. Without saying exactly how.
* After figuring out the first, it becomes obvious that internal nodes have
* overflow nodes and leafnodes do not.
*/
-/*
+/*
* Currently, this code is only good for directory B+trees.
* In order to be used for other BFS indexes, it needs to be extended to handle
* duplicate keys and non-string keytypes (int32, int64, float, double).
@@ -237,8 +237,8 @@ befs_bt_read_node(struct super_block *sb, const befs_data_stream *ds,
* with @key (usually the disk block number of an inode).
*
* On failure, returns BEFS_ERR or BEFS_BT_NOT_FOUND.
- *
- * Algorithm:
+ *
+ * Algorithm:
* Read the superblock and rootnode of the b+tree.
* Drill down through the interior nodes using befs_find_key().
* Once at the correct leaf node, use befs_find_key() again to get the
@@ -402,12 +402,12 @@ befs_find_key(struct super_block *sb, struct befs_btree_node *node,
*
* Here's how it works: Key_no is the index of the key/value pair to
* return in keybuf/value.
- * Bufsize is the size of keybuf (BEFS_NAME_LEN+1 is a good size). Keysize is
+ * Bufsize is the size of keybuf (BEFS_NAME_LEN+1 is a good size). Keysize is
* the number of characters in the key (just a convenience).
*
* Algorithm:
* Get the first leafnode of the tree. See if the requested key is in that
- * node. If not, follow the node->right link to the next leafnode. Repeat
+ * node. If not, follow the node->right link to the next leafnode. Repeat
* until the (key_no)th key is found or the tree is out of keys.
*/
int
@@ -536,7 +536,7 @@ befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
* @node_off: Pointer to offset of current node within datastream. Modified
* by the function.
*
- * Helper function for btree traverse. Moves the current position to the
+ * Helper function for btree traverse. Moves the current position to the
* start of the first leaf node.
*
* Also checks for an empty tree. If there are no keys, returns BEFS_BT_EMPTY.
@@ -592,10 +592,10 @@ befs_btree_seekleaf(struct super_block *sb, const befs_data_stream *ds,
}
/**
- * befs_leafnode - Determine if the btree node is a leaf node or an
+ * befs_leafnode - Determine if the btree node is a leaf node or an
* interior node
* @node: Pointer to node structure to test
- *
+ *
* Return 1 if leaf, 0 if interior
*/
static int
@@ -656,7 +656,7 @@ befs_bt_valarray(struct befs_btree_node *node)
* @node: Pointer to the node structure to find the keydata array within
*
* Returns a pointer to the start of the keydata array
- * of the node pointed to by the node header
+ * of the node pointed to by the node header
*/
static char *
befs_bt_keydata(struct befs_btree_node *node)
@@ -702,7 +702,7 @@ befs_bt_get_key(struct super_block *sb, struct befs_btree_node *node,
/**
* befs_compare_strings - compare two strings
- * @key1: pointer to the first key to be compared
+ * @key1: pointer to the first key to be compared
* @keylen1: length in bytes of key1
* @key2: pointer to the second key to be compared
* @keylen2: length in bytes of key2
diff --git a/fs/befs/btree.h b/fs/befs/btree.h
index f2a8f637e9e0..60c6c728e64e 100644
--- a/fs/befs/btree.h
+++ b/fs/befs/btree.h
@@ -1,13 +1,11 @@
/*
* btree.h
- *
+ *
*/
-
int befs_btree_find(struct super_block *sb, const befs_data_stream *ds,
- const char *key, befs_off_t * value);
+ const char *key, befs_off_t *value);
int befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
loff_t key_no, size_t bufsize, char *keybuf,
- size_t * keysize, befs_off_t * value);
-
+ size_t *keysize, befs_off_t *value);
diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c
index b4c7ba013c0d..720b3bc5c16a 100644
--- a/fs/befs/datastream.c
+++ b/fs/befs/datastream.c
@@ -84,13 +84,11 @@ befs_read_datastream(struct super_block *sb, const befs_data_stream *ds,
*
* Takes a file position and gives back a brun who's starting block
* is block number fblock of the file.
- *
+ *
* Returns BEFS_OK or BEFS_ERR.
- *
+ *
* Calls specialized functions for each of the three possible
* datastream regions.
- *
- * 2001-11-15 Will Dyson
*/
int
befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
@@ -120,7 +118,7 @@ befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
/**
* befs_read_lsmylink - read long symlink from datastream.
- * @sb: Filesystem superblock
+ * @sb: Filesystem superblock
* @ds: Datastream to read from
* @buff: Buffer in which to place long symlink data
* @len: Length of the long symlink in bytes
diff --git a/fs/befs/datastream.h b/fs/befs/datastream.h
index 91ba8203d83f..7ff9ff09ec6e 100644
--- a/fs/befs/datastream.h
+++ b/fs/befs/datastream.h
@@ -5,10 +5,10 @@
struct buffer_head *befs_read_datastream(struct super_block *sb,
const befs_data_stream *ds,
- befs_off_t pos, uint * off);
+ befs_off_t pos, uint *off);
int befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
- befs_blocknr_t fblock, befs_block_run * run);
+ befs_blocknr_t fblock, befs_block_run *run);
size_t befs_read_lsymlink(struct super_block *sb, const befs_data_stream *data,
void *buff, befs_off_t len);
@@ -17,4 +17,3 @@ befs_blocknr_t befs_count_blocks(struct super_block *sb,
const befs_data_stream *ds);
extern const befs_inode_addr BAD_IADDR;
-
diff --git a/fs/befs/debug.c b/fs/befs/debug.c
index 85c13392e9e8..36656c86f50e 100644
--- a/fs/befs/debug.c
+++ b/fs/befs/debug.c
@@ -1,6 +1,6 @@
/*
* linux/fs/befs/debug.c
- *
+ *
* Copyright (C) 2001 Will Dyson (will_dyson at pobox.com)
*
* With help from the ntfs-tng driver by Anton Altparmakov
@@ -57,6 +57,7 @@ befs_debug(const struct super_block *sb, const char *fmt, ...)
struct va_format vaf;
va_list args;
+
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
@@ -67,7 +68,7 @@ befs_debug(const struct super_block *sb, const char *fmt, ...)
}
void
-befs_dump_inode(const struct super_block *sb, befs_inode * inode)
+befs_dump_inode(const struct super_block *sb, befs_inode *inode)
{
#ifdef CONFIG_BEFS_DEBUG
@@ -151,7 +152,7 @@ befs_dump_inode(const struct super_block *sb, befs_inode * inode)
*/
void
-befs_dump_super_block(const struct super_block *sb, befs_super_block * sup)
+befs_dump_super_block(const struct super_block *sb, befs_super_block *sup)
{
#ifdef CONFIG_BEFS_DEBUG
@@ -202,7 +203,7 @@ befs_dump_super_block(const struct super_block *sb, befs_super_block * sup)
#if 0
/* unused */
void
-befs_dump_small_data(const struct super_block *sb, befs_small_data * sd)
+befs_dump_small_data(const struct super_block *sb, befs_small_data *sd)
{
}
@@ -221,7 +222,8 @@ befs_dump_run(const struct super_block *sb, befs_disk_block_run run)
#endif /* 0 */
void
-befs_dump_index_entry(const struct super_block *sb, befs_disk_btree_super * super)
+befs_dump_index_entry(const struct super_block *sb,
+ befs_disk_btree_super *super)
{
#ifdef CONFIG_BEFS_DEBUG
@@ -242,7 +244,7 @@ befs_dump_index_entry(const struct super_block *sb, befs_disk_btree_super * supe
}
void
-befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead * node)
+befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead *node)
{
#ifdef CONFIG_BEFS_DEBUG
diff --git a/fs/befs/inode.c b/fs/befs/inode.c
index fa4b718de597..5367a6470a69 100644
--- a/fs/befs/inode.c
+++ b/fs/befs/inode.c
@@ -1,6 +1,6 @@
/*
* inode.c
- *
+ *
* Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
*/
@@ -10,12 +10,12 @@
#include "inode.h"
/*
- Validates the correctness of the befs inode
- Returns BEFS_OK if the inode should be used, otherwise
- returns BEFS_BAD_INODE
-*/
+ * Validates the correctness of the befs inode
+ * Returns BEFS_OK if the inode should be used, otherwise
+ * returns BEFS_BAD_INODE
+ */
int
-befs_check_inode(struct super_block *sb, befs_inode * raw_inode,
+befs_check_inode(struct super_block *sb, befs_inode *raw_inode,
befs_blocknr_t inode)
{
u32 magic1 = fs32_to_cpu(sb, raw_inode->magic1);
diff --git a/fs/befs/inode.h b/fs/befs/inode.h
index 9dc7fd9b7570..2219e412f49b 100644
--- a/fs/befs/inode.h
+++ b/fs/befs/inode.h
@@ -1,8 +1,7 @@
/*
* inode.h
- *
+ *
*/
-int befs_check_inode(struct super_block *sb, befs_inode * raw_inode,
+int befs_check_inode(struct super_block *sb, befs_inode *raw_inode,
befs_blocknr_t inode);
-
diff --git a/fs/befs/io.c b/fs/befs/io.c
index b4a558126ee1..227cb86e07fe 100644
--- a/fs/befs/io.c
+++ b/fs/befs/io.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001 Will Dyson <will_dyson@pobox.com
*
- * Based on portions of file.c and inode.c
+ * Based on portions of file.c and inode.c
* by Makoto Kato (m_kato@ga2.so-net.ne.jp)
*
* Many thanks to Dominic Giampaolo, author of Practical File System
@@ -19,8 +19,7 @@
/*
* Converts befs notion of disk addr to a disk offset and uses
* linux kernel function sb_bread() to get the buffer containing
- * the offset. -Will Dyson
- *
+ * the offset.
*/
struct buffer_head *
@@ -55,7 +54,7 @@ befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr)
befs_debug(sb, "<--- %s", __func__);
return bh;
- error:
+error:
befs_debug(sb, "<--- %s ERROR", __func__);
return NULL;
}
diff --git a/fs/befs/io.h b/fs/befs/io.h
index 78d7bc6e60de..9b3e1967cb31 100644
--- a/fs/befs/io.h
+++ b/fs/befs/io.h
@@ -4,4 +4,3 @@
struct buffer_head *befs_bread_iaddr(struct super_block *sb,
befs_inode_addr iaddr);
-
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 647a276eba56..19407165f4aa 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -18,6 +18,7 @@
#include <linux/parser.h>
#include <linux/namei.h>
#include <linux/sched.h>
+#include <linux/exportfs.h>
#include "befs.h"
#include "btree.h"
@@ -37,7 +38,8 @@ static int befs_readdir(struct file *, struct dir_context *);
static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
static int befs_readpage(struct file *file, struct page *page);
static sector_t befs_bmap(struct address_space *mapping, sector_t block);
-static struct dentry *befs_lookup(struct inode *, struct dentry *, unsigned int);
+static struct dentry *befs_lookup(struct inode *, struct dentry *,
+ unsigned int);
static struct inode *befs_iget(struct super_block *, unsigned long);
static struct inode *befs_alloc_inode(struct super_block *sb);
static void befs_destroy_inode(struct inode *inode);
@@ -51,6 +53,10 @@ static void befs_put_super(struct super_block *);
static int befs_remount(struct super_block *, int *, char *);
static int befs_statfs(struct dentry *, struct kstatfs *);
static int parse_options(char *, struct befs_mount_options *);
+static struct dentry *befs_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type);
+static struct dentry *befs_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type);
static const struct super_operations befs_sops = {
.alloc_inode = befs_alloc_inode, /* allocate a new inode */
@@ -83,9 +89,14 @@ static const struct address_space_operations befs_symlink_aops = {
.readpage = befs_symlink_readpage,
};
-/*
+static const struct export_operations befs_export_operations = {
+ .fh_to_dentry = befs_fh_to_dentry,
+ .fh_to_parent = befs_fh_to_parent,
+};
+
+/*
* Called by generic_file_read() to read a page of data
- *
+ *
* In turn, simply calls a generic block read function and
* passes it the address of befs_get_block, for mapping file
* positions to disk blocks.
@@ -102,15 +113,13 @@ befs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, befs_get_block);
}
-/*
- * Generic function to map a file position (block) to a
+/*
+ * Generic function to map a file position (block) to a
* disk offset (passed back in bh_result).
*
* Used by many higher level functions.
*
* Calls befs_fblock2brun() in datastream.c to do the real work.
- *
- * -WD 10-26-01
*/
static int
@@ -269,15 +278,15 @@ befs_alloc_inode(struct super_block *sb)
struct befs_inode_info *bi;
bi = kmem_cache_alloc(befs_inode_cachep, GFP_KERNEL);
- if (!bi)
- return NULL;
- return &bi->vfs_inode;
+ if (!bi)
+ return NULL;
+ return &bi->vfs_inode;
}
static void befs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
+ kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
}
static void befs_destroy_inode(struct inode *inode)
@@ -287,7 +296,7 @@ static void befs_destroy_inode(struct inode *inode)
static void init_once(void *foo)
{
- struct befs_inode_info *bi = (struct befs_inode_info *) foo;
+ struct befs_inode_info *bi = (struct befs_inode_info *) foo;
inode_init_once(&bi->vfs_inode);
}
@@ -338,7 +347,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
/*
* set uid and gid. But since current BeOS is single user OS, so
* you can change by "uid" or "gid" options.
- */
+ */
inode->i_uid = befs_sb->mount_opts.use_uid ?
befs_sb->mount_opts.uid :
@@ -353,14 +362,14 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
* BEFS's time is 64 bits, but current VFS is 32 bits...
* BEFS don't have access time. Nor inode change time. VFS
* doesn't have creation time.
- * Also, the lower 16 bits of the last_modified_time and
+ * Also, the lower 16 bits of the last_modified_time and
* create_time are just a counter to help ensure uniqueness
* for indexing purposes. (PFD, page 54)
*/
inode->i_mtime.tv_sec =
fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16;
- inode->i_mtime.tv_nsec = 0; /* lower 16 bits are not a time */
+ inode->i_mtime.tv_nsec = 0; /* lower 16 bits are not a time */
inode->i_ctime = inode->i_mtime;
inode->i_atime = inode->i_mtime;
@@ -414,10 +423,10 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
unlock_new_inode(inode);
return inode;
- unacquire_bh:
+unacquire_bh:
brelse(bh);
- unacquire_none:
+unacquire_none:
iget_failed(inode);
befs_debug(sb, "<--- %s - Bad inode", __func__);
return ERR_PTR(-EIO);
@@ -442,7 +451,7 @@ befs_init_inodecache(void)
}
/* Called at fs teardown.
- *
+ *
* Taken from NFS implementation by Al Viro.
*/
static void
@@ -491,13 +500,10 @@ fail:
}
/*
- * UTF-8 to NLS charset convert routine
- *
+ * UTF-8 to NLS charset convert routine
*
- * Changed 8/10/01 by Will Dyson. Now use uni2char() / char2uni() rather than
- * the nls tables directly
+ * Uses uni2char() / char2uni() rather than the nls tables directly
*/
-
static int
befs_utf2nls(struct super_block *sb, const char *in,
int in_len, char **out, int *out_len)
@@ -521,9 +527,8 @@ befs_utf2nls(struct super_block *sb, const char *in,
}
*out = result = kmalloc(maxlen, GFP_NOFS);
- if (!*out) {
+ if (!*out)
return -ENOMEM;
- }
for (i = o = 0; i < in_len; i += utflen, o += unilen) {
@@ -546,7 +551,7 @@ befs_utf2nls(struct super_block *sb, const char *in,
return o;
- conv_err:
+conv_err:
befs_error(sb, "Name using character set %s contains a character that "
"cannot be converted to unicode.", nls->charset);
befs_debug(sb, "<--- %s", __func__);
@@ -561,18 +566,18 @@ befs_utf2nls(struct super_block *sb, const char *in,
* @in_len: Length of input string in bytes
* @out: The output string in UTF-8 format
* @out_len: Length of the output buffer
- *
+ *
* Converts input string @in, which is in the format of the loaded NLS map,
* into a utf8 string.
- *
+ *
* The destination string @out is allocated by this function and the caller is
* responsible for freeing it with kfree()
- *
+ *
* On return, *@out_len is the length of @out in bytes.
*
* On success, the return value is the number of utf8 characters written to
* the output buffer @out.
- *
+ *
* On Failure, a negative number coresponding to the error code is returned.
*/
@@ -585,9 +590,11 @@ befs_nls2utf(struct super_block *sb, const char *in,
wchar_t uni;
int unilen, utflen;
char *result;
- /* There're nls characters that will translate to 3-chars-wide UTF-8
- * characters, a additional byte is needed to save the final \0
- * in special cases */
+ /*
+ * There are nls characters that will translate to 3-chars-wide UTF-8
+ * characters, an additional byte is needed to save the final \0
+ * in special cases
+ */
int maxlen = (3 * in_len) + 1;
befs_debug(sb, "---> %s\n", __func__);
@@ -624,14 +631,41 @@ befs_nls2utf(struct super_block *sb, const char *in,
return i;
- conv_err:
- befs_error(sb, "Name using charecter set %s contains a charecter that "
+conv_err:
+ befs_error(sb, "Name using character set %s contains a character that "
"cannot be converted to unicode.", nls->charset);
befs_debug(sb, "<--- %s", __func__);
kfree(result);
return -EILSEQ;
}
+static struct inode *befs_nfs_get_inode(struct super_block *sb, uint64_t ino,
+ uint32_t generation)
+{
+ /* No need to handle i_generation */
+ return befs_iget(sb, ino);
+}
+
+/*
+ * Map a NFS file handle to a corresponding dentry
+ */
+static struct dentry *befs_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ befs_nfs_get_inode);
+}
+
+/*
+ * Find the parent for a file specified by NFS handle
+ */
+static struct dentry *befs_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ befs_nfs_get_inode);
+}
+
enum {
Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err,
};
@@ -666,6 +700,7 @@ parse_options(char *options, struct befs_mount_options *opts)
while ((p = strsep(&options, ",")) != NULL) {
int token;
+
if (!*p)
continue;
@@ -721,7 +756,7 @@ parse_options(char *options, struct befs_mount_options *opts)
}
/* This function has the responsibiltiy of getting the
- * filesystem ready for unmounting.
+ * filesystem ready for unmounting.
* Basically, we free everything that we allocated in
* befs_read_inode
*/
@@ -782,8 +817,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
* Linux 2.4.10 and later refuse to read blocks smaller than
* the logical block size for the device. But we also need to read at
* least 1k to get the second 512 bytes of the volume.
- * -WD 10-26-01
- */
+ */
blocksize = sb_min_blocksize(sb, 1024);
if (!blocksize) {
if (!silent)
@@ -791,7 +825,8 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
goto unacquire_priv_sbp;
}
- if (!(bh = sb_bread(sb, sb_block))) {
+ bh = sb_bread(sb, sb_block);
+ if (!bh) {
if (!silent)
befs_error(sb, "unable to read superblock");
goto unacquire_priv_sbp;
@@ -816,7 +851,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
brelse(bh);
- if( befs_sb->num_blocks > ~((sector_t)0) ) {
+ if (befs_sb->num_blocks > ~((sector_t)0)) {
if (!silent)
befs_error(sb, "blocks count: %llu is larger than the host can use",
befs_sb->num_blocks);
@@ -831,6 +866,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
/* Set real blocksize of fs */
sb_set_blocksize(sb, (ulong) befs_sb->block_size);
sb->s_op = &befs_sops;
+ sb->s_export_op = &befs_export_operations;
root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
if (IS_ERR(root)) {
ret = PTR_ERR(root);
@@ -861,16 +897,16 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
}
return 0;
-/*****************/
- unacquire_bh:
+
+unacquire_bh:
brelse(bh);
- unacquire_priv_sbp:
+unacquire_priv_sbp:
kfree(befs_sb->mount_opts.iocharset);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
- unacquire_none:
+unacquire_none:
return ret;
}
@@ -919,7 +955,7 @@ static struct file_system_type befs_fs_type = {
.name = "befs",
.mount = befs_mount,
.kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("befs");
@@ -956,9 +992,9 @@ exit_befs_fs(void)
}
/*
-Macros that typecheck the init and exit functions,
-ensures that they are called at init and cleanup,
-and eliminates warnings about unused functions.
-*/
+ * Macros that typecheck the init and exit functions,
+ * ensures that they are called at init and cleanup,
+ * and eliminates warnings about unused functions.
+ */
module_init(init_befs_fs)
module_exit(exit_befs_fs)
diff --git a/fs/befs/super.h b/fs/befs/super.h
index dc4556376a22..ec1df30a7e9a 100644
--- a/fs/befs/super.h
+++ b/fs/befs/super.h
@@ -2,7 +2,5 @@
* super.h
*/
-int befs_load_sb(struct super_block *sb, befs_super_block * disk_sb);
-
+int befs_load_sb(struct super_block *sb, befs_super_block *disk_sb);
int befs_check_sb(struct super_block *sb);
-
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index cb22a9f9ae7e..fad81041f5ab 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1273,8 +1273,8 @@ out_error:
*/
static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
{
- int error;
struct inode *inode = d_inode(dentry);
+ int error = 0;
/*
* I believe we can only get a negative dentry here in the case of a
@@ -1293,7 +1293,8 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
return 0;
}
- error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (nfs_mapping_need_revalidate_inode(inode))
+ error = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
__func__, inode->i_ino, error ? "invalid" : "valid");
return !error;
@@ -2285,8 +2286,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
if (cache == NULL)
goto out;
/* Found an entry, is our attribute cache valid? */
- if (!nfs_attribute_cache_expired(inode) &&
- !(nfsi->cache_validity & NFS_INO_INVALID_ATTR))
+ if (!nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
break;
err = -ECHILD;
if (!may_block)
@@ -2334,12 +2334,12 @@ static int nfs_access_get_cached_rcu(struct inode *inode, struct rpc_cred *cred,
cache = NULL;
if (cache == NULL)
goto out;
- err = nfs_revalidate_inode_rcu(NFS_SERVER(inode), inode);
- if (err)
+ if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
goto out;
res->jiffies = cache->jiffies;
res->cred = cache->cred;
res->mask = cache->mask;
+ err = 0;
out:
rcu_read_unlock();
return err;
@@ -2491,12 +2491,13 @@ EXPORT_SYMBOL_GPL(nfs_may_open);
static int nfs_execute_ok(struct inode *inode, int mask)
{
struct nfs_server *server = NFS_SERVER(inode);
- int ret;
+ int ret = 0;
- if (mask & MAY_NOT_BLOCK)
- ret = nfs_revalidate_inode_rcu(server, inode);
- else
- ret = nfs_revalidate_inode(server, inode);
+ if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS)) {
+ if (mask & MAY_NOT_BLOCK)
+ return -ECHILD;
+ ret = __nfs_revalidate_inode(server, inode);
+ }
if (ret == 0 && !execute_ok(inode))
ret = -EACCES;
return ret;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 55208b9b3c11..157cb43ce9db 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -101,21 +101,11 @@ EXPORT_SYMBOL_GPL(nfs_file_release);
static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs_inode *nfsi = NFS_I(inode);
- const unsigned long force_reval = NFS_INO_REVAL_PAGECACHE|NFS_INO_REVAL_FORCED;
- unsigned long cache_validity = nfsi->cache_validity;
-
- if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) &&
- (cache_validity & force_reval) != force_reval)
- goto out_noreval;
if (filp->f_flags & O_DIRECT)
goto force_reval;
- if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
- goto force_reval;
- if (nfs_attribute_timeout(inode))
+ if (nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE))
goto force_reval;
-out_noreval:
return 0;
force_reval:
return __nfs_revalidate_inode(server, inode);
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index a5589b791439..f956ca20a8a3 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -282,7 +282,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
s->nfs_client->cl_minorversion);
out_test_devid:
- if (filelayout_test_devid_unavailable(devid))
+ if (ret->ds_clp == NULL ||
+ filelayout_test_devid_unavailable(devid))
ret = NULL;
out:
return ret;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 9e111d07f667..45962fe5098c 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1126,7 +1126,8 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
case -EPIPE:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
- nfs4_mark_deviceid_unavailable(devid);
+ nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+ &devid->deviceid);
rpc_wake_up(&tbl->slot_tbl_waitq);
/* fall through */
default:
@@ -1175,7 +1176,8 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
default:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
- nfs4_mark_deviceid_unavailable(devid);
+ nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+ &devid->deviceid);
}
/* FIXME: Need to prevent infinite looping here. */
return -NFS4ERR_RESET_TO_PNFS;
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 3cc39d1c1206..e5a6f248697b 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -177,7 +177,7 @@ out_err:
static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
struct nfs4_deviceid_node *devid)
{
- nfs4_mark_deviceid_unavailable(devid);
+ nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid);
if (!ff_layout_has_available_ds(lseg))
pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
lseg);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 5864146e05e6..011e4f8c1e01 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -160,6 +160,43 @@ int nfs_sync_mapping(struct address_space *mapping)
return ret;
}
+static int nfs_attribute_timeout(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
+}
+
+static bool nfs_check_cache_invalid_delegated(struct inode *inode, unsigned long flags)
+{
+ unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
+
+ /* Special case for the pagecache or access cache */
+ if (flags == NFS_INO_REVAL_PAGECACHE &&
+ !(cache_validity & NFS_INO_REVAL_FORCED))
+ return false;
+ return (cache_validity & flags) != 0;
+}
+
+static bool nfs_check_cache_invalid_not_delegated(struct inode *inode, unsigned long flags)
+{
+ unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
+
+ if ((cache_validity & flags) != 0)
+ return true;
+ if (nfs_attribute_timeout(inode))
+ return true;
+ return false;
+}
+
+bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags)
+{
+ if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ return nfs_check_cache_invalid_delegated(inode, flags);
+
+ return nfs_check_cache_invalid_not_delegated(inode, flags);
+}
+
static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
{
struct nfs_inode *nfsi = NFS_I(inode);
@@ -795,6 +832,8 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
if (!is_sync)
return;
inode = d_inode(ctx->dentry);
+ if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ return;
nfsi = NFS_I(inode);
if (inode->i_mapping->nrpages == 0)
return;
@@ -1044,13 +1083,6 @@ out:
return status;
}
-int nfs_attribute_timeout(struct inode *inode)
-{
- struct nfs_inode *nfsi = NFS_I(inode);
-
- return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
-}
-
int nfs_attribute_cache_expired(struct inode *inode)
{
if (nfs_have_delegated_attributes(inode))
@@ -1073,15 +1105,6 @@ int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
}
EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
-int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode)
-{
- if (!(NFS_I(inode)->cache_validity &
- (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL))
- && !nfs_attribute_cache_expired(inode))
- return NFS_STALE(inode) ? -ESTALE : 0;
- return -ECHILD;
-}
-
static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
{
struct nfs_inode *nfsi = NFS_I(inode);
@@ -1114,17 +1137,8 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
bool nfs_mapping_need_revalidate_inode(struct inode *inode)
{
- unsigned long cache_validity = NFS_I(inode)->cache_validity;
-
- if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
- const unsigned long force_reval =
- NFS_INO_REVAL_PAGECACHE|NFS_INO_REVAL_FORCED;
- return (cache_validity & force_reval) == force_reval;
- }
-
- return (cache_validity & NFS_INO_REVAL_PAGECACHE)
- || nfs_attribute_timeout(inode)
- || NFS_STALE(inode);
+ return nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE) ||
+ NFS_STALE(inode);
}
int nfs_revalidate_mapping_rcu(struct inode *inode)
@@ -1536,13 +1550,6 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr
{
unsigned long invalid = NFS_INO_INVALID_ATTR;
- /*
- * Don't revalidate the pagecache if we hold a delegation, but do
- * force an attribute update
- */
- if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
- invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED;
-
if (S_ISDIR(inode->i_mode))
invalid |= NFS_INO_INVALID_DATA;
nfs_set_cache_invalid(inode, invalid);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 6b79c2ca9b9a..09ca5095c04e 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -381,6 +381,7 @@ extern int nfs_drop_inode(struct inode *);
extern void nfs_clear_inode(struct inode *);
extern void nfs_evict_inode(struct inode *);
void nfs_zap_acl_cache(struct inode *inode);
+extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
extern int nfs_wait_atomic_killable(atomic_t *p);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d33242c8d95d..6dcbc5defb7a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1089,8 +1089,15 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
spin_lock(&dir->i_lock);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
- if (!cinfo->atomic || cinfo->before != dir->i_version)
+ if (cinfo->atomic && cinfo->before == dir->i_version) {
+ nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
+ nfsi->attrtimeo_timestamp = jiffies;
+ } else {
nfs_force_lookup_revalidate(dir);
+ if (cinfo->before != dir->i_version)
+ nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
+ NFS_INO_INVALID_ACL;
+ }
dir->i_version = cinfo->after;
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
nfs_fscache_invalidate(dir);
@@ -3115,6 +3122,16 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
res_stateid = &calldata->res.stateid;
renew_lease(server, calldata->timestamp);
break;
+ case -NFS4ERR_ACCESS:
+ if (calldata->arg.bitmask != NULL) {
+ calldata->arg.bitmask = NULL;
+ calldata->res.fattr = NULL;
+ task->tk_status = 0;
+ rpc_restart_call_prepare(task);
+ goto out_release;
+
+ }
+ break;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
@@ -3140,7 +3157,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
res_stateid, calldata->arg.fmode);
out_release:
nfs_release_seqid(calldata->arg.seqid);
- nfs_refresh_inode(calldata->inode, calldata->res.fattr);
+ nfs_refresh_inode(calldata->inode, &calldata->fattr);
dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
}
@@ -3193,9 +3210,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
goto out_wait;
}
- if (calldata->arg.fmode == 0) {
+ if (calldata->arg.fmode == 0)
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
+ if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
/* Close-to-open cache consistency revalidation */
if (!nfs4_have_delegation(inode, FMODE_READ))
calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
@@ -3207,7 +3225,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
nfs4_map_atomic_open_share(NFS_SERVER(inode),
calldata->arg.fmode, 0);
- nfs_fattr_init(calldata->res.fattr);
+ if (calldata->res.fattr == NULL)
+ calldata->arg.bitmask = NULL;
+ else if (calldata->arg.bitmask == NULL)
+ calldata->res.fattr = NULL;
calldata->timestamp = jiffies;
if (nfs4_setup_sequence(NFS_SERVER(inode),
&calldata->arg.seq_args,
@@ -3274,6 +3295,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
if (IS_ERR(calldata->arg.seqid))
goto out_free_calldata;
+ nfs_fattr_init(&calldata->fattr);
calldata->arg.fmode = 0;
calldata->lr.arg.ld_private = &calldata->lr.ld_private;
calldata->res.fattr = &calldata->fattr;
@@ -5673,6 +5695,14 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
case -NFS4ERR_STALE_STATEID:
task->tk_status = 0;
break;
+ case -NFS4ERR_ACCESS:
+ if (data->args.bitmask) {
+ data->args.bitmask = NULL;
+ data->res.fattr = NULL;
+ task->tk_status = 0;
+ rpc_restart_call_prepare(task);
+ return;
+ }
default:
if (nfs4_async_handle_error(task, data->res.server,
NULL, NULL) == -EAGAIN) {
@@ -5692,6 +5722,7 @@ static void nfs4_delegreturn_release(void *calldata)
if (data->lr.roc)
pnfs_roc_release(&data->lr.arg, &data->lr.res,
data->res.lr_ret);
+ nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
nfs_iput_and_deactive(inode);
}
kfree(calldata);
@@ -5780,10 +5811,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
if (status != 0)
goto out;
status = data->rpc_status;
- if (status == 0)
- nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
- else
- nfs_refresh_inode(inode, &data->fattr);
out:
rpc_put_task(task);
return status;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 95baf7d340f0..1d152f4470cd 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -494,21 +494,18 @@ nfs4_alloc_state_owner(struct nfs_server *server,
}
static void
-nfs4_drop_state_owner(struct nfs4_state_owner *sp)
-{
- struct rb_node *rb_node = &sp->so_server_node;
-
- if (!RB_EMPTY_NODE(rb_node)) {
- struct nfs_server *server = sp->so_server;
- struct nfs_client *clp = server->nfs_client;
-
- spin_lock(&clp->cl_lock);
- if (!RB_EMPTY_NODE(rb_node)) {
- rb_erase(rb_node, &server->state_owners);
- RB_CLEAR_NODE(rb_node);
- }
- spin_unlock(&clp->cl_lock);
- }
+nfs4_reset_state_owner(struct nfs4_state_owner *sp)
+{
+ /* This state_owner is no longer usable, but must
+ * remain in place so that state recovery can find it
+ * and the opens associated with it.
+ * It may also be used for new 'open' request to
+ * return a delegation to the server.
+ * So update the 'create_time' so that it looks like
+ * a new state_owner. This will cause the server to
+ * request an OPEN_CONFIRM to start a new sequence.
+ */
+ sp->so_seqid.create_time = ktime_get();
}
static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
@@ -797,21 +794,33 @@ void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
/*
* Search the state->lock_states for an existing lock_owner
- * that is compatible with current->files
+ * that is compatible with either of the given owners.
+ * If the second is non-zero, then the first refers to a Posix-lock
+ * owner (current->files) and the second refers to a flock/OFD
+ * owner (struct file*). In that case, prefer a match for the first
+ * owner.
+ * If both sorts of locks are held on the one file we cannot know
+ * which stateid was intended to be used, so a "correct" choice cannot
+ * be made. Failing that, a "consistent" choice is preferable. The
+ * consistent choice we make is to prefer the first owner, that of a
+ * Posix lock.
*/
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state,
fl_owner_t fl_owner, fl_owner_t fl_owner2)
{
- struct nfs4_lock_state *pos;
+ struct nfs4_lock_state *pos, *ret = NULL;
list_for_each_entry(pos, &state->lock_states, ls_locks) {
- if (pos->ls_owner != fl_owner &&
- pos->ls_owner != fl_owner2)
- continue;
- atomic_inc(&pos->ls_count);
- return pos;
+ if (pos->ls_owner == fl_owner) {
+ ret = pos;
+ break;
+ }
+ if (pos->ls_owner == fl_owner2)
+ ret = pos;
}
- return NULL;
+ if (ret)
+ atomic_inc(&ret->ls_count);
+ return ret;
}
/*
@@ -1101,7 +1110,7 @@ void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid);
if (status == -NFS4ERR_BAD_SEQID)
- nfs4_drop_state_owner(sp);
+ nfs4_reset_state_owner(sp);
if (!nfs4_has_session(sp->so_server->nfs_client))
nfs_increment_seqid(status, seqid);
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1af6268a7d8c..e9255cb453e6 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -502,11 +502,13 @@ static int nfs4_stat_to_errno(int);
(compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
+ encode_layoutreturn_maxsz + \
encode_open_downgrade_maxsz)
#define NFS4_dec_open_downgrade_sz \
(compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
+ decode_layoutreturn_maxsz + \
decode_open_downgrade_maxsz)
#define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
@@ -2277,9 +2279,9 @@ static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_putfh(xdr, args->fh, &hdr);
if (args->lr_args)
encode_layoutreturn(xdr, args->lr_args, &hdr);
- encode_close(xdr, args, &hdr);
if (args->bitmask != NULL)
encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_close(xdr, args, &hdr);
encode_nops(&hdr);
}
@@ -2356,6 +2358,8 @@ static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
+ if (args->lr_args)
+ encode_layoutreturn(xdr, args->lr_args, &hdr);
encode_open_downgrade(xdr, args, &hdr);
encode_nops(&hdr);
}
@@ -2701,7 +2705,8 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
encode_putfh(xdr, args->fhandle, &hdr);
if (args->lr_args)
encode_layoutreturn(xdr, args->lr_args, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
+ if (args->bitmask)
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_delegreturn(xdr, args->stateid, &hdr);
encode_nops(&hdr);
}
@@ -6151,6 +6156,12 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
status = decode_putfh(xdr);
if (status)
goto out;
+ if (res->lr_res) {
+ status = decode_layoutreturn(xdr, res->lr_res);
+ res->lr_ret = status;
+ if (status)
+ goto out;
+ }
status = decode_open_downgrade(xdr, res);
out:
return status;
@@ -6484,16 +6495,12 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
}
+ if (res->fattr != NULL) {
+ status = decode_getfattr(xdr, res->fattr, res->server);
+ if (status != 0)
+ goto out;
+ }
status = decode_close(xdr, res);
- if (status != 0)
- goto out;
- /*
- * Note: Server may do delete on close for this file
- * in which case the getattr call will fail with
- * an ESTALE error. Shouldn't be a problem,
- * though, since fattr->valid will remain unset.
- */
- decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6966,9 +6973,11 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
if (status)
goto out;
}
- status = decode_getfattr(xdr, res->fattr, res->server);
- if (status != 0)
- goto out;
+ if (res->fattr) {
+ status = decode_getfattr(xdr, res->fattr, res->server);
+ if (status != 0)
+ goto out;
+ }
status = decode_delegreturn(xdr);
out:
return status;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 896df7bdf85f..59554f3adf29 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1251,6 +1251,7 @@ bool pnfs_roc(struct inode *ino,
nfs4_stateid stateid;
enum pnfs_iomode iomode = 0;
bool layoutreturn = false, roc = false;
+ bool skip_read = false;
if (!nfs_have_layout(ino))
return false;
@@ -1270,18 +1271,27 @@ retry:
}
/* no roc if we hold a delegation */
- if (nfs4_check_delegation(ino, FMODE_READ))
- goto out_noroc;
+ if (nfs4_check_delegation(ino, FMODE_READ)) {
+ if (nfs4_check_delegation(ino, FMODE_WRITE))
+ goto out_noroc;
+ skip_read = true;
+ }
list_for_each_entry(ctx, &nfsi->open_files, list) {
state = ctx->state;
+ if (state == NULL)
+ continue;
/* Don't return layout if there is open file state */
- if (state != NULL && state->state != 0)
+ if (state->state & FMODE_WRITE)
goto out_noroc;
+ if (state->state & FMODE_READ)
+ skip_read = true;
}
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
+ if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
+ continue;
/* If we are sending layoutreturn, invalidate all valid lsegs */
if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
continue;
diff --git a/fs/splice.c b/fs/splice.c
index 8ed7c9d8c0fb..873d83104e79 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1087,7 +1087,13 @@ EXPORT_SYMBOL(do_splice_direct);
static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
{
- while (pipe->nrbufs == pipe->buffers) {
+ for (;;) {
+ if (unlikely(!pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ return -EPIPE;
+ }
+ if (pipe->nrbufs != pipe->buffers)
+ return 0;
if (flags & SPLICE_F_NONBLOCK)
return -EAGAIN;
if (signal_pending(current))
@@ -1096,7 +1102,6 @@ static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
pipe_wait(pipe);
pipe->waiting_writers--;
}
- return 0;
}
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index d7d0f495a34e..303315b9693f 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -13,6 +13,8 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
}
#endif
+extern bool acpi_permanent_mmap;
+
void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 5c7356adc10b..f5e10dd8e86b 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -513,10 +513,12 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_table(acpi_string signature, u32 instance,
struct acpi_table_header
**out_table))
+ACPI_EXTERNAL_RETURN_VOID(void acpi_put_table(struct acpi_table_header *table))
+
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_get_table_by_index(u32 table_index,
- struct acpi_table_header
- **out_table))
+ acpi_get_table_by_index(u32 table_index,
+ struct acpi_table_header
+ **out_table))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_install_table_handler(acpi_table_handler
handler, void *context))
@@ -965,15 +967,6 @@ void acpi_terminate_debugger(void);
/*
* Divergences
*/
-ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_get_table_with_size(acpi_string signature,
- u32 instance,
- struct acpi_table_header
- **out_table,
- acpi_size *tbl_size))
-
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_data_full(acpi_handle object,
acpi_object_handler handler,
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index c19700e2a2fe..da5708caf8a1 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -371,6 +371,7 @@ struct acpi_table_desc {
union acpi_name_union signature;
acpi_owner_id owner_id;
u8 flags;
+ u16 validation_count;
};
/* Masks for Flags field above */
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index a5509d87230a..7dbb1141f546 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -142,7 +142,6 @@ static inline void acpi_os_terminate_command_signals(void)
/*
* OSL interfaces added by Linux
*/
-void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
#endif /* __KERNEL__ */
diff --git a/include/dt-bindings/net/mdio.h b/include/dt-bindings/net/mdio.h
deleted file mode 100644
index 99c6d903d439..000000000000
--- a/include/dt-bindings/net/mdio.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * This header provides generic constants for ethernet MDIO bindings
- */
-
-#ifndef _DT_BINDINGS_NET_MDIO_H
-#define _DT_BINDINGS_NET_MDIO_H
-
-/*
- * EEE capability Advertisement
- */
-
-#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */
-#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */
-#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */
-#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */
-#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
-#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
-
-#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 286b2a264383..83695641bd5e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -288,7 +288,6 @@ enum blk_queue_state {
struct blk_queue_tag {
struct request **tag_index; /* map of busy tags */
unsigned long *tag_map; /* bit map of free/busy tags */
- int busy; /* current depth */
int max_depth; /* what we will send to device */
int real_max_depth; /* what the array can hold */
atomic_t refcnt; /* map can be shared */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index a951fd10aaaa..6a524bf6a06d 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -18,6 +18,7 @@ enum cache_type {
/**
* struct cacheinfo - represent a cache leaf node
+ * @id: This cache's id. It is unique among caches with the same (type, level).
* @type: type of the cache - data, inst or unified
* @level: represents the hierarchy in the multi-level cache
* @coherency_line_size: size of each cache line usually representing
@@ -44,6 +45,7 @@ enum cache_type {
* keeping, the remaining members form the core properties of the cache
*/
struct cacheinfo {
+ unsigned int id;
enum cache_type type;
unsigned int level;
unsigned int coherency_line_size;
@@ -61,6 +63,7 @@ struct cacheinfo {
#define CACHE_WRITE_ALLOCATE BIT(3)
#define CACHE_ALLOCATE_POLICY_MASK \
(CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
+#define CACHE_ID BIT(4)
struct device_node *of_node;
bool disable_sysfs;
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 9a30b921f740..2319b8c108e8 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -35,14 +35,11 @@
#ifndef _CONFIGFS_H_
#define _CONFIGFS_H_
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/kref.h>
-#include <linux/mutex.h>
-#include <linux/err.h>
-
-#include <linux/atomic.h>
+#include <linux/stat.h> /* S_IRUGO */
+#include <linux/types.h> /* ssize_t */
+#include <linux/list.h> /* struct list_head */
+#include <linux/kref.h> /* struct kref */
+#include <linux/mutex.h> /* struct mutex */
#define CONFIGFS_ITEM_NAME_LEN 20
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0eb7c2e7f0d6..7f6952f8d6aa 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -11,6 +11,7 @@
#define _LINUX_IMA_H
#include <linux/fs.h>
+#include <linux/kexec.h>
struct linux_binprm;
#ifdef CONFIG_IMA
@@ -23,6 +24,10 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
extern void ima_post_path_mknod(struct dentry *dentry);
+#ifdef CONFIG_IMA_KEXEC
+extern void ima_add_kexec_buffer(struct kimage *image);
+#endif
+
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
{
@@ -62,6 +67,13 @@ static inline void ima_post_path_mknod(struct dentry *dentry)
#endif /* CONFIG_IMA */
+#ifndef CONFIG_IMA_KEXEC
+struct kimage;
+
+static inline void ima_add_kexec_buffer(struct kimage *image)
+{}
+#endif
+
#ifdef CONFIG_IMA_APPRAISE
extern void ima_inode_post_setattr(struct dentry *dentry);
extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index cb631973839a..f1da8c8dd473 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -340,10 +340,8 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
extern int nfs_permission(struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
-extern int nfs_attribute_timeout(struct inode *inode);
extern int nfs_attribute_cache_expired(struct inode *inode);
extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
-extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 57c9e0622a38..56375edf2ed2 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -77,8 +77,11 @@ extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
#ifdef CONFIG_PRINTK
-#define WARN_ON_RATELIMIT(condition, state) \
- WARN_ON((condition) && __ratelimit(state))
+#define WARN_ON_RATELIMIT(condition, state) ({ \
+ bool __rtn_cond = !!(condition); \
+ WARN_ON(__rtn_cond && __ratelimit(state)); \
+ __rtn_cond; \
+})
#define WARN_RATELIMIT(condition, format, ...) \
({ \
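
The rewritten WARN_ON_RATELIMIT() above is a GNU statement expression: it evaluates the condition exactly once, warns only when the rate limiter allows, and then yields the condition's value, so callers can branch on the macro itself. A minimal userspace sketch of the same shape, with warn_on() and ratelimit_ok() as hypothetical stand-ins for WARN_ON() and __ratelimit():

#include <stdbool.h>
#include <stdio.h>

static bool ratelimit_ok(void) { return true; }          /* stub for __ratelimit() */
static void warn_on(bool cond) { if (cond) puts("WARNING"); }

#define WARN_ON_RATELIMIT_SKETCH(condition) ({           \
	bool __rtn_cond = !!(condition);                  \
	warn_on(__rtn_cond && ratelimit_ok());            \
	__rtn_cond;                                       \
})

int main(void)
{
	int bad = 1;

	if (WARN_ON_RATELIMIT_SKETCH(bad))                /* condition is reported once... */
		return 1;                                 /* ...and still usable for control flow */
	return 0;
}
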
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a440cf178191..4d1905245c7a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1821,6 +1821,9 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
+#ifdef CONFIG_INTEL_RDT_A
+ int closid;
+#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 931a47ba4571..1beab5532035 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -205,10 +205,12 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
if (dev) {
- ip4 = (struct in_device *)dev->ip_ptr;
- if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
+ ip4 = in_dev_get(dev);
+ if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) {
ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
(struct in6_addr *)gid);
+ in_dev_put(ip4);
+ }
dev_put(dev);
}
}
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 4ac24f5a3308..275581d483dd 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -1,12 +1,14 @@
#ifndef ISCSI_TARGET_CORE_H
#define ISCSI_TARGET_CORE_H
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <scsi/iscsi_proto.h>
-#include <target/target_core_base.h>
+#include <linux/dma-direction.h> /* enum dma_data_direction */
+#include <linux/list.h> /* struct list_head */
+#include <linux/socket.h> /* struct sockaddr_storage */
+#include <linux/types.h> /* u8 */
+#include <scsi/iscsi_proto.h> /* itt_t */
+#include <target/target_core_base.h> /* struct se_cmd */
+
+struct sock;
#define ISCSIT_VERSION "v4.1.0"
#define ISCSI_MAX_DATASN_MISSING_COUNT 16
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index e615bb485d0b..c27dd471656d 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -1,6 +1,10 @@
#ifndef ISCSI_TARGET_STAT_H
#define ISCSI_TARGET_STAT_H
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/socket.h>
+
/*
* For struct iscsi_tiqn->tiqn_wwn default groups
*/
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 40ac7cd80150..1277e9ba0318 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -1,6 +1,6 @@
-#include <linux/module.h>
-#include <linux/list.h>
-#include "iscsi_target_core.h"
+#include "iscsi_target_core.h" /* struct iscsi_cmd */
+
+struct sockaddr_storage;
struct iscsit_transport {
#define ISCSIT_TRANSPORT_NAME 16
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index f6f3bc52c1ac..b54b98dc2d4a 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -1,8 +1,14 @@
#ifndef TARGET_CORE_BACKEND_H
#define TARGET_CORE_BACKEND_H
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define TRANSPORT_FLAG_PASSTHROUGH 1
+struct request_queue;
+struct scatterlist;
+
struct target_backend_ops {
char name[16];
char inquiry_prod[16];
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 00558287936d..29e6858bb164 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -1,14 +1,10 @@
#ifndef TARGET_CORE_BASE_H
#define TARGET_CORE_BASE_H
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/blkdev.h>
-#include <linux/percpu_ida.h>
-#include <linux/t10-pi.h>
-#include <net/sock.h>
-#include <net/tcp.h>
+#include <linux/configfs.h> /* struct config_group */
+#include <linux/dma-direction.h> /* enum dma_data_direction */
+#include <linux/percpu_ida.h> /* struct percpu_ida */
+#include <linux/semaphore.h> /* struct semaphore */
#define TARGET_CORE_VERSION "v5.0"
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 5cd6faa6e0d1..358041bad1da 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -1,6 +1,10 @@
#ifndef TARGET_CORE_FABRIC_H
#define TARGET_CORE_FABRIC_H
+#include <linux/configfs.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
struct target_core_fabric_ops {
struct module *module;
const char *name;
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 9bd559472c92..e230af2e6855 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -57,6 +57,7 @@
#define CGROUP_SUPER_MAGIC 0x27e0eb
#define CGROUP2_SUPER_MAGIC 0x63677270
+#define RDTGROUP_SUPER_MAGIC 0x7655821
#define STACK_END_MAGIC 0x57AC6E9D
diff --git a/kernel/kcov.c b/kernel/kcov.c
index cc2fa35ca480..85e5546cd791 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -19,6 +19,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
+#include <asm/setup.h>
/*
* kcov descriptor (one per opened debugfs file).
@@ -73,6 +74,11 @@ void notrace __sanitizer_cov_trace_pc(void)
if (mode == KCOV_MODE_TRACE) {
unsigned long *area;
unsigned long pos;
+ unsigned long ip = _RET_IP_;
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ ip -= kaslr_offset();
+#endif
/*
* There is some code that runs in interrupts but for which
@@ -86,7 +92,7 @@ void notrace __sanitizer_cov_trace_pc(void)
/* The first word is number of subsequent PCs. */
pos = READ_ONCE(area[0]) + 1;
if (likely(pos < t->kcov_size)) {
- area[pos] = _RET_IP_;
+ area[pos] = ip;
WRITE_ONCE(area[0], pos);
}
}
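
The kcov change records _RET_IP_ minus kaslr_offset(), so the coverage PCs written to the userspace buffer match the addresses in vmlinux regardless of where KASLR placed the kernel for this boot. A userspace sketch of the same normalization (load_offset is a hypothetical stand-in for kaslr_offset()):

#include <stdio.h>

static unsigned long normalize_pc(unsigned long pc, unsigned long load_offset)
{
	return pc - load_offset;                          /* report offsets, not raw addresses */
}

int main(void)
{
	unsigned long load_offset = 0x1000000;            /* randomized base for this run */
	unsigned long pc = 0x1234567 + load_offset;       /* address observed at run time */

	printf("stable pc: 0x%lx\n", normalize_pc(pc, load_offset));
	return 0;
}
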
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 0c2df7f73792..b56a558e406d 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
+#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/syscalls.h>
@@ -132,6 +133,9 @@ kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
return ret;
image->kernel_buf_len = size;
+ /* IMA needs to pass the measurement list to the next kernel. */
+ ima_add_kexec_buffer(image);
+
/* Call arch image probe handlers */
ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
image->kernel_buf_len);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7446097f72bd..cb66a4648840 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -26,7 +26,7 @@ config CONSOLE_LOGLEVEL_DEFAULT
the kernel bootargs. loglevel=<x> continues to override whatever
value is specified here as well.
- Note: This does not affect the log level of un-prefixed prink()
+ Note: This does not affect the log level of un-prefixed printk()
usage in the kernel. That is controlled by the MESSAGE_LOGLEVEL_DEFAULT
option.
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 6c707bfe02fd..a43013112581 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -139,7 +139,20 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
}
if (end_index >= start_index) {
- unsigned long count = invalidate_mapping_pages(mapping,
+ unsigned long count;
+
+ /*
+ * It's common to FADV_DONTNEED right after
+ * the read or write that instantiates the
+ * pages, in which case there will be some
+ * sitting on the local LRU cache. Try to
+ * avoid the expensive remote drain and the
+ * second cache tree walk below by flushing
+ * them out right away.
+ */
+ lru_add_drain();
+
+ count = invalidate_mapping_pages(mapping,
start_index, end_index);
/*
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 6c9615c90f37..618ab5079816 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -958,7 +958,7 @@ static int __ip_append_data(struct sock *sk,
csummode = CHECKSUM_PARTIAL;
cork->length += length;
- if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+ if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
(sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index d19044f2b1f4..c87d359b9b37 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2195,6 +2195,7 @@ static int validate_set(const struct nlattr *a,
case OVS_KEY_ATTR_ETHERNET:
if (mac_proto != MAC_PROTO_ETHERNET)
return -EINVAL;
+ break;
case OVS_KEY_ATTR_TUNNEL:
if (masked)
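
The added break above closes a switch fallthrough: a validated OVS_KEY_ATTR_ETHERNET set action previously fell through into the OVS_KEY_ATTR_TUNNEL case. A stand-alone illustration of that bug class (the enum and handlers below are illustrative only):

#include <stdio.h>

enum attr { ATTR_ETHERNET, ATTR_TUNNEL };

static void handle(enum attr a)
{
	switch (a) {
	case ATTR_ETHERNET:
		puts("validate ethernet set");
		break;                  /* without this, control falls into the tunnel case */
	case ATTR_TUNNEL:
		puts("validate tunnel set");
		break;
	}
}

int main(void)
{
	handle(ATTR_ETHERNET);          /* prints only the ethernet line once the break exists */
	return 0;
}
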
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 4c93badeabf2..ea961144084f 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -135,7 +135,7 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
/* Release any MRs associated with this socket */
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
while ((node = rb_first(&rs->rs_rdma_keys))) {
- mr = container_of(node, struct rds_mr, r_rb_node);
+ mr = rb_entry(node, struct rds_mr, r_rb_node);
if (mr->r_trans == rs->rs_transport)
mr->r_invalidate = 0;
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 86309a3156a5..a4f738ac7728 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -136,7 +136,7 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
struct fq_flow *aux;
parent = *p;
- aux = container_of(parent, struct fq_flow, rate_node);
+ aux = rb_entry(parent, struct fq_flow, rate_node);
if (f->time_next_packet >= aux->time_next_packet)
p = &parent->rb_right;
else
@@ -188,7 +188,7 @@ static void fq_gc(struct fq_sched_data *q,
while (*p) {
parent = *p;
- f = container_of(parent, struct fq_flow, fq_node);
+ f = rb_entry(parent, struct fq_flow, fq_node);
if (f->sk == sk)
break;
@@ -256,7 +256,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
while (*p) {
parent = *p;
- f = container_of(parent, struct fq_flow, fq_node);
+ f = rb_entry(parent, struct fq_flow, fq_node);
if (f->sk == sk) {
/* socket might have been reallocated, so check
* if its sk_hash is the same.
@@ -424,7 +424,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
q->time_next_delayed_flow = ~0ULL;
while ((p = rb_first(&q->delayed)) != NULL) {
- struct fq_flow *f = container_of(p, struct fq_flow, rate_node);
+ struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);
if (f->time_next_packet > now) {
q->time_next_delayed_flow = f->time_next_packet;
@@ -563,7 +563,7 @@ static void fq_reset(struct Qdisc *sch)
for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
root = &q->fq_root[idx];
while ((p = rb_first(root)) != NULL) {
- f = container_of(p, struct fq_flow, fq_node);
+ f = rb_entry(p, struct fq_flow, fq_node);
rb_erase(p, root);
fq_flow_purge(f);
@@ -593,7 +593,7 @@ static void fq_rehash(struct fq_sched_data *q,
oroot = &old_array[idx];
while ((op = rb_first(oroot)) != NULL) {
rb_erase(op, oroot);
- of = container_of(op, struct fq_flow, fq_node);
+ of = rb_entry(op, struct fq_flow, fq_node);
if (fq_gc_candidate(of)) {
fcnt++;
kmem_cache_free(fq_flow_cachep, of);
@@ -606,7 +606,7 @@ static void fq_rehash(struct fq_sched_data *q,
while (*np) {
parent = *np;
- nf = container_of(parent, struct fq_flow, fq_node);
+ nf = rb_entry(parent, struct fq_flow, fq_node);
BUG_ON(nf->sk == of->sk);
if (nf->sk > of->sk)
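
The sch_fq hunks (and the sch_netem and rds ones nearby) swap container_of() for rb_entry() when recovering a flow from its embedded rb_node; rb_entry() is defined as container_of(), so this is purely a readability change with no behavioral effect. A self-contained userspace version of the idiom (struct flow here is illustrative, not the kernel's fq_flow):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define rb_entry(ptr, type, member) container_of(ptr, type, member)

struct rb_node { struct rb_node *left, *right; };

struct flow {
	unsigned long id;
	struct rb_node node;            /* embedded tree linkage */
};

int main(void)
{
	struct flow f = { .id = 42 };
	struct rb_node *n = &f.node;    /* what a tree walk hands back */
	struct flow *owner = rb_entry(n, struct flow, node);

	printf("flow id: %lu\n", owner->id);
	return 0;
}
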
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 9f7b380cf0a3..b7e4097bfdab 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -152,7 +152,7 @@ struct netem_skb_cb {
static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
- return container_of(rb, struct sk_buff, rbnode);
+ return rb_entry(rb, struct sk_buff, rbnode);
}
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 401c60750b20..1ebc184a0e23 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -292,6 +292,8 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
}
af->from_addr_param(&addr, rawaddr, htons(port), 0);
+ if (sctp_bind_addr_state(bp, &addr) != -1)
+ goto next;
retval = sctp_add_bind_addr(bp, &addr, sizeof(addr),
SCTP_ADDR_SRC, gfp);
if (retval) {
@@ -300,6 +302,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
break;
}
+next:
len = ntohs(param->length);
addrs_len -= len;
raw_addr_list += len;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 7b523e3f551f..616a9428e0c4 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -205,26 +205,30 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
if (!addr->valid)
continue;
- if (sctp_in_scope(net, &addr->a, scope)) {
- /* Now that the address is in scope, check to see if
- * the address type is really supported by the local
- * sock as well as the remote peer.
- */
- if ((((AF_INET == addr->a.sa.sa_family) &&
- (copy_flags & SCTP_ADDR4_PEERSUPP))) ||
- (((AF_INET6 == addr->a.sa.sa_family) &&
- (copy_flags & SCTP_ADDR6_ALLOWED) &&
- (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
- error = sctp_add_bind_addr(bp, &addr->a,
- sizeof(addr->a),
- SCTP_ADDR_SRC, GFP_ATOMIC);
- if (error)
- goto end_copy;
- }
- }
+ if (!sctp_in_scope(net, &addr->a, scope))
+ continue;
+
+ /* Now that the address is in scope, check to see if
+ * the address type is really supported by the local
+ * sock as well as the remote peer.
+ */
+ if (addr->a.sa.sa_family == AF_INET &&
+ !(copy_flags & SCTP_ADDR4_PEERSUPP))
+ continue;
+ if (addr->a.sa.sa_family == AF_INET6 &&
+ (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
+ !(copy_flags & SCTP_ADDR6_PEERSUPP)))
+ continue;
+
+ if (sctp_bind_addr_state(bp, &addr->a) != -1)
+ continue;
+
+ error = sctp_add_bind_addr(bp, &addr->a, sizeof(addr->a),
+ SCTP_ADDR_SRC, GFP_ATOMIC);
+ if (error)
+ break;
}
-end_copy:
rcu_read_unlock();
return error;
}
diff --git a/scripts/selinux/genheaders/Makefile b/scripts/selinux/genheaders/Makefile
index 1d1ac51359e3..6fc2b8789a0b 100644
--- a/scripts/selinux/genheaders/Makefile
+++ b/scripts/selinux/genheaders/Makefile
@@ -1,4 +1,6 @@
hostprogs-y := genheaders
-HOST_EXTRACFLAGS += -Isecurity/selinux/include
+HOST_EXTRACFLAGS += \
+ -I$(srctree)/include/uapi -I$(srctree)/include \
+ -I$(srctree)/security/selinux/include
always := $(hostprogs-y)
diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c
index 539855ff31f9..f4dd41f900d5 100644
--- a/scripts/selinux/genheaders/genheaders.c
+++ b/scripts/selinux/genheaders/genheaders.c
@@ -1,3 +1,7 @@
+
+/* NOTE: we really do want to use the kernel headers here */
+#define __EXPORTED_HEADERS__
+
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
diff --git a/scripts/selinux/mdp/Makefile b/scripts/selinux/mdp/Makefile
index dba7eff69a00..d6a83cafe59f 100644
--- a/scripts/selinux/mdp/Makefile
+++ b/scripts/selinux/mdp/Makefile
@@ -1,5 +1,7 @@
hostprogs-y := mdp
-HOST_EXTRACFLAGS += -Isecurity/selinux/include
+HOST_EXTRACFLAGS += \
+ -I$(srctree)/include/uapi -I$(srctree)/include \
+ -I$(srctree)/security/selinux/include
always := $(hostprogs-y)
clean-files := policy.* file_contexts
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
index e10beb11b696..c29fa4a6228d 100644
--- a/scripts/selinux/mdp/mdp.c
+++ b/scripts/selinux/mdp/mdp.c
@@ -24,6 +24,10 @@
* Authors: Serge E. Hallyn <serue@us.ibm.com>
*/
+
+/* NOTE: we really do want to use the kernel headers here */
+#define __EXPORTED_HEADERS__
+
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 5487827fa86c..370eb2f4dd37 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -27,6 +27,18 @@ config IMA
to learn more about IMA.
If unsure, say N.
+config IMA_KEXEC
+ bool "Enable carrying the IMA measurement list across a soft boot"
+ depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
+ default n
+ help
+ TPM PCRs are only reset on a hard reboot. In order to validate
+ a TPM's quote after a soft boot, the IMA measurement list of the
+ running kernel must be saved and restored on boot.
+
+ Depending on the IMA policy, the measurement list can grow to
+ be very large.
+
config IMA_MEASURE_PCR_IDX
int
depends on IMA
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 9aeaedad1e2b..29f198bde02b 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
ima_policy.o ima_template.o ima_template_lib.o
ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
+ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index db25f54a04fe..5e6180a4da7d 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -28,6 +28,10 @@
#include "../integrity.h"
+#ifdef CONFIG_HAVE_IMA_KEXEC
+#include <asm/ima.h>
+#endif
+
enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
@@ -81,6 +85,7 @@ struct ima_template_field {
/* IMA template descriptor definition */
struct ima_template_desc {
+ struct list_head list;
char *name;
char *fmt;
int num_fields;
@@ -102,6 +107,27 @@ struct ima_queue_entry {
};
extern struct list_head ima_measurements; /* list of all measurements */
+/* Some details preceding the binary serialized measurement list */
+struct ima_kexec_hdr {
+ u16 version;
+ u16 _reserved0;
+ u32 _reserved1;
+ u64 buffer_size;
+ u64 count;
+};
+
+#ifdef CONFIG_HAVE_IMA_KEXEC
+void ima_load_kexec_buffer(void);
+#else
+static inline void ima_load_kexec_buffer(void) {}
+#endif /* CONFIG_HAVE_IMA_KEXEC */
+
+/*
+ * The default binary_runtime_measurements list format is defined as the
+ * platform native format. The canonical format is defined as little-endian.
+ */
+extern bool ima_canonical_fmt;
+
/* Internal IMA function definitions */
int ima_init(void);
int ima_fs_init(void);
@@ -122,7 +148,12 @@ int ima_init_crypto(void);
void ima_putc(struct seq_file *m, void *data, int datalen);
void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
struct ima_template_desc *ima_template_desc_current(void);
+int ima_restore_measurement_entry(struct ima_template_entry *entry);
+int ima_restore_measurement_list(loff_t bufsize, void *buf);
+int ima_measurements_show(struct seq_file *m, void *v);
+unsigned long ima_get_binary_runtime_size(void);
int ima_init_template(void);
+void ima_init_template_list(void);
/*
* used to protect h_table and sha_table
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 38f2ed830dd6..802d5d20f36f 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -477,11 +477,13 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
u8 *data_to_hash = field_data[i].data;
u32 datalen = field_data[i].len;
+ u32 datalen_to_hash =
+ !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);
if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
rc = crypto_shash_update(shash,
- (const u8 *) &field_data[i].len,
- sizeof(field_data[i].len));
+ (const u8 *) &datalen_to_hash,
+ sizeof(datalen_to_hash));
if (rc)
break;
} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 3df46906492d..ca303e5d2b94 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -28,6 +28,16 @@
static DEFINE_MUTEX(ima_write_mutex);
+bool ima_canonical_fmt;
+static int __init default_canonical_fmt_setup(char *str)
+{
+#ifdef __BIG_ENDIAN
+ ima_canonical_fmt = 1;
+#endif
+ return 1;
+}
+__setup("ima_canonical_fmt", default_canonical_fmt_setup);
+
static int valid_policy = 1;
#define TMPBUFLEN 12
static ssize_t ima_show_htable_value(char __user *buf, size_t count,
@@ -116,13 +126,13 @@ void ima_putc(struct seq_file *m, void *data, int datalen)
* [eventdata length]
* eventdata[n]=template specific data
*/
-static int ima_measurements_show(struct seq_file *m, void *v)
+int ima_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
char *template_name;
- int namelen;
+ u32 pcr, namelen, template_data_len; /* temporary fields */
bool is_ima_template = false;
int i;
@@ -139,25 +149,29 @@ static int ima_measurements_show(struct seq_file *m, void *v)
* PCR used defaults to the same (config option) in
* little-endian format, unless set in policy
*/
- ima_putc(m, &e->pcr, sizeof(e->pcr));
+ pcr = !ima_canonical_fmt ? e->pcr : cpu_to_le32(e->pcr);
+ ima_putc(m, &pcr, sizeof(e->pcr));
/* 2nd: template digest */
ima_putc(m, e->digest, TPM_DIGEST_SIZE);
/* 3rd: template name size */
- namelen = strlen(template_name);
+ namelen = !ima_canonical_fmt ? strlen(template_name) :
+ cpu_to_le32(strlen(template_name));
ima_putc(m, &namelen, sizeof(namelen));
/* 4th: template name */
- ima_putc(m, template_name, namelen);
+ ima_putc(m, template_name, strlen(template_name));
/* 5th: template length (except for 'ima' template) */
if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0)
is_ima_template = true;
- if (!is_ima_template)
- ima_putc(m, &e->template_data_len,
- sizeof(e->template_data_len));
+ if (!is_ima_template) {
+ template_data_len = !ima_canonical_fmt ? e->template_data_len :
+ cpu_to_le32(e->template_data_len);
+ ima_putc(m, &template_data_len, sizeof(e->template_data_len));
+ }
/* 6th: template specific data */
for (i = 0; i < e->template_desc->num_fields; i++) {
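
With ima_canonical_fmt set (selected on big-endian machines via the ima_canonical_fmt boot parameter handled above), every multi-byte length field emitted into the binary measurement list is first converted to little-endian, so a restore on a machine of either endianness reads the same byte stream. A userspace sketch of that canonical serialization, using glibc's htole32()/le32toh() in place of cpu_to_le32()/le32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "ima-ng";
	uint32_t namelen = htole32((uint32_t)strlen(name));   /* stored little-endian */
	unsigned char buf[64];
	size_t off = 0;
	uint32_t len;

	memcpy(buf + off, &namelen, sizeof(namelen));          /* 3rd field: name length */
	off += sizeof(namelen);
	memcpy(buf + off, name, strlen(name));                 /* 4th field: name bytes */
	off += strlen(name);

	/* a reader on any architecture converts back before using the value */
	memcpy(&len, buf, sizeof(len));
	printf("name length: %u, record bytes: %zu\n", le32toh(len), off);
	return 0;
}
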
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 2ac1f41db5c0..2967d497a665 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -129,6 +129,8 @@ int __init ima_init(void)
if (rc != 0)
return rc;
+ ima_load_kexec_buffer();
+
rc = ima_add_boot_aggregate(); /* boot aggregate must be first entry */
if (rc != 0)
return rc;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
new file mode 100644
index 000000000000..e473eee913cb
--- /dev/null
+++ b/security/integrity/ima/ima_kexec.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
+ * Mimi Zohar <zohar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/kexec.h>
+#include "ima.h"
+
+#ifdef CONFIG_IMA_KEXEC
+static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+ unsigned long segment_size)
+{
+ struct ima_queue_entry *qe;
+ struct seq_file file;
+ struct ima_kexec_hdr khdr;
+ int ret = 0;
+
+ /* segment size can't change between kexec load and execute */
+ file.buf = vmalloc(segment_size);
+ if (!file.buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ file.size = segment_size;
+ file.read_pos = 0;
+ file.count = sizeof(khdr); /* reserved space */
+
+ memset(&khdr, 0, sizeof(khdr));
+ khdr.version = 1;
+ list_for_each_entry_rcu(qe, &ima_measurements, later) {
+ if (file.count < file.size) {
+ khdr.count++;
+ ima_measurements_show(&file, qe);
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret < 0)
+ goto out;
+
+ /*
+ * fill in reserved space with some buffer details
+ * (eg. version, buffer size, number of measurements)
+ */
+ khdr.buffer_size = file.count;
+ if (ima_canonical_fmt) {
+ khdr.version = cpu_to_le16(khdr.version);
+ khdr.count = cpu_to_le64(khdr.count);
+ khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
+ }
+ memcpy(file.buf, &khdr, sizeof(khdr));
+
+ print_hex_dump(KERN_DEBUG, "ima dump: ", DUMP_PREFIX_NONE,
+ 16, 1, file.buf,
+ file.count < 100 ? file.count : 100, true);
+
+ *buffer_size = file.count;
+ *buffer = file.buf;
+out:
+ if (ret == -EINVAL)
+ vfree(file.buf);
+ return ret;
+}
+
+/*
+ * Called during kexec_file_load so that IMA can add a segment to the kexec
+ * image for the measurement list for the next kernel.
+ *
+ * This function assumes that kexec_mutex is held.
+ */
+void ima_add_kexec_buffer(struct kimage *image)
+{
+ struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
+ .buf_min = 0, .buf_max = ULONG_MAX,
+ .top_down = true };
+ unsigned long binary_runtime_size;
+
+ /* use more understandable variable names than defined in kbuf */
+ void *kexec_buffer = NULL;
+ size_t kexec_buffer_size;
+ size_t kexec_segment_size;
+ int ret;
+
+ /*
+ * Reserve an extra half page of memory for additional measurements
+ * added during the kexec load.
+ */
+ binary_runtime_size = ima_get_binary_runtime_size();
+ if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
+ kexec_segment_size = ULONG_MAX;
+ else
+ kexec_segment_size = ALIGN(ima_get_binary_runtime_size() +
+ PAGE_SIZE / 2, PAGE_SIZE);
+ if ((kexec_segment_size == ULONG_MAX) ||
+ ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages / 2)) {
+ pr_err("Binary measurement list too large.\n");
+ return;
+ }
+
+ ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer,
+ kexec_segment_size);
+ if (!kexec_buffer) {
+ pr_err("Not enough memory for the kexec measurement buffer.\n");
+ return;
+ }
+
+ kbuf.buffer = kexec_buffer;
+ kbuf.bufsz = kexec_buffer_size;
+ kbuf.memsz = kexec_segment_size;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret) {
+ pr_err("Error passing over kexec measurement buffer.\n");
+ return;
+ }
+
+ ret = arch_ima_add_kexec_buffer(image, kbuf.mem, kexec_segment_size);
+ if (ret) {
+ pr_err("Error passing over kexec measurement buffer.\n");
+ return;
+ }
+
+ pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
+ kbuf.mem);
+}
+#endif /* IMA_KEXEC */
+
+/*
+ * Restore the measurement list from the previous kernel.
+ */
+void ima_load_kexec_buffer(void)
+{
+ void *kexec_buffer = NULL;
+ size_t kexec_buffer_size = 0;
+ int rc;
+
+ rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size);
+ switch (rc) {
+ case 0:
+ rc = ima_restore_measurement_list(kexec_buffer_size,
+ kexec_buffer);
+ if (rc != 0)
+ pr_err("Failed to restore the measurement list: %d\n",
+ rc);
+
+ ima_free_kexec_buffer();
+ break;
+ case -ENOTSUPP:
+ pr_debug("Restoring the measurement list not supported\n");
+ break;
+ case -ENOENT:
+ pr_debug("No measurement list to restore\n");
+ break;
+ default:
+ pr_debug("Error restoring the measurement list: %d\n", rc);
+ }
+}
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 423d111b3b94..50818c60538b 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -418,6 +418,7 @@ static int __init init_ima(void)
{
int error;
+ ima_init_template_list();
hash_setup(CONFIG_IMA_DEFAULT_HASH);
error = ima_init();
if (!error) {
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 32f6ac0f96df..d9aa5ab71204 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -29,6 +29,11 @@
#define AUDIT_CAUSE_LEN_MAX 32
LIST_HEAD(ima_measurements); /* list of all measurements */
+#ifdef CONFIG_IMA_KEXEC
+static unsigned long binary_runtime_size;
+#else
+static unsigned long binary_runtime_size = ULONG_MAX;
+#endif
/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
@@ -64,12 +69,32 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
return ret;
}
+/*
+ * Calculate the memory required for serializing a single
+ * binary_runtime_measurement list entry, which contains a
+ * couple of variable length fields (e.g template name and data).
+ */
+static int get_binary_runtime_size(struct ima_template_entry *entry)
+{
+ int size = 0;
+
+ size += sizeof(u32); /* pcr */
+ size += sizeof(entry->digest);
+ size += sizeof(int); /* template name size field */
+ size += strlen(entry->template_desc->name) + 1;
+ size += sizeof(entry->template_data_len);
+ size += entry->template_data_len;
+ return size;
+}
+
/* ima_add_template_entry helper function:
- * - Add template entry to measurement list and hash table.
+ * - Add template entry to the measurement list and hash table, for
+ * all entries except those carried across kexec.
*
* (Called with ima_extend_list_mutex held.)
*/
-static int ima_add_digest_entry(struct ima_template_entry *entry)
+static int ima_add_digest_entry(struct ima_template_entry *entry,
+ bool update_htable)
{
struct ima_queue_entry *qe;
unsigned int key;
@@ -85,11 +110,34 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
list_add_tail_rcu(&qe->later, &ima_measurements);
atomic_long_inc(&ima_htable.len);
- key = ima_hash_key(entry->digest);
- hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ if (update_htable) {
+ key = ima_hash_key(entry->digest);
+ hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ }
+
+ if (binary_runtime_size != ULONG_MAX) {
+ int size;
+
+ size = get_binary_runtime_size(entry);
+ binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
+ binary_runtime_size + size : ULONG_MAX;
+ }
return 0;
}
+/*
+ * Return the amount of memory required for serializing the
+ * entire binary_runtime_measurement list, including the ima_kexec_hdr
+ * structure.
+ */
+unsigned long ima_get_binary_runtime_size(void)
+{
+ if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
+ return ULONG_MAX;
+ else
+ return binary_runtime_size + sizeof(struct ima_kexec_hdr);
+};
+
static int ima_pcr_extend(const u8 *hash, int pcr)
{
int result = 0;
@@ -103,8 +151,13 @@ static int ima_pcr_extend(const u8 *hash, int pcr)
return result;
}
-/* Add template entry to the measurement list and hash table,
- * and extend the pcr.
+/*
+ * Add template entry to the measurement list and hash table, and
+ * extend the pcr.
+ *
+ * On systems which support carrying the IMA measurement list across
+ * kexec, maintain the total memory size required for serializing the
+ * binary_runtime_measurements.
*/
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
const char *op, struct inode *inode,
@@ -126,7 +179,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
}
- result = ima_add_digest_entry(entry);
+ result = ima_add_digest_entry(entry, 1);
if (result < 0) {
audit_cause = "ENOMEM";
audit_info = 0;
@@ -149,3 +202,13 @@ out:
op, audit_cause, result, audit_info);
return result;
}
+
+int ima_restore_measurement_entry(struct ima_template_entry *entry)
+{
+ int result = 0;
+
+ mutex_lock(&ima_extend_list_mutex);
+ result = ima_add_digest_entry(entry, 0);
+ mutex_unlock(&ima_extend_list_mutex);
+ return result;
+}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index febd12ed9b55..cebb37c63629 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -15,16 +15,20 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/rculist.h>
#include "ima.h"
#include "ima_template_lib.h"
-static struct ima_template_desc defined_templates[] = {
+static struct ima_template_desc builtin_templates[] = {
{.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
{.name = "ima-ng", .fmt = "d-ng|n-ng"},
{.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
{.name = "", .fmt = ""}, /* placeholder for a custom format */
};
+static LIST_HEAD(defined_templates);
+static DEFINE_SPINLOCK(template_list);
+
static struct ima_template_field supported_fields[] = {
{.field_id = "d", .field_init = ima_eventdigest_init,
.field_show = ima_show_template_digest},
@@ -37,6 +41,7 @@ static struct ima_template_field supported_fields[] = {
{.field_id = "sig", .field_init = ima_eventsig_init,
.field_show = ima_show_template_sig},
};
+#define MAX_TEMPLATE_NAME_LEN 15
static struct ima_template_desc *ima_template;
static struct ima_template_desc *lookup_template_desc(const char *name);
@@ -52,6 +57,8 @@ static int __init ima_template_setup(char *str)
if (ima_template)
return 1;
+ ima_init_template_list();
+
/*
* Verify that a template with the supplied name exists.
* If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
@@ -80,7 +87,7 @@ __setup("ima_template=", ima_template_setup);
static int __init ima_template_fmt_setup(char *str)
{
- int num_templates = ARRAY_SIZE(defined_templates);
+ int num_templates = ARRAY_SIZE(builtin_templates);
if (ima_template)
return 1;
@@ -91,22 +98,28 @@ static int __init ima_template_fmt_setup(char *str)
return 1;
}
- defined_templates[num_templates - 1].fmt = str;
- ima_template = defined_templates + num_templates - 1;
+ builtin_templates[num_templates - 1].fmt = str;
+ ima_template = builtin_templates + num_templates - 1;
+
return 1;
}
__setup("ima_template_fmt=", ima_template_fmt_setup);
static struct ima_template_desc *lookup_template_desc(const char *name)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
- if (strcmp(defined_templates[i].name, name) == 0)
- return defined_templates + i;
+ struct ima_template_desc *template_desc;
+ int found = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(template_desc, &defined_templates, list) {
+ if ((strcmp(template_desc->name, name) == 0) ||
+ (strcmp(template_desc->fmt, name) == 0)) {
+ found = 1;
+ break;
+ }
}
-
- return NULL;
+ rcu_read_unlock();
+ return found ? template_desc : NULL;
}
static struct ima_template_field *lookup_template_field(const char *field_id)
@@ -142,9 +155,14 @@ static int template_desc_init_fields(const char *template_fmt,
{
const char *template_fmt_ptr;
struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
- int template_num_fields = template_fmt_size(template_fmt);
+ int template_num_fields;
int i, len;
+ if (num_fields && *num_fields > 0) /* already initialized? */
+ return 0;
+
+ template_num_fields = template_fmt_size(template_fmt);
+
if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {
pr_err("format string '%s' contains too many fields\n",
template_fmt);
@@ -182,11 +200,28 @@ static int template_desc_init_fields(const char *template_fmt,
return 0;
}
+void ima_init_template_list(void)
+{
+ int i;
+
+ if (!list_empty(&defined_templates))
+ return;
+
+ spin_lock(&template_list);
+ for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) {
+ list_add_tail_rcu(&builtin_templates[i].list,
+ &defined_templates);
+ }
+ spin_unlock(&template_list);
+}
+
struct ima_template_desc *ima_template_desc_current(void)
{
- if (!ima_template)
+ if (!ima_template) {
+ ima_init_template_list();
ima_template =
lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+ }
return ima_template;
}
@@ -205,3 +240,239 @@ int __init ima_init_template(void)
return result;
}
+
+static struct ima_template_desc *restore_template_fmt(char *template_name)
+{
+ struct ima_template_desc *template_desc = NULL;
+ int ret;
+
+ ret = template_desc_init_fields(template_name, NULL, NULL);
+ if (ret < 0) {
+ pr_err("attempting to initialize the template \"%s\" failed\n",
+ template_name);
+ goto out;
+ }
+
+ template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL);
+ if (!template_desc)
+ goto out;
+
+ template_desc->name = "";
+ template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
+ if (!template_desc->fmt)
+ goto out;
+
+ spin_lock(&template_list);
+ list_add_tail_rcu(&template_desc->list, &defined_templates);
+ spin_unlock(&template_list);
+out:
+ return template_desc;
+}
+
+static int ima_restore_template_data(struct ima_template_desc *template_desc,
+ void *template_data,
+ int template_data_size,
+ struct ima_template_entry **entry)
+{
+ struct binary_field_data {
+ u32 len;
+ u8 data[0];
+ } __packed;
+
+ struct binary_field_data *field_data;
+ int offset = 0;
+ int ret = 0;
+ int i;
+
+ *entry = kzalloc(sizeof(**entry) +
+ template_desc->num_fields * sizeof(struct ima_field_data),
+ GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ field_data = template_data + offset;
+
+ /* Each field of the template data is prefixed with a length. */
+ if (offset > (template_data_size - sizeof(*field_data))) {
+ pr_err("Restoring the template field failed\n");
+ ret = -EINVAL;
+ break;
+ }
+ offset += sizeof(*field_data);
+
+ if (ima_canonical_fmt)
+ field_data->len = le32_to_cpu(field_data->len);
+
+ if (offset > (template_data_size - field_data->len)) {
+ pr_err("Restoring the template field data failed\n");
+ ret = -EINVAL;
+ break;
+ }
+ offset += field_data->len;
+
+ (*entry)->template_data[i].len = field_data->len;
+ (*entry)->template_data_len += sizeof(field_data->len);
+
+ (*entry)->template_data[i].data =
+ kzalloc(field_data->len + 1, GFP_KERNEL);
+ if (!(*entry)->template_data[i].data) {
+ ret = -ENOMEM;
+ break;
+ }
+ memcpy((*entry)->template_data[i].data, field_data->data,
+ field_data->len);
+ (*entry)->template_data_len += field_data->len;
+ }
+
+ if (ret < 0) {
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ }
+
+ return ret;
+}
+
+/* Restore the serialized binary measurement list without extending PCRs. */
+int ima_restore_measurement_list(loff_t size, void *buf)
+{
+ struct binary_hdr_v1 {
+ u32 pcr;
+ u8 digest[TPM_DIGEST_SIZE];
+ u32 template_name_len;
+ char template_name[0];
+ } __packed;
+ char template_name[MAX_TEMPLATE_NAME_LEN];
+
+ struct binary_data_v1 {
+ u32 template_data_size;
+ char template_data[0];
+ } __packed;
+
+ struct ima_kexec_hdr *khdr = buf;
+ struct binary_hdr_v1 *hdr_v1;
+ struct binary_data_v1 *data_v1;
+
+ void *bufp = buf + sizeof(*khdr);
+ void *bufendp;
+ struct ima_template_entry *entry;
+ struct ima_template_desc *template_desc;
+ unsigned long count = 0;
+ int ret = 0;
+
+ if (!buf || size < sizeof(*khdr))
+ return 0;
+
+ if (ima_canonical_fmt) {
+ khdr->version = le16_to_cpu(khdr->version);
+ khdr->count = le64_to_cpu(khdr->count);
+ khdr->buffer_size = le64_to_cpu(khdr->buffer_size);
+ }
+
+ if (khdr->version != 1) {
+ pr_err("attempting to restore a incompatible measurement list");
+ return -EINVAL;
+ }
+
+ if (khdr->count > ULONG_MAX - 1) {
+ pr_err("attempting to restore too many measurements");
+ return -EINVAL;
+ }
+
+ /*
+ * ima kexec buffer prefix: version, buffer size, count
+ * v1 format: pcr, digest, template-name-len, template-name,
+ * template-data-size, template-data
+ */
+ bufendp = buf + khdr->buffer_size;
+ while ((bufp < bufendp) && (count++ < khdr->count)) {
+ hdr_v1 = bufp;
+ if (bufp > (bufendp - sizeof(*hdr_v1))) {
+ pr_err("attempting to restore partial measurement\n");
+ ret = -EINVAL;
+ break;
+ }
+ bufp += sizeof(*hdr_v1);
+
+ if (ima_canonical_fmt)
+ hdr_v1->template_name_len =
+ le32_to_cpu(hdr_v1->template_name_len);
+
+ if ((hdr_v1->template_name_len >= MAX_TEMPLATE_NAME_LEN) ||
+ (bufp > (bufendp - hdr_v1->template_name_len))) {
+ pr_err("attempting to restore a template name \
+ that is too long\n");
+ ret = -EINVAL;
+ break;
+ }
+ data_v1 = bufp += (u_int8_t)hdr_v1->template_name_len;
+
+ /* template name is not null terminated */
+ memcpy(template_name, hdr_v1->template_name,
+ hdr_v1->template_name_len);
+ template_name[hdr_v1->template_name_len] = 0;
+
+ if (strcmp(template_name, "ima") == 0) {
+ pr_err("attempting to restore an unsupported \
+ template \"%s\" failed\n", template_name);
+ ret = -EINVAL;
+ break;
+ }
+
+ template_desc = lookup_template_desc(template_name);
+ if (!template_desc) {
+ template_desc = restore_template_fmt(template_name);
+ if (!template_desc)
+ break;
+ }
+
+ /*
+ * Only the running system's template format is initialized
+ * on boot. As needed, initialize the other template formats.
+ */
+ ret = template_desc_init_fields(template_desc->fmt,
+ &(template_desc->fields),
+ &(template_desc->num_fields));
+ if (ret < 0) {
+ pr_err("attempting to restore the template fmt \"%s\" \
+ failed\n", template_desc->fmt);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (bufp > (bufendp - sizeof(data_v1->template_data_size))) {
+ pr_err("restoring the template data size failed\n");
+ ret = -EINVAL;
+ break;
+ }
+ bufp += (u_int8_t) sizeof(data_v1->template_data_size);
+
+ if (ima_canonical_fmt)
+ data_v1->template_data_size =
+ le32_to_cpu(data_v1->template_data_size);
+
+ if (bufp > (bufendp - data_v1->template_data_size)) {
+ pr_err("restoring the template data failed\n");
+ ret = -EINVAL;
+ break;
+ }
+ bufp += data_v1->template_data_size;
+
+ ret = ima_restore_template_data(template_desc,
+ data_v1->template_data,
+ data_v1->template_data_size,
+ &entry);
+ if (ret < 0)
+ break;
+
+ memcpy(entry->digest, hdr_v1->digest, TPM_DIGEST_SIZE);
+ entry->pcr =
+ !ima_canonical_fmt ? hdr_v1->pcr : le32_to_cpu(hdr_v1->pcr);
+ ret = ima_restore_measurement_entry(entry);
+ if (ret < 0)
+ break;
+
+ }
+ return ret;
+}
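
ima_restore_measurement_list() above walks a flat buffer of v1 records (pcr, digest, template-name length, name bytes, template-data length, data bytes) and checks every length against the space left in the buffer before advancing the cursor. A compact userspace sketch of the same bounds-checked walk over a simplified <u32 length><payload> record layout (the layout and helper names are illustrative, not the kernel structures):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int walk_records(const unsigned char *buf, size_t size)
{
	const unsigned char *p = buf, *end = buf + size;

	while (p < end) {
		uint32_t len;

		if ((size_t)(end - p) < sizeof(len))
			return -1;                      /* truncated length field */
		memcpy(&len, p, sizeof(len));
		p += sizeof(len);

		if ((size_t)(end - p) < len)
			return -1;                      /* length runs past the buffer */
		printf("record of %u bytes\n", (unsigned)len);
		p += len;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[16];
	uint32_t len = 4;

	memcpy(buf, &len, 4);     memcpy(buf + 4, "ima", 4);   /* record 1 */
	memcpy(buf + 8, &len, 4); memcpy(buf + 12, "sig", 4);  /* record 2 */
	return walk_records(buf, sizeof(buf)) ? 1 : 0;
}
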
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index f9bae04ba176..f9ba37b3928d 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -103,8 +103,11 @@ static void ima_show_template_data_binary(struct seq_file *m,
u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
strlen(field_data->data) : field_data->len;
- if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
- ima_putc(m, &len, sizeof(len));
+ if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) {
+ u32 field_len = !ima_canonical_fmt ? len : cpu_to_le32(len);
+
+ ima_putc(m, &field_len, sizeof(field_len));
+ }
if (!len)
return;
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index e2d4ad3a4b4c..13ae49b0baa0 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -1,3 +1,5 @@
+#include <linux/capability.h>
+
#define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
"getattr", "setattr", "lock", "relabelfrom", "relabelto", "append"
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index a2cdf3370afe..15d1d5c63c3c 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -384,9 +384,6 @@ static void snd_complete_urb(struct urb *urb)
if (unlikely(atomic_read(&ep->chip->shutdown)))
goto exit_clear;
- if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
- goto exit_clear;
-
if (usb_pipeout(ep->pipe)) {
retire_outbound_urb(ep, ctx);
/* can be stopped during retire callback */
@@ -537,11 +534,6 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
alive, ep->ep_num);
clear_bit(EP_FLAG_STOPPING, &ep->flags);
- ep->data_subs = NULL;
- ep->sync_slave = NULL;
- ep->retire_data_urb = NULL;
- ep->prepare_data_urb = NULL;
-
return 0;
}
@@ -1028,6 +1020,10 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
if (--ep->use_count == 0) {
deactivate_urbs(ep, false);
+ ep->data_subs = NULL;
+ ep->sync_slave = NULL;
+ ep->retire_data_urb = NULL;
+ ep->prepare_data_urb = NULL;
set_bit(EP_FLAG_STOPPING, &ep->flags);
}
}